diff --git a/.circleci/config.yml b/.circleci/config.yml index ecae22f872..0a12aa73b8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -49,7 +49,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.54.0 + pip install openai==1.66.1 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -71,7 +71,7 @@ jobs: pip install "Pillow==10.3.0" pip install "jsonschema==4.22.0" pip install "pytest-xdist==3.6.1" - pip install "websockets==10.4" + pip install "websockets==13.1.0" pip uninstall posthog -y - save_cache: paths: @@ -168,7 +168,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.54.0 + pip install openai==1.66.1 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -189,6 +189,7 @@ jobs: pip install "diskcache==5.6.1" pip install "Pillow==10.3.0" pip install "jsonschema==4.22.0" + pip install "websockets==13.1.0" - save_cache: paths: - ./venv @@ -267,7 +268,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.54.0 + pip install openai==1.66.1 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -288,6 +289,7 @@ jobs: pip install "diskcache==5.6.1" pip install "Pillow==10.3.0" pip install "jsonschema==4.22.0" + pip install "websockets==13.1.0" - save_cache: paths: - ./venv @@ -511,7 +513,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.54.0 + pip install openai==1.66.1 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -678,6 +680,48 @@ jobs: paths: - llm_translation_coverage.xml - llm_translation_coverage + llm_responses_api_testing: + docker: + - image: cimg/python:3.11 + auth: + username: ${DOCKERHUB_USERNAME} + password: ${DOCKERHUB_PASSWORD} + working_directory: ~/project + + steps: + - checkout + - run: + name: Install Dependencies + command: | + python -m pip install --upgrade pip + python -m pip install -r requirements.txt + pip install "pytest==7.3.1" + pip install "pytest-retry==1.6.3" + pip install "pytest-cov==5.0.0" + pip install "pytest-asyncio==0.21.1" + pip install "respx==0.21.1" + # Run pytest and generate JUnit XML report + - run: + name: Run tests + command: | + pwd + ls + python -m pytest -vv tests/llm_responses_api_testing --cov=litellm --cov-report=xml -x -s -v --junitxml=test-results/junit.xml --durations=5 + no_output_timeout: 120m + - run: + name: Rename the coverage files + command: | + mv coverage.xml llm_responses_api_coverage.xml + mv .coverage llm_responses_api_coverage + + # Store test results + - store_test_results: + path: test-results + - persist_to_workspace: + root: . 
+ paths: + - llm_responses_api_coverage.xml + - llm_responses_api_coverage litellm_mapped_tests: docker: - image: cimg/python:3.11 @@ -1234,7 +1278,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.54.0 " + pip install "openai==1.66.1" - run: name: Install Grype command: | @@ -1309,13 +1353,13 @@ jobs: command: | pwd ls - python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation --ignore=tests/image_gen_tests --ignore=tests/pass_through_unit_tests + python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation --ignore=tests/llm_responses_api_testing --ignore=tests/image_gen_tests --ignore=tests/pass_through_unit_tests no_output_timeout: 120m # Store test results - store_test_results: path: test-results - e2e_openai_misc_endpoints: + e2e_openai_endpoints: machine: image: ubuntu-2204:2023.10.1 resource_class: xlarge @@ -1370,7 +1414,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.54.0 " + pip install "openai==1.66.1" # Run pytest and generate JUnit XML report - run: name: Build Docker image @@ -1432,7 +1476,7 @@ jobs: command: | pwd ls - python -m pytest -s -vv tests/openai_misc_endpoints_tests --junitxml=test-results/junit.xml --durations=5 + python -m pytest -s -vv tests/openai_endpoints_tests --junitxml=test-results/junit.xml --durations=5 no_output_timeout: 120m # Store test results @@ -1492,7 +1536,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.54.0 " + pip install "openai==1.66.1" - run: name: Build Docker image command: docker build -t my-app:latest -f ./docker/Dockerfile.database . @@ -1921,7 +1965,7 @@ jobs: pip install "pytest-asyncio==0.21.1" pip install "google-cloud-aiplatform==1.43.0" pip install aiohttp - pip install "openai==1.54.0 " + pip install "openai==1.66.1" pip install "assemblyai==0.37.0" python -m pip install --upgrade pip pip install "pydantic==2.7.1" @@ -2068,7 +2112,7 @@ jobs: python -m venv venv . 
venv/bin/activate pip install coverage - coverage combine llm_translation_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage image_gen_coverage pass_through_unit_tests_coverage batches_coverage litellm_proxy_security_tests_coverage + coverage combine llm_translation_coverage llm_responses_api_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage image_gen_coverage pass_through_unit_tests_coverage batches_coverage litellm_proxy_security_tests_coverage coverage xml - codecov/upload: file: ./coverage.xml @@ -2197,7 +2241,7 @@ jobs: pip install "pytest-retry==1.6.3" pip install "pytest-asyncio==0.21.1" pip install aiohttp - pip install "openai==1.54.0 " + pip install "openai==1.66.1" python -m pip install --upgrade pip pip install "pydantic==2.7.1" pip install "pytest==7.3.1" @@ -2387,7 +2431,7 @@ workflows: only: - main - /litellm_.*/ - - e2e_openai_misc_endpoints: + - e2e_openai_endpoints: filters: branches: only: @@ -2429,6 +2473,12 @@ workflows: only: - main - /litellm_.*/ + - llm_responses_api_testing: + filters: + branches: + only: + - main + - /litellm_.*/ - litellm_mapped_tests: filters: branches: @@ -2468,6 +2518,7 @@ workflows: - upload-coverage: requires: - llm_translation_testing + - llm_responses_api_testing - litellm_mapped_tests - batches_testing - litellm_utils_testing @@ -2522,10 +2573,11 @@ workflows: requires: - local_testing - build_and_test - - e2e_openai_misc_endpoints + - e2e_openai_endpoints - load_testing - test_bad_database_url - llm_translation_testing + - llm_responses_api_testing - litellm_mapped_tests - batches_testing - litellm_utils_testing diff --git a/.circleci/requirements.txt b/.circleci/requirements.txt index 12e83a40f2..e63fb9dd9a 100644 --- a/.circleci/requirements.txt +++ b/.circleci/requirements.txt @@ -1,5 +1,5 @@ # used by CI/CD testing -openai==1.54.0 +openai==1.66.1 python-dotenv tiktoken importlib_metadata diff --git a/.github/workflows/ghcr_deploy.yml b/.github/workflows/ghcr_deploy.yml index 587abc8ea7..58c8a1e2e1 100644 --- a/.github/workflows/ghcr_deploy.yml +++ b/.github/workflows/ghcr_deploy.yml @@ -80,7 +80,6 @@ jobs: permissions: contents: read packages: write - # steps: - name: Checkout repository uses: actions/checkout@v4 @@ -112,7 +111,11 @@ jobs: with: context: . push: true - tags: ${{ steps.meta.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }}, ${{ steps.meta.outputs.tags }}-${{ github.event.inputs.release_type }} # if a tag is provided, use that, otherwise use the release tag, and if neither is available, use 'latest' + tags: | + ${{ steps.meta.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }}, + ${{ steps.meta.outputs.tags }}-${{ github.event.inputs.release_type }} + ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }}, + ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm:main-stable', env.REGISTRY) || '' }} labels: ${{ steps.meta.outputs.labels }} platforms: local,linux/amd64,linux/arm64,linux/arm64/v8 @@ -151,8 +154,12 @@ jobs: context: . 
file: ./docker/Dockerfile.database push: true - tags: ${{ steps.meta-database.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }}, ${{ steps.meta-database.outputs.tags }}-${{ github.event.inputs.release_type }} - labels: ${{ steps.meta-database.outputs.labels }} + tags: | + ${{ steps.meta-database.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }}, + ${{ steps.meta-database.outputs.tags }}-${{ github.event.inputs.release_type }} + ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-database:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }}, + ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-database:main-stable', env.REGISTRY) || '' }} + labels: ${{ steps.meta-database.outputs.labels }} platforms: local,linux/amd64,linux/arm64,linux/arm64/v8 build-and-push-image-non_root: @@ -190,7 +197,11 @@ jobs: context: . file: ./docker/Dockerfile.non_root push: true - tags: ${{ steps.meta-non_root.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }}, ${{ steps.meta-non_root.outputs.tags }}-${{ github.event.inputs.release_type }} + tags: | + ${{ steps.meta-non_root.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }}, + ${{ steps.meta-non_root.outputs.tags }}-${{ github.event.inputs.release_type }} + ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-non_root:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }}, + ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-non_root:main-stable', env.REGISTRY) || '' }} labels: ${{ steps.meta-non_root.outputs.labels }} platforms: local,linux/amd64,linux/arm64,linux/arm64/v8 @@ -229,7 +240,11 @@ jobs: context: . file: ./litellm-js/spend-logs/Dockerfile push: true - tags: ${{ steps.meta-spend-logs.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }}, ${{ steps.meta-spend-logs.outputs.tags }}-${{ github.event.inputs.release_type }} + tags: | + ${{ steps.meta-spend-logs.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }}, + ${{ steps.meta-spend-logs.outputs.tags }}-${{ github.event.inputs.release_type }} + ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-spend_logs:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }}, + ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-spend_logs:main-stable', env.REGISTRY) || '' }} platforms: local,linux/amd64,linux/arm64,linux/arm64/v8 build-and-push-helm-chart: diff --git a/.gitignore b/.gitignore index d35923f7c3..dab6d4ec81 100644 --- a/.gitignore +++ b/.gitignore @@ -79,3 +79,7 @@ litellm/proxy/_experimental/out/model_hub.html litellm/proxy/application.log tests/llm_translation/vertex_test_account.json tests/llm_translation/test_vertex_key.json +litellm/proxy/migrations/0_init/migration.sql +litellm/proxy/db/migrations/0_init/migration.sql +litellm/proxy/db/migrations/* +litellm/proxy/migrations/* \ No newline at end of file diff --git a/Makefile b/Makefile index 6bd3cb57d4..6c231d3cc2 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # LiteLLM Makefile # Simple Makefile for running tests and basic development tasks -.PHONY: help test test-unit test-integration +.PHONY: help test test-unit test-integration lint format # Default target help: @@ -9,6 +9,14 @@ help: @echo " make test - Run all tests" @echo " make test-unit - Run unit tests" @echo " make test-integration - Run integration tests" + @echo " make test-unit-helm - Run helm unit tests" + +install-dev: + poetry install --with dev + 
+lint: install-dev + poetry run pip install types-requests types-setuptools types-redis types-PyYAML + cd litellm && poetry run mypy . --ignore-missing-imports # Testing test: poetry run pytest tests/ @@ -18,4 +26,7 @@ test-unit: poetry run pytest tests/litellm/ test-integration: - poetry run pytest tests/ -k "not litellm" \ No newline at end of file + poetry run pytest tests/ -k "not litellm" + +test-unit-helm: + helm unittest -f 'tests/*.yaml' deploy/charts/litellm-helm \ No newline at end of file diff --git a/deploy/charts/litellm-helm/Chart.yaml b/deploy/charts/litellm-helm/Chart.yaml index f1f2fd8d64..4d856fdc0f 100644 --- a/deploy/charts/litellm-helm/Chart.yaml +++ b/deploy/charts/litellm-helm/Chart.yaml @@ -18,7 +18,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.4.1 +version: 0.4.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/deploy/charts/litellm-helm/README.md b/deploy/charts/litellm-helm/README.md index 8b2196f577..a0ba5781df 100644 --- a/deploy/charts/litellm-helm/README.md +++ b/deploy/charts/litellm-helm/README.md @@ -22,6 +22,8 @@ If `db.useStackgresOperator` is used (not yet implemented): | Name | Description | Value | | ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- | +| `masterkeySecretName` | The name of the Kubernetes Secret that contains the Master API Key for LiteLLM. If not specified, the chart-generated secret name is used. | N/A | +| `masterkeySecretKey` | The key within the Kubernetes Secret that contains the Master API Key for LiteLLM. If not specified, `masterkey` is used as the key. | N/A | | `masterkey` | The Master API Key for LiteLLM. If not specified, a random key is generated. | N/A | | `environmentSecrets` | An optional array of Secret object names. The keys and values in these secrets will be presented to the LiteLLM proxy pod as environment variables. See below for an example Secret object. | `[]` | | `environmentConfigMaps` | An optional array of ConfigMap object names. The keys and values in these configmaps will be presented to the LiteLLM proxy pod as environment variables. See below for an example Secret object. | `[]` | diff --git a/deploy/charts/litellm-helm/templates/deployment.yaml b/deploy/charts/litellm-helm/templates/deployment.yaml index 697148abf8..52f761ed15 100644 --- a/deploy/charts/litellm-helm/templates/deployment.yaml +++ b/deploy/charts/litellm-helm/templates/deployment.yaml @@ -78,8 +78,8 @@ spec: - name: PROXY_MASTER_KEY valueFrom: secretKeyRef: - name: {{ include "litellm.fullname" . }}-masterkey - key: masterkey + name: {{ .Values.masterkeySecretName | default (printf "%s-masterkey" (include "litellm.fullname" .)) }} + key: {{ .Values.masterkeySecretKey | default "masterkey" }} {{- if .Values.redis.enabled }} - name: REDIS_HOST value: {{ include "litellm.redis.serviceName" .
}} diff --git a/deploy/charts/litellm-helm/templates/secret-masterkey.yaml b/deploy/charts/litellm-helm/templates/secret-masterkey.yaml index 57b854cc0f..5632957dc0 100644 --- a/deploy/charts/litellm-helm/templates/secret-masterkey.yaml +++ b/deploy/charts/litellm-helm/templates/secret-masterkey.yaml @@ -1,3 +1,4 @@ +{{- if not .Values.masterkeySecretName }} {{ $masterkey := (.Values.masterkey | default (randAlphaNum 17)) }} apiVersion: v1 kind: Secret @@ -5,4 +6,5 @@ metadata: name: {{ include "litellm.fullname" . }}-masterkey data: masterkey: {{ $masterkey | b64enc }} -type: Opaque \ No newline at end of file +type: Opaque +{{- end }} diff --git a/deploy/charts/litellm-helm/tests/deployment_tests.yaml b/deploy/charts/litellm-helm/tests/deployment_tests.yaml index e7ce44b052..0e4b8e0b1f 100644 --- a/deploy/charts/litellm-helm/tests/deployment_tests.yaml +++ b/deploy/charts/litellm-helm/tests/deployment_tests.yaml @@ -52,3 +52,31 @@ tests: - equal: path: spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] value: antarctica-east1 + - it: should work without masterkeySecretName or masterkeySecretKey + template: deployment.yaml + set: + masterkeySecretName: "" + masterkeySecretKey: "" + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: PROXY_MASTER_KEY + valueFrom: + secretKeyRef: + name: RELEASE-NAME-litellm-masterkey + key: masterkey + - it: should work with masterkeySecretName and masterkeySecretKey + template: deployment.yaml + set: + masterkeySecretName: my-secret + masterkeySecretKey: my-key + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: PROXY_MASTER_KEY + valueFrom: + secretKeyRef: + name: my-secret + key: my-key diff --git a/deploy/charts/litellm-helm/tests/masterkey-secret_tests.yaml b/deploy/charts/litellm-helm/tests/masterkey-secret_tests.yaml new file mode 100644 index 0000000000..eb1d3c3967 --- /dev/null +++ b/deploy/charts/litellm-helm/tests/masterkey-secret_tests.yaml @@ -0,0 +1,18 @@ +suite: test masterkey secret +templates: + - secret-masterkey.yaml +tests: + - it: should create a secret if masterkeySecretName is not set + template: secret-masterkey.yaml + set: + masterkeySecretName: "" + asserts: + - isKind: + of: Secret + - it: should not create a secret if masterkeySecretName is set + template: secret-masterkey.yaml + set: + masterkeySecretName: my-secret + asserts: + - hasDocuments: + count: 0 diff --git a/deploy/charts/litellm-helm/values.yaml b/deploy/charts/litellm-helm/values.yaml index 9f21fc40ad..70f6c2ef23 100644 --- a/deploy/charts/litellm-helm/values.yaml +++ b/deploy/charts/litellm-helm/values.yaml @@ -75,6 +75,12 @@ ingress: # masterkey: changeit +# if set, use this secret for the master key; otherwise, autogenerate a new one +masterkeySecretName: "" + +# if set, use this secret key for the master key; otherwise, use the default key +masterkeySecretKey: "" + # The elements within proxy_config are rendered as config.yaml for the proxy # Examples: https://github.com/BerriAI/litellm/tree/main/litellm/proxy/example_config_yaml # Reference: https://docs.litellm.ai/docs/proxy/configs diff --git a/docker-compose.yml b/docker-compose.yml index 78044c03b8..d16ec6ed20 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,10 +20,18 @@ services: STORE_MODEL_IN_DB: "True" # allows adding models to proxy via UI env_file: - .env # Load local .env file + depends_on: + - db # Indicates that this service 
depends on the 'db' service, ensuring 'db' starts first + healthcheck: # Defines the health check configuration for the container + test: [ "CMD-SHELL", "curl -f http://localhost:4000/health/liveliness || exit 1" ] # Run via a shell so the `|| exit 1` fallback actually executes + interval: 30s # Perform health check every 30 seconds + timeout: 10s # Health check command times out after 10 seconds + retries: 3 # Retry up to 3 times if health check fails + start_period: 40s # Wait 40 seconds after container start before beginning health checks db: - image: postgres + image: postgres:16 restart: always environment: POSTGRES_DB: litellm @@ -31,6 +39,8 @@ services: POSTGRES_PASSWORD: dbpassword9090 ports: - "5432:5432" + volumes: + - postgres_data:/var/lib/postgresql/data # Persists Postgres data across container restarts healthcheck: test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"] interval: 1s @@ -53,6 +63,8 @@ services: volumes: prometheus_data: driver: local + postgres_data: + name: litellm_postgres_data # Named volume for Postgres data persistence # ...rest of your docker-compose config if any diff --git a/docs/my-website/docs/anthropic_unified.md b/docs/my-website/docs/anthropic_unified.md index 71b9203399..cf6ba798d5 100644 --- a/docs/my-website/docs/anthropic_unified.md +++ b/docs/my-website/docs/anthropic_unified.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# [BETA] `/v1/messages` +# /v1/messages [BETA] LiteLLM provides a BETA endpoint in the spec of Anthropic's `/v1/messages` endpoint. diff --git a/docs/my-website/docs/assistants.md b/docs/my-website/docs/assistants.md index 5e68e8dded..4032c74557 100644 --- a/docs/my-website/docs/assistants.md +++ b/docs/my-website/docs/assistants.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Assistants API +# /assistants Covers Threads, Messages, Assistants.
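A note on the compose healthcheck above: Docker's exec-form `CMD` never invokes a shell, so an embedded `|| exit 1` would be passed to curl as part of its argument list rather than executed; `CMD-SHELL` is the form that runs the string through a shell. If you want to probe the same endpoint by hand, here is a minimal sketch (assuming the proxy from this compose file is reachable on localhost:4000; `httpx` is already pinned in this repo's CI installs):

```python
import httpx

# Hit the same liveliness endpoint the compose healthcheck targets.
resp = httpx.get("http://localhost:4000/health/liveliness", timeout=10)
resp.raise_for_status()  # raises on non-2xx, mirroring `curl -f`
print(resp.status_code, resp.text)
```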
diff --git a/docs/my-website/docs/batches.md b/docs/my-website/docs/batches.md index 4ac9fa61e3..4918e30d1f 100644 --- a/docs/my-website/docs/batches.md +++ b/docs/my-website/docs/batches.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# [BETA] Batches API +# /batches Covers Batches, Files diff --git a/docs/my-website/docs/completion/prompt_caching.md b/docs/my-website/docs/completion/prompt_caching.md index 5c795778ef..9447a11d52 100644 --- a/docs/my-website/docs/completion/prompt_caching.md +++ b/docs/my-website/docs/completion/prompt_caching.md @@ -3,7 +3,13 @@ import TabItem from '@theme/TabItem'; # Prompt Caching -For OpenAI + Anthropic + Deepseek, LiteLLM follows the OpenAI prompt caching usage object format: +Supported Providers: +- OpenAI (`openai/`) +- Anthropic API (`anthropic/`) +- Bedrock (`bedrock/`, `bedrock/invoke/`, `bedrock/converse`) (see [all Bedrock models that support prompt caching](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-caching.html)) +- Deepseek API (`deepseek/`) + +For the supported providers, LiteLLM follows the OpenAI prompt caching usage object format: ```bash "usage": { @@ -499,4 +505,4 @@ curl -L -X GET 'http://0.0.0.0:4000/v1/model/info' \ -This checks our maintained [model info/cost map](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json) \ No newline at end of file +This checks our maintained [model info/cost map](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json)
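To make the prompt-caching usage object above concrete, here is a minimal sketch of reading cache-hit counts from a LiteLLM response. The model name is a placeholder, and whether `cached_tokens` is populated depends on each provider's own rules (minimum prompt sizes, Anthropic's `cache_control` blocks, etc.):

```python
import litellm

# Any provider from the supported list above can be substituted here.
response = litellm.completion(
    model="openai/gpt-4o",  # placeholder model name
    messages=[{"role": "user", "content": "long, repeated context ..."}],
)

usage = response.usage
details = getattr(usage, "prompt_tokens_details", None)
cached = getattr(details, "cached_tokens", None) if details else None
print(f"prompt_tokens={usage.prompt_tokens}, cached_tokens={cached}")
```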
diff --git a/docs/my-website/docs/embedding/supported_embedding.md b/docs/my-website/docs/embedding/supported_embedding.md index d0cb59b46e..06d4107372 100644 --- a/docs/my-website/docs/embedding/supported_embedding.md +++ b/docs/my-website/docs/embedding/supported_embedding.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Embeddings +# /embeddings ## Quick Start ```python diff --git a/docs/my-website/docs/enterprise.md b/docs/my-website/docs/enterprise.md index 0306a5b452..5aeeb710ff 100644 --- a/docs/my-website/docs/enterprise.md +++ b/docs/my-website/docs/enterprise.md @@ -34,9 +34,9 @@ You can use our cloud product where we setup a dedicated instance for you. Professional Support can assist with LLM/Provider integrations, deployment, upgrade management, and LLM Provider troubleshooting. We can’t solve your own infrastructure-related issues but we will guide you to fix them. -- 1 hour for Sev0 issues -- 6 hours for Sev1 -- 24h for Sev2-Sev3 between 7am – 7pm PT (Monday through Saturday) +- 1 hour for Sev0 issues - 100% production traffic is failing +- 6 hours for Sev1 - <100% production traffic is failing +- 24h for Sev2-Sev3 between 7am – 7pm PT (Monday through Saturday) - setup issues e.g. Redis working on our end, but not on your infrastructure. - 72h SLA for patching vulnerabilities in the software. **We can offer custom SLAs** based on your needs and the severity of the issue diff --git a/docs/my-website/docs/extras/contributing_code.md b/docs/my-website/docs/extras/contributing_code.md index 0fe7675ead..ee46a33095 100644 --- a/docs/my-website/docs/extras/contributing_code.md +++ b/docs/my-website/docs/extras/contributing_code.md @@ -8,7 +8,7 @@ Here are the core requirements for any PR submitted to LiteLLM - [ ] Add testing, **Adding at least 1 test is a hard requirement** - [see details](#2-adding-testing-to-your-pr) - [ ] Ensure your PR passes the following tests: - [ ] [Unit Tests](#3-running-unit-tests) - - [ ] Formatting / Linting Tests + - [ ] [Formatting / Linting Tests](#35-running-linting-tests) - [ ] Keep scope as isolated as possible. As a general rule, your changes should address 1 specific problem at a time @@ -56,6 +56,16 @@ run the following command on the root of the litellm directory make test-unit ``` +## 3.5 Running Linting Tests + +Run the following command from the root of the litellm directory + +```shell +make lint +``` + +LiteLLM uses mypy for linting. In CI/CD we also run `black` for formatting. + ## 4. Submit a PR with your changes! - push your fork to your GitHub repo diff --git a/docs/my-website/docs/files_endpoints.md b/docs/my-website/docs/files_endpoints.md index cccb35daa9..7e20982ff4 100644 --- a/docs/my-website/docs/files_endpoints.md +++ b/docs/my-website/docs/files_endpoints.md @@ -2,7 +2,7 @@ import TabItem from '@theme/TabItem'; import Tabs from '@theme/Tabs'; -# Files API +# /files Files are used to upload documents that can be used with features like Assistants, Fine-tuning, and Batch API. diff --git a/docs/my-website/docs/fine_tuning.md b/docs/my-website/docs/fine_tuning.md index fd5d99a6a1..f9a9297e06 100644 --- a/docs/my-website/docs/fine_tuning.md +++ b/docs/my-website/docs/fine_tuning.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# [Beta] Fine-tuning API +# /fine_tuning :::info diff --git a/docs/my-website/docs/moderation.md b/docs/my-website/docs/moderation.md index 6dd092fb52..95fe8b2856 100644 --- a/docs/my-website/docs/moderation.md +++ b/docs/my-website/docs/moderation.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Moderation +# /moderations ### Usage diff --git a/docs/my-website/docs/providers/bedrock.md b/docs/my-website/docs/providers/bedrock.md index 628132b448..45ad3f0c61 100644 --- a/docs/my-website/docs/providers/bedrock.md +++ b/docs/my-website/docs/providers/bedrock.md @@ -79,6 +79,7 @@ aws_session_name: Optional[str], aws_profile_name: Optional[str], aws_role_name: Optional[str], aws_web_identity_token: Optional[str], +aws_bedrock_runtime_endpoint: Optional[str], ``` ### 2. Start the proxy @@ -1262,6 +1263,473 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \ +## Bedrock Imported Models (Deepseek, Deepseek R1) + +### Deepseek R1 + +This is a separate route, as the chat template is different.
+ +| Property | Details | +|----------|---------| +| Provider Route | `bedrock/deepseek_r1/{model_arn}` | +| Provider Documentation | [Bedrock Imported Models](https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html), [Deepseek Bedrock Imported Model](https://aws.amazon.com/blogs/machine-learning/deploy-deepseek-r1-distilled-llama-models-with-amazon-bedrock-custom-model-import/) | + + + + +```python +from litellm import completion +import os + +response = completion( + model="bedrock/deepseek_r1/arn:aws:bedrock:us-east-1:086734376398:imported-model/r4c4kewx2s0n", # bedrock/deepseek_r1/{your-model-arn} + messages=[{"role": "user", "content": "Tell me a joke"}], +) +``` + + + + + + +**1. Add to config** + +```yaml +model_list: + - model_name: DeepSeek-R1-Distill-Llama-70B + litellm_params: + model: bedrock/deepseek_r1/arn:aws:bedrock:us-east-1:086734376398:imported-model/r4c4kewx2s0n + +``` + +**2. Start proxy** + +```bash +litellm --config /path/to/config.yaml + +# RUNNING at http://0.0.0.0:4000 +``` + +**3. Test it!** + +```bash +curl --location 'http://0.0.0.0:4000/chat/completions' \ + --header 'Authorization: Bearer sk-1234' \ + --header 'Content-Type: application/json' \ + --data '{ + "model": "DeepSeek-R1-Distill-Llama-70B", # 👈 the 'model_name' in config + "messages": [ + { + "role": "user", + "content": "what llm are you" + } + ], + }' +``` + + + + + +### Deepseek (not R1) + +| Property | Details | +|----------|---------| +| Provider Route | `bedrock/llama/{model_arn}` | +| Provider Documentation | [Bedrock Imported Models](https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html), [Deepseek Bedrock Imported Model](https://aws.amazon.com/blogs/machine-learning/deploy-deepseek-r1-distilled-llama-models-with-amazon-bedrock-custom-model-import/) | + + + +Use this route to call Bedrock Imported Models that follow the `llama` Invoke Request / Response spec + + + + + +```python +from litellm import completion +import os + +response = completion( + model="bedrock/llama/arn:aws:bedrock:us-east-1:086734376398:imported-model/r4c4kewx2s0n", # bedrock/llama/{your-model-arn} + messages=[{"role": "user", "content": "Tell me a joke"}], +) +``` + + + + + + +**1. Add to config** + +```yaml +model_list: + - model_name: DeepSeek-R1-Distill-Llama-70B + litellm_params: + model: bedrock/llama/arn:aws:bedrock:us-east-1:086734376398:imported-model/r4c4kewx2s0n + +``` + +**2. Start proxy** + +```bash +litellm --config /path/to/config.yaml + +# RUNNING at http://0.0.0.0:4000 +``` + +**3. Test it!** + +```bash +curl --location 'http://0.0.0.0:4000/chat/completions' \ + --header 'Authorization: Bearer sk-1234' \ + --header 'Content-Type: application/json' \ + --data '{ + "model": "DeepSeek-R1-Distill-Llama-70B", # 👈 the 'model_name' in config + "messages": [ + { + "role": "user", + "content": "what llm are you" + } + ], + }' +``` + + + + + + +## Provisioned throughput models +To use provisioned throughput Bedrock models pass +- `model=bedrock/`, example `model=bedrock/anthropic.claude-v2`. 
Set `model` to any of the [Supported AWS models](#supported-aws-bedrock-models) +- `model_id=provisioned-model-arn` + +Completion +```python +import litellm +response = litellm.completion( + model="bedrock/anthropic.claude-instant-v1", + model_id="provisioned-model-arn", + messages=[{"content": "Hello, how are you?", "role": "user"}] +) +``` + +Embedding +```python +import litellm +response = litellm.embedding( + model="bedrock/amazon.titan-embed-text-v1", + model_id="provisioned-model-arn", + input=["hi"], +) +``` + + +## Supported AWS Bedrock Models +Here's an example of using a bedrock model with LiteLLM. For a complete list, refer to the [model cost map](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json) + +| Model Name | Command | Environment Variables | +|----------------------------|------------------------------------------------------------------|------------------------------------------------------------------| +| Anthropic Claude-V3.5 Sonnet | `completion(model='bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Anthropic Claude-V3 sonnet | `completion(model='bedrock/anthropic.claude-3-sonnet-20240229-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Anthropic Claude-V3 Haiku | `completion(model='bedrock/anthropic.claude-3-haiku-20240307-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Anthropic Claude-V3 Opus | `completion(model='bedrock/anthropic.claude-3-opus-20240229-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Anthropic Claude-V2.1 | `completion(model='bedrock/anthropic.claude-v2:1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Anthropic Claude-V2 | `completion(model='bedrock/anthropic.claude-v2', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Anthropic Claude-Instant V1 | `completion(model='bedrock/anthropic.claude-instant-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Meta llama3-1-405b | `completion(model='bedrock/meta.llama3-1-405b-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Meta llama3-1-70b | `completion(model='bedrock/meta.llama3-1-70b-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Meta llama3-1-8b | `completion(model='bedrock/meta.llama3-1-8b-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Meta llama3-70b | `completion(model='bedrock/meta.llama3-70b-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Meta llama3-8b | `completion(model='bedrock/meta.llama3-8b-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | +| Amazon Titan Lite | `completion(model='bedrock/amazon.titan-text-lite-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | +| Amazon Titan Express | `completion(model='bedrock/amazon.titan-text-express-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | +| Cohere Command |
`completion(model='bedrock/cohere.command-text-v14', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | +| AI21 J2-Mid | `completion(model='bedrock/ai21.j2-mid-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | +| AI21 J2-Ultra | `completion(model='bedrock/ai21.j2-ultra-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | +| AI21 Jamba-Instruct | `completion(model='bedrock/ai21.jamba-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | +| Meta Llama 2 Chat 13b | `completion(model='bedrock/meta.llama2-13b-chat-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | +| Meta Llama 2 Chat 70b | `completion(model='bedrock/meta.llama2-70b-chat-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | +| Mistral 7B Instruct | `completion(model='bedrock/mistral.mistral-7b-instruct-v0:2', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | +| Mixtral 8x7B Instruct | `completion(model='bedrock/mistral.mixtral-8x7b-instruct-v0:1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | + +## Bedrock Embedding + +### API keys +This can be set as env variables or passed as **params to litellm.embedding()** +```python +import os +os.environ["AWS_ACCESS_KEY_ID"] = "" # Access key +os.environ["AWS_SECRET_ACCESS_KEY"] = "" # Secret access key +os.environ["AWS_REGION_NAME"] = "" # us-east-1, us-east-2, us-west-1, us-west-2 +``` + +### Usage +```python +from litellm import embedding +response = embedding( + model="bedrock/amazon.titan-embed-text-v1", + input=["good morning from litellm"], +) +print(response) +``` + +## Supported AWS Bedrock Embedding Models + +| Model Name | Usage | Supported Additional OpenAI params | +|----------------------|---------------------------------------------|-----| +| Titan Embeddings V2 | `embedding(model="bedrock/amazon.titan-embed-text-v2:0", input=input)` | [here](https://github.com/BerriAI/litellm/blob/f5905e100068e7a4d61441d7453d7cf5609c2121/litellm/llms/bedrock/embed/amazon_titan_v2_transformation.py#L59) | +| Titan Embeddings - V1 | `embedding(model="bedrock/amazon.titan-embed-text-v1", input=input)` | [here](https://github.com/BerriAI/litellm/blob/f5905e100068e7a4d61441d7453d7cf5609c2121/litellm/llms/bedrock/embed/amazon_titan_g1_transformation.py#L53) +| Titan Multimodal Embeddings | `embedding(model="bedrock/amazon.titan-embed-image-v1", input=input)` | [here](https://github.com/BerriAI/litellm/blob/f5905e100068e7a4d61441d7453d7cf5609c2121/litellm/llms/bedrock/embed/amazon_titan_multimodal_transformation.py#L28) | +| Cohere Embeddings - English | `embedding(model="bedrock/cohere.embed-english-v3", input=input)` | [here](https://github.com/BerriAI/litellm/blob/f5905e100068e7a4d61441d7453d7cf5609c2121/litellm/llms/bedrock/embed/cohere_transformation.py#L18) +| Cohere Embeddings - Multilingual | `embedding(model="bedrock/cohere.embed-multilingual-v3", input=input)` | 
[here](https://github.com/BerriAI/litellm/blob/f5905e100068e7a4d61441d7453d7cf5609c2121/litellm/llms/bedrock/embed/cohere_transformation.py#L18) + +### Advanced - [Drop Unsupported Params](https://docs.litellm.ai/docs/completion/drop_params#openai-proxy-usage) + +### Advanced - [Pass model/provider-specific Params](https://docs.litellm.ai/docs/completion/provider_specific_params#proxy-usage) + +## Image Generation +Use this for Stable Diffusion and Amazon Nova Canvas on Bedrock + + +### Usage + + + + +```python +import os +from litellm import image_generation + +os.environ["AWS_ACCESS_KEY_ID"] = "" +os.environ["AWS_SECRET_ACCESS_KEY"] = "" +os.environ["AWS_REGION_NAME"] = "" + +response = image_generation( + prompt="A cute baby sea otter", + model="bedrock/stability.stable-diffusion-xl-v0", + ) +print(f"response: {response}") +``` + +**Set optional params** +```python +import os +from litellm import image_generation + +os.environ["AWS_ACCESS_KEY_ID"] = "" +os.environ["AWS_SECRET_ACCESS_KEY"] = "" +os.environ["AWS_REGION_NAME"] = "" + +response = image_generation( + prompt="A cute baby sea otter", + model="bedrock/stability.stable-diffusion-xl-v0", + ### OPENAI-COMPATIBLE ### + size="128x512", # width=128, height=512 + ### PROVIDER-SPECIFIC ### see `AmazonStabilityConfig` in bedrock.py for all params + seed=30 + ) +print(f"response: {response}") +``` + + + +1. Setup config.yaml + +```yaml +model_list: + - model_name: amazon.nova-canvas-v1:0 + litellm_params: + model: bedrock/amazon.nova-canvas-v1:0 + aws_region_name: "us-east-1" + aws_secret_access_key: my-key # OPTIONAL - all boto3 auth params supported + aws_access_key_id: my-id # OPTIONAL - all boto3 auth params supported +``` + +2. Start proxy + +```bash +litellm --config /path/to/config.yaml +``` + +3. Test it! + +```bash +curl -L -X POST 'http://0.0.0.0:4000/v1/images/generations' \ +-H 'Content-Type: application/json' \ +-H 'Authorization: Bearer $LITELLM_VIRTUAL_KEY' \ +-d '{ + "model": "amazon.nova-canvas-v1:0", + "prompt": "A cute baby sea otter" +}' +``` + + + + +## Supported AWS Bedrock Image Generation Models + +| Model Name | Function Call | +|----------------------|---------------------------------------------| +| Stable Diffusion 3 - v0 | `image_generation(model="bedrock/stability.sd3-large-v1:0", prompt=prompt)` | +| Stable Diffusion - v0 | `image_generation(model="bedrock/stability.stable-diffusion-xl-v0", prompt=prompt)` | +| Stable Diffusion - v1 | `image_generation(model="bedrock/stability.stable-diffusion-xl-v1", prompt=prompt)` | + + +## Rerank API + +Use Bedrock's Rerank API in the Cohere `/rerank` format. + +Supported Cohere Rerank Params +- `model` - the foundation model ARN +- `query` - the query to rerank against +- `documents` - the list of documents to rerank +- `top_n` - the number of results to return + + + + +```python +from litellm import rerank +import os + +os.environ["AWS_ACCESS_KEY_ID"] = "" +os.environ["AWS_SECRET_ACCESS_KEY"] = "" +os.environ["AWS_REGION_NAME"] = "" + +response = rerank( + model="bedrock/arn:aws:bedrock:us-west-2::foundation-model/amazon.rerank-v1:0", # provide the model ARN - get this here https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock/client/list_foundation_models.html + query="hello", + documents=["hello", "world"], + top_n=2, +) + +print(response) +``` + + + + +1.
Setup config.yaml + +```yaml +model_list: + - model_name: bedrock-rerank + litellm_params: + model: bedrock/arn:aws:bedrock:us-west-2::foundation-model/amazon.rerank-v1:0 + aws_access_key_id: os.environ/AWS_ACCESS_KEY_ID + aws_secret_access_key: os.environ/AWS_SECRET_ACCESS_KEY + aws_region_name: os.environ/AWS_REGION_NAME +``` + +2. Start proxy server + +```bash +litellm --config config.yaml + +# RUNNING on http://0.0.0.0:4000 +``` + +3. Test it! + +```bash +curl http://0.0.0.0:4000/rerank \ + -H "Authorization: Bearer sk-1234" \ + -H "Content-Type: application/json" \ + -d '{ + "model": "bedrock-rerank", + "query": "What is the capital of the United States?", + "documents": [ + "Carson City is the capital city of the American state of Nevada.", + "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan.", + "Washington, D.C. is the capital of the United States.", + "Capital punishment has existed in the United States since before it was a country." + ], + "top_n": 3 + + + }' +``` + + + + + +## Bedrock Application Inference Profile + +Use Bedrock Application Inference Profile to track costs for projects on AWS. + +You can either pass it in the model name - `model="bedrock/arn:...` or as a separate `model_id="arn:..` param. + +### Set via `model_id` + + + + +```python +from litellm import completion +import os + +os.environ["AWS_ACCESS_KEY_ID"] = "" +os.environ["AWS_SECRET_ACCESS_KEY"] = "" +os.environ["AWS_REGION_NAME"] = "" + +response = completion( + model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0", + messages=[{"role": "user", "content": "Hello, how are you?"}], + model_id="arn:aws:bedrock:eu-central-1:000000000000:application-inference-profile/a0a0a0a0a0a0", +) + +print(response) +``` + + + + +1. Setup config.yaml + +```yaml +model_list: + - model_name: anthropic-claude-3-5-sonnet + litellm_params: + model: bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0 + # You have to set the ARN application inference profile in the model_id parameter + model_id: arn:aws:bedrock:eu-central-1:000000000000:application-inference-profile/a0a0a0a0a0a0 +``` + +2. Start proxy + +```bash +litellm --config /path/to/config.yaml +``` + +3. Test it! + +```bash +curl -L -X POST 'http://0.0.0.0:4000/v1/chat/completions' \ +-H 'Content-Type: application/json' \ +-H 'Authorization: Bearer $LITELLM_API_KEY' \ +-d '{ + "model": "anthropic-claude-3-5-sonnet", + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "List 5 important events in the XIX century" + } + ] + } + ] +}' +``` + + + + ## Boto3 - Authentication ### Passing credentials as parameters - Completion() @@ -1497,7 +1965,7 @@ response = completion( aws_bedrock_client=bedrock, ) ``` -## Calling via Internal Proxy +## Calling via Internal Proxy (not bedrock url compatible) Use the `bedrock/converse_like/model` endpoint to call bedrock converse model via your internal proxy. @@ -1563,359 +2031,3 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \ ```bash https://some-api-url/models ``` - -## Bedrock Imported Models (Deepseek, Deepseek R1) - -### Deepseek R1 - -This is a separate route, as the chat template is different. 
- -| Property | Details | -|----------|---------| -| Provider Route | `bedrock/deepseek_r1/{model_arn}` | -| Provider Documentation | [Bedrock Imported Models](https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html), [Deepseek Bedrock Imported Model](https://aws.amazon.com/blogs/machine-learning/deploy-deepseek-r1-distilled-llama-models-with-amazon-bedrock-custom-model-import/) | - - - - -```python -from litellm import completion -import os - -response = completion( - model="bedrock/deepseek_r1/arn:aws:bedrock:us-east-1:086734376398:imported-model/r4c4kewx2s0n", # bedrock/deepseek_r1/{your-model-arn} - messages=[{"role": "user", "content": "Tell me a joke"}], -) -``` - - - - - - -**1. Add to config** - -```yaml -model_list: - - model_name: DeepSeek-R1-Distill-Llama-70B - litellm_params: - model: bedrock/deepseek_r1/arn:aws:bedrock:us-east-1:086734376398:imported-model/r4c4kewx2s0n - -``` - -**2. Start proxy** - -```bash -litellm --config /path/to/config.yaml - -# RUNNING at http://0.0.0.0:4000 -``` - -**3. Test it!** - -```bash -curl --location 'http://0.0.0.0:4000/chat/completions' \ - --header 'Authorization: Bearer sk-1234' \ - --header 'Content-Type: application/json' \ - --data '{ - "model": "DeepSeek-R1-Distill-Llama-70B", # 👈 the 'model_name' in config - "messages": [ - { - "role": "user", - "content": "what llm are you" - } - ], - }' -``` - - - - - -### Deepseek (not R1) - -| Property | Details | -|----------|---------| -| Provider Route | `bedrock/llama/{model_arn}` | -| Provider Documentation | [Bedrock Imported Models](https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html), [Deepseek Bedrock Imported Model](https://aws.amazon.com/blogs/machine-learning/deploy-deepseek-r1-distilled-llama-models-with-amazon-bedrock-custom-model-import/) | - - - -Use this route to call Bedrock Imported Models that follow the `llama` Invoke Request / Response spec - - - - - -```python -from litellm import completion -import os - -response = completion( - model="bedrock/llama/arn:aws:bedrock:us-east-1:086734376398:imported-model/r4c4kewx2s0n", # bedrock/llama/{your-model-arn} - messages=[{"role": "user", "content": "Tell me a joke"}], -) -``` - - - - - - -**1. Add to config** - -```yaml -model_list: - - model_name: DeepSeek-R1-Distill-Llama-70B - litellm_params: - model: bedrock/llama/arn:aws:bedrock:us-east-1:086734376398:imported-model/r4c4kewx2s0n - -``` - -**2. Start proxy** - -```bash -litellm --config /path/to/config.yaml - -# RUNNING at http://0.0.0.0:4000 -``` - -**3. Test it!** - -```bash -curl --location 'http://0.0.0.0:4000/chat/completions' \ - --header 'Authorization: Bearer sk-1234' \ - --header 'Content-Type: application/json' \ - --data '{ - "model": "DeepSeek-R1-Distill-Llama-70B", # 👈 the 'model_name' in config - "messages": [ - { - "role": "user", - "content": "what llm are you" - } - ], - }' -``` - - - - - - -## Provisioned throughput models -To use provisioned throughput Bedrock models pass -- `model=bedrock/`, example `model=bedrock/anthropic.claude-v2`. 
Set `model` to any of the [Supported AWS models](#supported-aws-bedrock-models) -- `model_id=provisioned-model-arn` - -Completion -```python -import litellm -response = litellm.completion( - model="bedrock/anthropic.claude-instant-v1", - model_id="provisioned-model-arn", - messages=[{"content": "Hello, how are you?", "role": "user"}] -) -``` - -Embedding -```python -import litellm -response = litellm.embedding( - model="bedrock/amazon.titan-embed-text-v1", - model_id="provisioned-model-arn", - input=["hi"], -) -``` - - -## Supported AWS Bedrock Models -Here's an example of using a bedrock model with LiteLLM. For a complete list, refer to the [model cost map](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json) - -| Model Name | Command | -|----------------------------|------------------------------------------------------------------| -| Anthropic Claude-V3.5 Sonnet | `completion(model='bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Anthropic Claude-V3 sonnet | `completion(model='bedrock/anthropic.claude-3-sonnet-20240229-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Anthropic Claude-V3 Haiku | `completion(model='bedrock/anthropic.claude-3-haiku-20240307-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Anthropic Claude-V3 Opus | `completion(model='bedrock/anthropic.claude-3-opus-20240229-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Anthropic Claude-V2.1 | `completion(model='bedrock/anthropic.claude-v2:1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Anthropic Claude-V2 | `completion(model='bedrock/anthropic.claude-v2', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Anthropic Claude-Instant V1 | `completion(model='bedrock/anthropic.claude-instant-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Meta llama3-1-405b | `completion(model='bedrock/meta.llama3-1-405b-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Meta llama3-1-70b | `completion(model='bedrock/meta.llama3-1-70b-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Meta llama3-1-8b | `completion(model='bedrock/meta.llama3-1-8b-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Meta llama3-70b | `completion(model='bedrock/meta.llama3-70b-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Meta llama3-8b | `completion(model='bedrock/meta.llama3-8b-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` | -| Amazon Titan Lite | `completion(model='bedrock/amazon.titan-text-lite-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | -| Amazon Titan Express | `completion(model='bedrock/amazon.titan-text-express-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | -| Cohere Command | 
`completion(model='bedrock/cohere.command-text-v14', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | -| AI21 J2-Mid | `completion(model='bedrock/ai21.j2-mid-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | -| AI21 J2-Ultra | `completion(model='bedrock/ai21.j2-ultra-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | -| AI21 Jamba-Instruct | `completion(model='bedrock/ai21.jamba-instruct-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | -| Meta Llama 2 Chat 13b | `completion(model='bedrock/meta.llama2-13b-chat-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | -| Meta Llama 2 Chat 70b | `completion(model='bedrock/meta.llama2-70b-chat-v1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | -| Mistral 7B Instruct | `completion(model='bedrock/mistral.mistral-7b-instruct-v0:2', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | -| Mixtral 8x7B Instruct | `completion(model='bedrock/mistral.mixtral-8x7b-instruct-v0:1', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']`, `os.environ['AWS_REGION_NAME']` | - -## Bedrock Embedding - -### API keys -This can be set as env variables or passed as **params to litellm.embedding()** -```python -import os -os.environ["AWS_ACCESS_KEY_ID"] = "" # Access key -os.environ["AWS_SECRET_ACCESS_KEY"] = "" # Secret access key -os.environ["AWS_REGION_NAME"] = "" # us-east-1, us-east-2, us-west-1, us-west-2 -``` - -### Usage -```python -from litellm import embedding -response = embedding( - model="bedrock/amazon.titan-embed-text-v1", - input=["good morning from litellm"], -) -print(response) -``` - -## Supported AWS Bedrock Embedding Models - -| Model Name | Usage | Supported Additional OpenAI params | -|----------------------|---------------------------------------------|-----| -| Titan Embeddings V2 | `embedding(model="bedrock/amazon.titan-embed-text-v2:0", input=input)` | [here](https://github.com/BerriAI/litellm/blob/f5905e100068e7a4d61441d7453d7cf5609c2121/litellm/llms/bedrock/embed/amazon_titan_v2_transformation.py#L59) | -| Titan Embeddings - V1 | `embedding(model="bedrock/amazon.titan-embed-text-v1", input=input)` | [here](https://github.com/BerriAI/litellm/blob/f5905e100068e7a4d61441d7453d7cf5609c2121/litellm/llms/bedrock/embed/amazon_titan_g1_transformation.py#L53) -| Titan Multimodal Embeddings | `embedding(model="bedrock/amazon.titan-embed-image-v1", input=input)` | [here](https://github.com/BerriAI/litellm/blob/f5905e100068e7a4d61441d7453d7cf5609c2121/litellm/llms/bedrock/embed/amazon_titan_multimodal_transformation.py#L28) | -| Cohere Embeddings - English | `embedding(model="bedrock/cohere.embed-english-v3", input=input)` | [here](https://github.com/BerriAI/litellm/blob/f5905e100068e7a4d61441d7453d7cf5609c2121/litellm/llms/bedrock/embed/cohere_transformation.py#L18) -| Cohere Embeddings - Multilingual | `embedding(model="bedrock/cohere.embed-multilingual-v3", input=input)` | 
[here](https://github.com/BerriAI/litellm/blob/f5905e100068e7a4d61441d7453d7cf5609c2121/litellm/llms/bedrock/embed/cohere_transformation.py#L18) - -### Advanced - [Drop Unsupported Params](https://docs.litellm.ai/docs/completion/drop_params#openai-proxy-usage) - -### Advanced - [Pass model/provider-specific Params](https://docs.litellm.ai/docs/completion/provider_specific_params#proxy-usage) - -## Image Generation -Use this for stable diffusion on bedrock - - -### Usage -```python -import os -from litellm import image_generation - -os.environ["AWS_ACCESS_KEY_ID"] = "" -os.environ["AWS_SECRET_ACCESS_KEY"] = "" -os.environ["AWS_REGION_NAME"] = "" - -response = image_generation( - prompt="A cute baby sea otter", - model="bedrock/stability.stable-diffusion-xl-v0", - ) -print(f"response: {response}") -``` - -**Set optional params** -```python -import os -from litellm import image_generation - -os.environ["AWS_ACCESS_KEY_ID"] = "" -os.environ["AWS_SECRET_ACCESS_KEY"] = "" -os.environ["AWS_REGION_NAME"] = "" - -response = image_generation( - prompt="A cute baby sea otter", - model="bedrock/stability.stable-diffusion-xl-v0", - ### OPENAI-COMPATIBLE ### - size="128x512", # width=128, height=512 - ### PROVIDER-SPECIFIC ### see `AmazonStabilityConfig` in bedrock.py for all params - seed=30 - ) -print(f"response: {response}") -``` - -## Supported AWS Bedrock Image Generation Models - -| Model Name | Function Call | -|----------------------|---------------------------------------------| -| Stable Diffusion 3 - v0 | `embedding(model="bedrock/stability.stability.sd3-large-v1:0", prompt=prompt)` | -| Stable Diffusion - v0 | `embedding(model="bedrock/stability.stable-diffusion-xl-v0", prompt=prompt)` | -| Stable Diffusion - v0 | `embedding(model="bedrock/stability.stable-diffusion-xl-v1", prompt=prompt)` | - - -## Rerank API - -Use Bedrock's Rerank API in the Cohere `/rerank` format. - -Supported Cohere Rerank Params -- `model` - the foundation model ARN -- `query` - the query to rerank against -- `documents` - the list of documents to rerank -- `top_n` - the number of results to return - - - - -```python -from litellm import rerank -import os - -os.environ["AWS_ACCESS_KEY_ID"] = "" -os.environ["AWS_SECRET_ACCESS_KEY"] = "" -os.environ["AWS_REGION_NAME"] = "" - -response = rerank( - model="bedrock/arn:aws:bedrock:us-west-2::foundation-model/amazon.rerank-v1:0", # provide the model ARN - get this here https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock/client/list_foundation_models.html - query="hello", - documents=["hello", "world"], - top_n=2, -) - -print(response) -``` - - - - -1. Setup config.yaml - -```yaml -model_list: - - model_name: bedrock-rerank - litellm_params: - model: bedrock/arn:aws:bedrock:us-west-2::foundation-model/amazon.rerank-v1:0 - aws_access_key_id: os.environ/AWS_ACCESS_KEY_ID - aws_secret_access_key: os.environ/AWS_SECRET_ACCESS_KEY - aws_region_name: os.environ/AWS_REGION_NAME -``` - -2. Start proxy server - -```bash -litellm --config config.yaml - -# RUNNING on http://0.0.0.0:4000 -``` - -3. Test it! - -```bash -curl http://0.0.0.0:4000/rerank \ - -H "Authorization: Bearer sk-1234" \ - -H "Content-Type: application/json" \ - -d '{ - "model": "bedrock-rerank", - "query": "What is the capital of the United States?", - "documents": [ - "Carson City is the capital city of the American state of Nevada.", - "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan.", - "Washington, D.C. 
is the capital of the United States.",
-        "Capital punishment has existed in the United States since before it was a country."
-      ],
-      "top_n": 3
-
-
-  }'
-```
-
-
-
-
-
diff --git a/docs/my-website/docs/providers/litellm_proxy.md b/docs/my-website/docs/providers/litellm_proxy.md
index e204caba0a..a66423dac5 100644
--- a/docs/my-website/docs/providers/litellm_proxy.md
+++ b/docs/my-website/docs/providers/litellm_proxy.md
@@ -57,7 +57,7 @@ messages = [{ "content": "Hello, how are you?","role": "user"}]
 # litellm proxy call
 response = completion(
     model="litellm_proxy/your-model-name", 
-    messages, 
+    messages=messages, 
     api_base = "your-litellm-proxy-url",
     api_key = "your-litellm-proxy-api-key"
 )
@@ -76,7 +76,7 @@ messages = [{ "content": "Hello, how are you?","role": "user"}]
 # openai call
 response = completion(
     model="litellm_proxy/your-model-name", 
-    messages, 
+    messages=messages, 
     api_base = "your-litellm-proxy-url", 
     stream=True
 )
diff --git a/docs/my-website/docs/providers/snowflake.md b/docs/my-website/docs/providers/snowflake.md
new file mode 100644
index 0000000000..c708613e2f
--- /dev/null
+++ b/docs/my-website/docs/providers/snowflake.md
@@ -0,0 +1,91 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+# Snowflake
+| Property | Details |
+|-------|-------|
+| Description | The Snowflake Cortex LLM REST API lets you access the COMPLETE function via HTTP POST requests |
+| Provider Route on LiteLLM | `snowflake/` |
+| Link to Provider Doc | [Snowflake ↗](https://docs.snowflake.com/en/user-guide/snowflake-cortex/cortex-llm-rest-api) |
+| Base URL | [https://{account-id}.snowflakecomputing.com/api/v2/cortex/inference:complete](https://{account-id}.snowflakecomputing.com/api/v2/cortex/inference:complete) |
+| Supported OpenAI Endpoints | `/chat/completions`, `/completions` |
+
+
+
+Currently, Snowflake's REST API does not have an endpoint for `snowflake-arctic-embed` embedding models. If you want to use these embedding models with LiteLLM, you can call them through our Hugging Face provider.
+
+Find the Arctic Embed models [here](https://huggingface.co/collections/Snowflake/arctic-embed-661fd57d50fab5fc314e4c18) on Hugging Face.
+
+## Supported OpenAI Parameters
+```
+    "temperature",
+    "max_tokens",
+    "top_p",
+    "response_format"
+```
+
+## API Keys
+
+Snowflake does not have API keys. Instead, you access the Snowflake API with your JWT token and account identifier.
+
+```python
+import os
+os.environ["SNOWFLAKE_JWT"] = "YOUR JWT"
+os.environ["SNOWFLAKE_ACCOUNT_ID"] = "YOUR ACCOUNT IDENTIFIER"
+```
+## Usage
+
+```python
+import os
+from litellm import completion
+
+## set ENV variables
+os.environ["SNOWFLAKE_JWT"] = "YOUR JWT"
+os.environ["SNOWFLAKE_ACCOUNT_ID"] = "YOUR ACCOUNT IDENTIFIER"
+
+# Snowflake call
+response = completion(
+    model="snowflake/mistral-7b",
+    messages = [{ "content": "Hello, how are you?","role": "user"}]
+)
+```
+
+## Usage with LiteLLM Proxy
+
+#### 1. Required env variables
+```bash
+export SNOWFLAKE_JWT=""
+export SNOWFLAKE_ACCOUNT_ID=""
+```
+
+#### 2. Start the proxy
+```yaml
+model_list:
+  - model_name: mistral-7b
+    litellm_params:
+      model: snowflake/mistral-7b
+      api_key: os.environ/SNOWFLAKE_JWT
+      api_base: https://YOUR-ACCOUNT-ID.snowflakecomputing.com/api/v2/cortex/inference:complete
+
+```
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+#### 3. Test it
+```shell
+curl --location 'http://0.0.0.0:4000/chat/completions' \
+--header 'Content-Type: application/json' \
+--data ' {
+    "model": "mistral-7b",
+    "messages": [
+        {
+            "role": "user",
+            "content": "Hello, how are you?"
+        }
+    ]
+    }
+'
+```
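You can run the same request with the OpenAI Python SDK pointed at the proxy. A minimal sketch, assuming the proxy from step 2 is running on `http://0.0.0.0:4000` and `sk-1234` stands in for your proxy key:

```python
from openai import OpenAI

# Standard OpenAI client, pointed at the LiteLLM proxy
client = OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")

response = client.chat.completions.create(
    model="mistral-7b",  # model_name from the proxy config above
    messages=[{"role": "user", "content": "Hello, how are you?"}],
)
print(response.choices[0].message.content)
```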
diff --git a/docs/my-website/docs/proxy/access_control.md b/docs/my-website/docs/proxy/access_control.md
index 3d335380f4..69b8a3ff6d 100644
--- a/docs/my-website/docs/proxy/access_control.md
+++ b/docs/my-website/docs/proxy/access_control.md
@@ -10,17 +10,13 @@ Role-based access control (RBAC) is based on Organizations, Teams and Internal U
 
 ## Roles
 
-**Admin Roles**
-  - `proxy_admin`: admin over the platform
-  - `proxy_admin_viewer`: can login, view all keys, view all spend. **Cannot** create keys/delete keys/add new users
-
-**Organization Roles**
-  - `org_admin`: admin over the organization. Can create teams and users within their organization
-
-**Internal User Roles**
-  - `internal_user`: can login, view/create/delete their own keys, view their spend. **Cannot** add new users.
-  - `internal_user_viewer`: can login, view their own keys, view their own spend. **Cannot** create/delete keys, add new users.
-
+| Role Type | Role Name | Permissions |
+|-----------|-----------|-------------|
+| **Admin** | `proxy_admin` | Admin over the platform |
+| | `proxy_admin_viewer` | Can login, view all keys, view all spend. **Cannot** create keys/delete keys/add new users |
+| **Organization** | `org_admin` | Admin over the organization. Can create teams and users within their organization |
+| **Internal User** | `internal_user` | Can login, view/create/delete their own keys, view their spend. **Cannot** add new users |
+| | `internal_user_viewer` | Can login, view their own keys, view their own spend. **Cannot** create/delete keys, add new users |
 
 ## Onboarding Organizations
 
diff --git a/docs/my-website/docs/proxy/config_settings.md b/docs/my-website/docs/proxy/config_settings.md
index 9e24437449..71ea045fb1 100644
--- a/docs/my-website/docs/proxy/config_settings.md
+++ b/docs/my-website/docs/proxy/config_settings.md
@@ -177,7 +177,7 @@ general_settings:
 | use_x_forwarded_for | str | If true, uses the X-Forwarded-For header to get the client IP address |
 | service_account_settings | List[Dict[str, Any]] | Set `service_account_settings` if you want to create settings that only apply to service account keys (Doc on service accounts)[./service_accounts.md] |
 | image_generation_model | str | The default model to use for image generation - ignores model set in request |
-| store_model_in_db | boolean | If true, allows `/model/new` endpoint to store model information in db. Endpoint disabled by default. [Doc on `/model/new` endpoint](./model_management.md#create-a-new-model) |
+| store_model_in_db | boolean | If true, enables storing model + credential information in the DB. |
 | store_prompts_in_spend_logs | boolean | If true, allows prompts and responses to be stored in the spend logs table. |
 | max_request_size_mb | int | The maximum size for requests in MB. Requests above this size will be rejected. |
 | max_response_size_mb | int | The maximum size for responses in MB. LLM Responses above this size will not be sent. |
@@ -499,9 +499,11 @@ router_settings:
 | SMTP_USERNAME | Username for SMTP authentication (do not set if SMTP does not require auth)
 | SPEND_LOGS_URL | URL for retrieving spend logs
 | SSL_CERTIFICATE | Path to the SSL certificate file
+| SSL_SECURITY_LEVEL | [BETA] Security level for SSL/TLS connections. E.g. `DEFAULT@SECLEVEL=1`
 | SSL_VERIFY | Flag to enable or disable SSL certificate verification
 | SUPABASE_KEY | API key for Supabase service
 | SUPABASE_URL | Base URL for Supabase instance
+| STORE_MODEL_IN_DB | If true, enables storing model + credential information in the DB.
 | TEST_EMAIL_ADDRESS | Email address used for testing purposes
 | UI_LOGO_PATH | Path to the logo image used in the UI
 | UI_PASSWORD | Password for accessing the UI
@@ -513,4 +515,3 @@ router_settings:
 | UPSTREAM_LANGFUSE_SECRET_KEY | Secret key for upstream Langfuse authentication
 | USE_AWS_KMS | Flag to enable AWS Key Management Service for encryption
 | WEBHOOK_URL | URL for receiving webhooks from external services
-
diff --git a/docs/my-website/docs/proxy/configs.md b/docs/my-website/docs/proxy/configs.md
index efb263d344..db737f75af 100644
--- a/docs/my-website/docs/proxy/configs.md
+++ b/docs/my-website/docs/proxy/configs.md
@@ -448,6 +448,34 @@ model_list:
 
 s/o to [@David Manouchehri](https://www.linkedin.com/in/davidmanouchehri/) for helping with this. 
 
+### Centralized Credential Management
+
+Define credentials once and reuse them across multiple models. This helps with:
+- Secret rotation
+- Reducing config duplication
+
+```yaml
+model_list:
+  - model_name: gpt-4o
+    litellm_params:
+      model: azure/gpt-4o
+      litellm_credential_name: default_azure_credential # Reference credential below
+
+credential_list:
+  - credential_name: default_azure_credential
+    credential_values:
+      api_key: os.environ/AZURE_API_KEY # Load from environment
+      api_base: os.environ/AZURE_API_BASE
+      api_version: "2023-05-15"
+    credential_info:
+      description: "Production credentials for EU region"
+```
+
+#### Key Parameters
+- `credential_name`: Unique identifier for the credential set
+- `credential_values`: Key-value pairs of credentials/secrets (supports `os.environ/` syntax)
+- `credential_info`: Optional, user-provided metadata for the credential set (e.g. a description). No specific keys are required, but the dictionary must be present.
+
 ### Load API Keys from Secret Managers (Azure Vault, etc)
 
 [**Using Secret Managers with LiteLLM Proxy**](../secret)
 
@@ -641,4 +669,4 @@ docker run --name litellm-proxy \
 ghcr.io/berriai/litellm-database:main-latest
 ```
 
-
\ No newline at end of file
+
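To make the reuse described in the credential management section above concrete, here is a sketch with two deployments sharing one credential. The `gpt-4o-mini` deployment is illustrative; only `litellm_credential_name` ties each model to the shared secret, so rotating the key means editing a single `credential_list` entry:

```yaml
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: azure/gpt-4o
      litellm_credential_name: default_azure_credential
  - model_name: gpt-4o-mini
    litellm_params:
      model: azure/gpt-4o-mini
      litellm_credential_name: default_azure_credential # same credential, no duplication

credential_list:
  - credential_name: default_azure_credential
    credential_values:
      api_key: os.environ/AZURE_API_KEY
      api_base: os.environ/AZURE_API_BASE
      api_version: "2023-05-15"
    credential_info:
      description: "Shared Azure credential for both deployments"
```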
diff --git a/docs/my-website/docs/proxy/custom_prompt_management.md b/docs/my-website/docs/proxy/custom_prompt_management.md
new file mode 100644
index 0000000000..72a7333276
--- /dev/null
+++ b/docs/my-website/docs/proxy/custom_prompt_management.md
@@ -0,0 +1,194 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Custom Prompt Management
+
+Connect LiteLLM to your prompt management system with custom hooks.
+
+## Overview
+
+
+
+
+## How it works
+
+## Quick Start
+
+### 1. Create Your Custom Prompt Manager
+
+Create a class that inherits from `CustomPromptManagement` to handle prompt retrieval and formatting:
+
+**Example Implementation**
+
+Create a new file called `custom_prompt.py` and add this code. The key method here is `get_chat_completion_prompt`, where you can implement custom logic to retrieve and format prompts based on the `prompt_id` and `prompt_variables`.
+
+```python
+from typing import List, Tuple, Optional
+from litellm.integrations.custom_prompt_management import CustomPromptManagement
+from litellm.types.llms.openai import AllMessageValues
+from litellm.types.utils import StandardCallbackDynamicParams
+
+class MyCustomPromptManagement(CustomPromptManagement):
+    def get_chat_completion_prompt(
+        self,
+        model: str,
+        messages: List[AllMessageValues],
+        non_default_params: dict,
+        prompt_id: str,
+        prompt_variables: Optional[dict],
+        dynamic_callback_params: StandardCallbackDynamicParams,
+    ) -> Tuple[str, List[AllMessageValues], dict]:
+        """
+        Retrieve and format prompts based on prompt_id.
+
+        Returns:
+        - model: The model to use
+        - messages: The formatted messages
+        - non_default_params: Optional parameters like temperature
+        """
+        # Example matching the diagram: Add system message for prompt_id "1234"
+        if prompt_id == "1234":
+            # Prepend system message while preserving existing messages
+            new_messages = [
+                {"role": "system", "content": "Be a good Bot!"},
+            ] + messages
+            return model, new_messages, non_default_params
+
+        # Default: Return original messages if no prompt_id match
+        return model, messages, non_default_params
+
+prompt_management = MyCustomPromptManagement()
+```
+
+### 2. Configure Your Prompt Manager in LiteLLM `config.yaml`
+
+```yaml
+model_list:
+  - model_name: gpt-4
+    litellm_params:
+      model: openai/gpt-4
+      api_key: os.environ/OPENAI_API_KEY
+
+litellm_settings:
+  callbacks: custom_prompt.prompt_management # sets litellm.callbacks = [prompt_management]
+```
+
+### 3. Start LiteLLM Gateway
+
+
+
+
+Mount your `custom_prompt.py` on the LiteLLM Docker container.
+
+```shell
+docker run -d \
+  -p 4000:4000 \
+  -e OPENAI_API_KEY=$OPENAI_API_KEY \
+  --name my-app \
+  -v $(pwd)/my_config.yaml:/app/config.yaml \
+  -v $(pwd)/custom_prompt.py:/app/custom_prompt.py \
+  my-app:latest \
+  --config /app/config.yaml \
+  --port 4000 \
+  --detailed_debug
+```
+
+
+
+
+
+```shell
+litellm --config config.yaml --detailed_debug
+```
+
+
+
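You can also sanity-check the hook in the Python SDK before starting the gateway. A sketch, assuming `custom_prompt.py` from step 1 is importable and that `litellm.completion` forwards `prompt_id` to registered callbacks the same way the proxy does:

```python
import litellm
from custom_prompt import prompt_management  # instance defined in step 1

# Register the custom prompt manager, mirroring the config.yaml callback setting
litellm.callbacks = [prompt_management]

response = litellm.completion(
    model="gpt-4",
    messages=[{"role": "user", "content": "hi"}],
    prompt_id="1234",  # should trigger the "Be a good Bot!" system message
)
print(response.choices[0].message.content)
```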
+### 4. Test Your Custom Prompt Manager
+
+When you pass `prompt_id="1234"`, the custom prompt manager will add a system message "Be a good Bot!" to your conversation:
+
+
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+    api_key="sk-1234",
+    base_url="http://0.0.0.0:4000"
+)
+
+response = client.chat.completions.create(
+    model="gpt-4",
+    messages=[{"role": "user", "content": "hi"}],
+    extra_body={"prompt_id": "1234"} # LiteLLM-specific param, sent via extra_body
+)
+
+print(response.choices[0].message.content)
+```
+
+
+
+
+```python
+from langchain.chat_models import ChatOpenAI
+from langchain.schema import HumanMessage
+
+chat = ChatOpenAI(
+    model="gpt-4",
+    openai_api_key="sk-1234",
+    openai_api_base="http://0.0.0.0:4000",
+    extra_body={
+        "prompt_id": "1234"
+    }
+)
+
+messages = [HumanMessage(content="hi")]
+response = chat(messages)
+
+print(response.content)
+```
+
+
+
+
+```shell
+curl -X POST http://0.0.0.0:4000/v1/chat/completions \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer sk-1234" \
+-d '{
+    "model": "gpt-4",
+    "messages": [{"role": "user", "content": "hi"}],
+    "prompt_id": "1234"
+}'
+```
+
+
+
+The request will be transformed from:
+```json
+{
+    "model": "gpt-4",
+    "messages": [{"role": "user", "content": "hi"}],
+    "prompt_id": "1234"
+}
+```
+
+To:
+```json
+{
+    "model": "gpt-4",
+    "messages": [
+        {"role": "system", "content": "Be a good Bot!"},
+        {"role": "user", "content": "hi"}
+    ]
+}
+```
+
+
diff --git a/docs/my-website/docs/proxy/guardrails/aim_security.md b/docs/my-website/docs/proxy/guardrails/aim_security.md
index 3de933c0b7..8f612b9dbe 100644
--- a/docs/my-website/docs/proxy/guardrails/aim_security.md
+++ b/docs/my-website/docs/proxy/guardrails/aim_security.md
@@ -37,7 +37,7 @@ guardrails:
   - guardrail_name: aim-protected-app
     litellm_params:
       guardrail: aim
-      mode: pre_call # 'during_call' is also available
+      mode: [pre_call, post_call] # 'during_call' is also available
       api_key: os.environ/AIM_API_KEY
       api_base: os.environ/AIM_API_BASE # Optional, use only when using a self-hosted Aim Outpost
 ```
diff --git a/docs/my-website/docs/proxy/prompt_management.md b/docs/my-website/docs/proxy/prompt_management.md
index 980043f455..c09231dd59 100644
--- a/docs/my-website/docs/proxy/prompt_management.md
+++ b/docs/my-website/docs/proxy/prompt_management.md
@@ -2,7 +2,7 @@ import Image from '@theme/IdealImage';
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
-# [BETA] Prompt Management
+# Prompt Management
 
 :::info
 
@@ -12,9 +12,10 @@ This feature is currently in beta, and might change unexpectedly. We expect this
 
 Run experiments or change the specific model (e.g. from gpt-4o to gpt4o-mini finetune) from your prompt management tool (e.g. Langfuse) instead of making changes in the application.
 
-Supported Integrations:
-- [Langfuse](https://langfuse.com/docs/prompts/get-started)
-- [Humanloop](../observability/humanloop)
+| Supported Integrations | Link |
+|------------------------|------|
+| Langfuse | [Get Started](https://langfuse.com/docs/prompts/get-started) |
+| Humanloop | [Get Started](../observability/humanloop) |
 
 ## Quick Start
 
diff --git a/docs/my-website/docs/proxy/token_auth.md b/docs/my-website/docs/proxy/token_auth.md
index 89bf2cfcd9..46e05b6c3e 100644
--- a/docs/my-website/docs/proxy/token_auth.md
+++ b/docs/my-website/docs/proxy/token_auth.md
@@ -102,7 +102,19 @@ curl --location 'http://0.0.0.0:4000/v1/chat/completions' \
 
 
-## Advanced - Set Accepted JWT Scope Names
+## Advanced
+
+### Multiple OIDC providers
+
+Use this if you want LiteLLM to validate your JWT against multiple OIDC providers (e.g.
Google Cloud, GitHub Auth) + +Set `JWT_PUBLIC_KEY_URL` in your environment to a comma-separated list of URLs for your OIDC providers. + +```bash +export JWT_PUBLIC_KEY_URL="https://demo.duendesoftware.com/.well-known/openid-configuration/jwks,https://accounts.google.com/.well-known/openid-configuration/jwks" +``` + +### Set Accepted JWT Scope Names Change the string in JWT 'scopes', that litellm evaluates to see if a user has admin access. @@ -114,7 +126,7 @@ general_settings: admin_jwt_scope: "litellm-proxy-admin" ``` -## Tracking End-Users / Internal Users / Team / Org +### Tracking End-Users / Internal Users / Team / Org Set the field in the jwt token, which corresponds to a litellm user / team / org. @@ -156,7 +168,7 @@ scope: ["litellm-proxy-admin",...] scope: "litellm-proxy-admin ..." ``` -## Control model access with Teams +### Control model access with Teams 1. Specify the JWT field that contains the team ids, that the user belongs to. @@ -207,7 +219,7 @@ OIDC Auth for API: [**See Walkthrough**](https://www.loom.com/share/00fe2deab59a - If all checks pass, allow the request -## Advanced - Custom Validate +### Custom JWT Validate This section allows you to add custom logic to intercept and perform validation of the JWT token. @@ -215,7 +227,7 @@ This can occur when there is additional logic that is needed to execute against > _Note_: You can expect the JWT will have ran the typical decrypting of the public key, token decoding, and expiration time checks before executing the custom validation function. -### 1. Setup custom validate function +#### 1. Setup custom validate function ```python from typing import Any, Literal @@ -236,7 +248,7 @@ def my_custom_validate(token: dict[str, Any]) -> Literal[True]: return True ``` -### 2. Setup config.yaml +#### 2. Setup config.yaml ```yaml general_settings: @@ -249,7 +261,7 @@ general_settings: custom_validate: custom_validate.my_custom_validate # 👈 custom validate function ``` -### 3. Test the flow +#### 3. Test the flow **Expected JWT** @@ -271,7 +283,7 @@ general_settings: } ``` -## Advanced - Allowed Routes +### Allowed Routes Configure which routes a JWT can access via the config. @@ -303,7 +315,7 @@ general_settings: team_allowed_routes: ["/v1/chat/completions"] # 👈 Set accepted routes ``` -## Advanced - Caching Public Keys +### Caching Public Keys Control how long public keys are cached for (in seconds). @@ -317,7 +329,7 @@ general_settings: public_key_ttl: 600 # 👈 KEY CHANGE ``` -## Advanced - Custom JWT Field +### Custom JWT Field Set a custom field in which the team_id exists. By default, the 'client_id' field is checked. @@ -329,14 +341,7 @@ general_settings: team_id_jwt_field: "client_id" # 👈 KEY CHANGE ``` -## All Params - -[**See Code**](https://github.com/BerriAI/litellm/blob/b204f0c01c703317d812a1553363ab0cb989d5b6/litellm/proxy/_types.py#L95) - - - - -## Advanced - Block Teams +### Block Teams To block all requests for a certain team id, use `/team/block` @@ -363,7 +368,7 @@ curl --location 'http://0.0.0.0:4000/team/unblock' \ ``` -## Advanced - Upsert Users + Allowed Email Domains +### Upsert Users + Allowed Email Domains Allow users who belong to a specific email domain, automatic access to the proxy. 
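A minimal config sketch for this flow; the `litellm_jwtauth` field names used here (`user_email_jwt_field`, `user_allowed_email_domain`, `user_id_upsert`) are assumptions, so verify them against the JWT params code reference linked at the end of this doc:

```yaml
general_settings:
  master_key: sk-1234
  enable_jwt_auth: True
  litellm_jwtauth:
    user_email_jwt_field: "email" # JWT claim that carries the user's email (assumed name)
    user_allowed_email_domain: "yourcompany.com" # only this domain gets automatic access (assumed name)
    user_id_upsert: true # create the internal user on first authenticated request (assumed name)
```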
@@ -501,3 +506,7 @@ curl -L -X POST 'http://0.0.0.0:4000/v1/chat/completions' \
   ]
 }'
 ```
+
+## All JWT Params
+
+[**See Code**](https://github.com/BerriAI/litellm/blob/b204f0c01c703317d812a1553363ab0cb989d5b6/litellm/proxy/_types.py#L95)
diff --git a/docs/my-website/docs/proxy/ui_credentials.md b/docs/my-website/docs/proxy/ui_credentials.md
new file mode 100644
index 0000000000..ba9d1c4c66
--- /dev/null
+++ b/docs/my-website/docs/proxy/ui_credentials.md
@@ -0,0 +1,55 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Adding LLM Credentials
+
+You can add LLM provider credentials on the UI. Once you add credentials, you can re-use them when adding new models.
+
+## Add a credential + model
+
+### 1. Navigate to LLM Credentials page
+
+Go to Models -> LLM Credentials -> Add Credential
+
+
+
+### 2. Add credentials
+
+Select your LLM provider, enter your API Key and click "Add Credential".
+
+**Note: Credential fields are provider-specific. If you select Vertex AI, you will see `Vertex Project`, `Vertex Location` and `Vertex Credentials` fields.**
+
+
+
+
+### 3. Use credentials when adding a model
+
+Go to Add Model -> Existing Credentials -> Select your credential in the dropdown
+
+
+
+
+## Create a Credential from an existing model
+
+Use this if you have already created a model and want to store the model credentials for future use.
+
+### 1. Select model to create a credential from
+
+Go to Models -> Select your model -> Credential -> Create Credential
+
+
+
+### 2. Use new credential when adding a model
+
+Go to Add Model -> Existing Credentials -> Select your credential in the dropdown
+
+
+
+## Frequently Asked Questions
+
+
+How are credentials stored?
+Credentials in the DB are encrypted/decrypted using `LITELLM_SALT_KEY`, if set. If not, then they are encrypted using `LITELLM_MASTER_KEY`. These keys should be kept secret and not shared with others.
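For intuition, here is a generic sketch of the underlying pattern (symmetric encryption with a key derived from a secret string, using Fernet from the `cryptography` package). It illustrates the general idea only, not LiteLLM's actual implementation:

```python
import base64
import hashlib
import os

from cryptography.fernet import Fernet

# Illustrative only -- not LiteLLM's actual scheme. Prefer LITELLM_SALT_KEY,
# fall back to LITELLM_MASTER_KEY, as described above.
secret = os.environ.get("LITELLM_SALT_KEY") or os.environ["LITELLM_MASTER_KEY"]

# Fernet requires a 32-byte urlsafe-base64 key; derive one from the secret
key = base64.urlsafe_b64encode(hashlib.sha256(secret.encode()).digest())
fernet = Fernet(key)

ciphertext = fernet.encrypt(b"sk-provider-api-key")  # what would be stored in the DB
plaintext = fernet.decrypt(ciphertext)               # recovered when the model is used
```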
+ + diff --git a/docs/my-website/docs/proxy/ui_logs.md b/docs/my-website/docs/proxy/ui_logs.md new file mode 100644 index 0000000000..a3c5237962 --- /dev/null +++ b/docs/my-website/docs/proxy/ui_logs.md @@ -0,0 +1,55 @@ + +import Image from '@theme/IdealImage'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# UI Logs Page + +View Spend, Token Usage, Key, Team Name for Each Request to LiteLLM + + + + + +## Overview + +| Log Type | Tracked by Default | +|----------|-------------------| +| Success Logs | ✅ Yes | +| Error Logs | ✅ Yes | +| Request/Response Content Stored | ❌ No by Default, **opt in with `store_prompts_in_spend_logs`** | + + + +**By default LiteLLM does not track the request and response content.** + +## Tracking - Request / Response Content in Logs Page + +If you want to view request and response content on LiteLLM Logs, you need to opt in with this setting + +```yaml +general_settings: + store_prompts_in_spend_logs: true +``` + + + + +## Stop storing Error Logs in DB + +If you do not want to store error logs in DB, you can opt out with this setting + +```yaml +general_settings: + disable_error_logs: True # Only disable writing error logs to DB, regular spend logs will still be written unless `disable_spend_logs: True` +``` + +## Stop storing Spend Logs in DB + +If you do not want to store spend logs in DB, you can opt out with this setting + +```yaml +general_settings: + disable_spend_logs: True # Disable writing spend logs to DB +``` + diff --git a/docs/my-website/docs/realtime.md b/docs/my-website/docs/realtime.md index 28697f44b9..4611c8fdcd 100644 --- a/docs/my-website/docs/realtime.md +++ b/docs/my-website/docs/realtime.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Realtime Endpoints +# /realtime Use this to loadbalance across Azure + OpenAI. diff --git a/docs/my-website/docs/reasoning_content.md b/docs/my-website/docs/reasoning_content.md index 5cf287e737..1cce3f0570 100644 --- a/docs/my-website/docs/reasoning_content.md +++ b/docs/my-website/docs/reasoning_content.md @@ -3,11 +3,20 @@ import TabItem from '@theme/TabItem'; # 'Thinking' / 'Reasoning Content' +:::info + +Requires LiteLLM v1.63.0+ + +::: + Supported Providers: - Deepseek (`deepseek/`) - Anthropic API (`anthropic/`) - Bedrock (Anthropic + Deepseek) (`bedrock/`) - Vertex AI (Anthropic) (`vertexai/`) +- OpenRouter (`openrouter/`) + +LiteLLM will standardize the `reasoning_content` in the response and `thinking_blocks` in the assistant message. 
```python "message": { diff --git a/docs/my-website/docs/rerank.md b/docs/my-website/docs/rerank.md index cc58c374c7..1e3cfd0fa5 100644 --- a/docs/my-website/docs/rerank.md +++ b/docs/my-website/docs/rerank.md @@ -1,4 +1,4 @@ -# Rerank +# /rerank :::tip diff --git a/docs/my-website/docs/response_api.md b/docs/my-website/docs/response_api.md new file mode 100644 index 0000000000..0604a42586 --- /dev/null +++ b/docs/my-website/docs/response_api.md @@ -0,0 +1,117 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# /responses [Beta] + +LiteLLM provides a BETA endpoint in the spec of [OpenAI's `/responses` API](https://platform.openai.com/docs/api-reference/responses) + +| Feature | Supported | Notes | +|---------|-----------|--------| +| Cost Tracking | ✅ | Works with all supported models | +| Logging | ✅ | Works across all integrations | +| End-user Tracking | ✅ | | +| Streaming | ✅ | | +| Fallbacks | ✅ | Works between supported models | +| Loadbalancing | ✅ | Works between supported models | +| Supported LiteLLM Versions | 1.63.8+ | | +| Supported LLM providers | `openai` | | + +## Usage + +## Create a model response + + + + +#### Non-streaming +```python +import litellm + +# Non-streaming response +response = litellm.responses( + model="gpt-4o", + input="Tell me a three sentence bedtime story about a unicorn.", + max_output_tokens=100 +) + +print(response) +``` + +#### Streaming +```python +import litellm + +# Streaming response +response = litellm.responses( + model="gpt-4o", + input="Tell me a three sentence bedtime story about a unicorn.", + stream=True +) + +for event in response: + print(event) +``` + + + + +First, add this to your litellm proxy config.yaml: +```yaml +model_list: + - model_name: gpt-4o + litellm_params: + model: openai/gpt-4o + api_key: os.environ/OPENAI_API_KEY +``` + +Start your LiteLLM proxy: +```bash +litellm --config /path/to/config.yaml + +# RUNNING on http://0.0.0.0:4000 +``` + +Then use the OpenAI SDK pointed to your proxy: + +#### Non-streaming +```python +from openai import OpenAI + +# Initialize client with your proxy URL +client = OpenAI( + base_url="http://localhost:4000", # Your proxy URL + api_key="your-api-key" # Your proxy API key +) + +# Non-streaming response +response = client.responses.create( + model="gpt-4o", + input="Tell me a three sentence bedtime story about a unicorn." +) + +print(response) +``` + +#### Streaming +```python +from openai import OpenAI + +# Initialize client with your proxy URL +client = OpenAI( + base_url="http://localhost:4000", # Your proxy URL + api_key="your-api-key" # Your proxy API key +) + +# Streaming response +response = client.responses.create( + model="gpt-4o", + input="Tell me a three sentence bedtime story about a unicorn.", + stream=True +) + +for event in response: + print(event) +``` + + + diff --git a/docs/my-website/docs/routing.md b/docs/my-website/docs/routing.md index 0ad28b24f4..967d5ad483 100644 --- a/docs/my-website/docs/routing.md +++ b/docs/my-website/docs/routing.md @@ -830,7 +830,7 @@ asyncio.run(router_acompletion()) Set `weight` on a deployment to pick one deployment more often than others. -This works across **ALL** routing strategies. +This works across **simple-shuffle** routing strategy (this is the default, if no routing strategy is selected). 
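A short sketch of `weight` in practice; model names and keys are placeholders, and under the default simple-shuffle strategy roughly two of every three requests should land on the first deployment:

```python
import os
from litellm import Router

# Two deployments behind one model_name; the weights below mean
# ~2/3 of requests hit gpt-4o and ~1/3 hit gpt-4o-mini.
router = Router(
    model_list=[
        {
            "model_name": "my-gpt",
            "litellm_params": {
                "model": "openai/gpt-4o",
                "api_key": os.getenv("OPENAI_API_KEY"),
                "weight": 2,
            },
        },
        {
            "model_name": "my-gpt",
            "litellm_params": {
                "model": "openai/gpt-4o-mini",
                "api_key": os.getenv("OPENAI_API_KEY"),
                "weight": 1,
            },
        },
    ],
)

response = router.completion(
    model="my-gpt",
    messages=[{"role": "user", "content": "hello"}],
)
print(response)
```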
diff --git a/docs/my-website/docs/secret.md b/docs/my-website/docs/secret.md index 7676164259..9f0ff7059c 100644 --- a/docs/my-website/docs/secret.md +++ b/docs/my-website/docs/secret.md @@ -96,7 +96,7 @@ litellm --config /path/to/config.yaml ``` -### Using K/V pairs in 1 AWS Secret +#### Using K/V pairs in 1 AWS Secret You can read multiple keys from a single AWS Secret using the `primary_secret_name` parameter: diff --git a/docs/my-website/docs/text_completion.md b/docs/my-website/docs/text_completion.md index 8be40dfdcd..cbf2db00a0 100644 --- a/docs/my-website/docs/text_completion.md +++ b/docs/my-website/docs/text_completion.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Text Completion +# /completions ### Usage diff --git a/docs/my-website/img/custom_prompt_management.png b/docs/my-website/img/custom_prompt_management.png new file mode 100644 index 0000000000..2c96e0d116 Binary files /dev/null and b/docs/my-website/img/custom_prompt_management.png differ diff --git a/docs/my-website/img/release_notes/credentials.jpg b/docs/my-website/img/release_notes/credentials.jpg new file mode 100644 index 0000000000..1f11c67f05 Binary files /dev/null and b/docs/my-website/img/release_notes/credentials.jpg differ diff --git a/docs/my-website/img/release_notes/litellm_test_connection.gif b/docs/my-website/img/release_notes/litellm_test_connection.gif new file mode 100644 index 0000000000..2c8ea45ab4 Binary files /dev/null and b/docs/my-website/img/release_notes/litellm_test_connection.gif differ diff --git a/docs/my-website/img/release_notes/responses_api.png b/docs/my-website/img/release_notes/responses_api.png new file mode 100644 index 0000000000..045d86825d Binary files /dev/null and b/docs/my-website/img/release_notes/responses_api.png differ diff --git a/docs/my-website/img/ui_add_cred_2.png b/docs/my-website/img/ui_add_cred_2.png new file mode 100644 index 0000000000..199a15e178 Binary files /dev/null and b/docs/my-website/img/ui_add_cred_2.png differ diff --git a/docs/my-website/img/ui_cred_3.png b/docs/my-website/img/ui_cred_3.png new file mode 100644 index 0000000000..67a614d51b Binary files /dev/null and b/docs/my-website/img/ui_cred_3.png differ diff --git a/docs/my-website/img/ui_cred_4.png b/docs/my-website/img/ui_cred_4.png new file mode 100644 index 0000000000..84e70e0347 Binary files /dev/null and b/docs/my-website/img/ui_cred_4.png differ diff --git a/docs/my-website/img/ui_cred_add.png b/docs/my-website/img/ui_cred_add.png new file mode 100644 index 0000000000..7b03270b3c Binary files /dev/null and b/docs/my-website/img/ui_cred_add.png differ diff --git a/docs/my-website/img/ui_request_logs.png b/docs/my-website/img/ui_request_logs.png new file mode 100644 index 0000000000..912123522b Binary files /dev/null and b/docs/my-website/img/ui_request_logs.png differ diff --git a/docs/my-website/img/ui_request_logs_content.png b/docs/my-website/img/ui_request_logs_content.png new file mode 100644 index 0000000000..74673b5553 Binary files /dev/null and b/docs/my-website/img/ui_request_logs_content.png differ diff --git a/docs/my-website/img/use_model_cred.png b/docs/my-website/img/use_model_cred.png new file mode 100644 index 0000000000..35d4248555 Binary files /dev/null and b/docs/my-website/img/use_model_cred.png differ diff --git a/docs/my-website/package-lock.json b/docs/my-website/package-lock.json index b5392b32b4..6c07e67d91 100644 --- a/docs/my-website/package-lock.json +++ b/docs/my-website/package-lock.json @@ -706,12 +706,13 @@ } }, 
"node_modules/@babel/helpers": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.0.tgz", - "integrity": "sha512-tbhNuIxNcVb21pInl3ZSjksLCvgdZy9KwJ8brv993QtIVKJBBkYXz4q4ZbAv31GdnC+R90np23L5FbEBlthAEw==", + "version": "7.26.10", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.10.tgz", + "integrity": "sha512-UPYc3SauzZ3JGgj87GgZ89JVdC5dj0AoetR5Bw6wj4niittNyFh6+eOGonYvJ1ao6B8lEa3Q3klS7ADZ53bc5g==", + "license": "MIT", "dependencies": { - "@babel/template": "^7.25.9", - "@babel/types": "^7.26.0" + "@babel/template": "^7.26.9", + "@babel/types": "^7.26.10" }, "engines": { "node": ">=6.9.0" @@ -796,11 +797,12 @@ } }, "node_modules/@babel/parser": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.3.tgz", - "integrity": "sha512-WJ/CvmY8Mea8iDXo6a7RK2wbmJITT5fN3BEkRuFlxVyNx8jOKIIhmC4fSkTcPcf8JyavbBwIe6OpiCOBXt/IcA==", + "version": "7.26.10", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.10.tgz", + "integrity": "sha512-6aQR2zGE/QFi8JpDLjUZEPYOs7+mhKXm86VaKFiLP35JQwQb6bwUE+XbvkH0EptsYhbNBSUGaUBLKqxH1xSgsA==", + "license": "MIT", "dependencies": { - "@babel/types": "^7.26.3" + "@babel/types": "^7.26.10" }, "bin": { "parser": "bin/babel-parser.js" @@ -2157,9 +2159,10 @@ } }, "node_modules/@babel/runtime-corejs3": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.26.0.tgz", - "integrity": "sha512-YXHu5lN8kJCb1LOb9PgV6pvak43X2h4HvRApcN5SdWeaItQOzfn1hgP6jasD6KWQyJDBxrVmA9o9OivlnNJK/w==", + "version": "7.26.10", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.26.10.tgz", + "integrity": "sha512-uITFQYO68pMEYR46AHgQoyBg7KPPJDAbGn4jUTIRgCFJIp88MIBUianVOplhZDEec07bp9zIyr4Kp0FCyQzmWg==", + "license": "MIT", "dependencies": { "core-js-pure": "^3.30.2", "regenerator-runtime": "^0.14.0" @@ -2169,13 +2172,14 @@ } }, "node_modules/@babel/template": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.9.tgz", - "integrity": "sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.26.9.tgz", + "integrity": "sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA==", + "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.25.9", - "@babel/parser": "^7.25.9", - "@babel/types": "^7.25.9" + "@babel/code-frame": "^7.26.2", + "@babel/parser": "^7.26.9", + "@babel/types": "^7.26.9" }, "engines": { "node": ">=6.9.0" @@ -2199,9 +2203,10 @@ } }, "node_modules/@babel/types": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.3.tgz", - "integrity": "sha512-vN5p+1kl59GVKMvTHt55NzzmYVxprfJD+ql7U9NFIfKCBkYE55LYtS+WtPlaYOyzydrKI8Nezd+aZextrd+FMA==", + "version": "7.26.10", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.10.tgz", + "integrity": "sha512-emqcG3vHrpxUKTrxcblR36dcrcoRDvKmnL/dCL6ZsHaShW80qxCAcNhzQZrpeM765VzEos+xOi4s+r4IXzTwdQ==", + "license": "MIT", "dependencies": { "@babel/helper-string-parser": "^7.25.9", "@babel/helper-validator-identifier": "^7.25.9" diff --git a/docs/my-website/release_notes/v1.63.11-stable/index.md b/docs/my-website/release_notes/v1.63.11-stable/index.md new file mode 100644 index 0000000000..f502420507 --- /dev/null +++ b/docs/my-website/release_notes/v1.63.11-stable/index.md @@ 
-0,0 +1,180 @@
+---
+title: v1.63.11-stable
+slug: v1.63.11-stable
+date: 2025-03-15T10:00:00
+authors:
+  - name: Krrish Dholakia
+    title: CEO, LiteLLM
+    url: https://www.linkedin.com/in/krish-d/
+    image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+  - name: Ishaan Jaffer
+    title: CTO, LiteLLM
+    url: https://www.linkedin.com/in/reffajnaahsi/
+    image_url: https://pbs.twimg.com/profile_images/1613813310264340481/lz54oEiB_400x400.jpg
+
+tags: [credential management, thinking content, responses api, snowflake]
+hide_table_of_contents: false
+---
+
+import Image from '@theme/IdealImage';
+
+These are the changes since `v1.63.2-stable`.
+
+This release is primarily focused on:
+- [Beta] Responses API Support
+- Snowflake Cortex Support, Amazon Nova Image Generation
+- UI - Credential Management, re-use credentials when adding new models
+- UI - Test Connection to LLM Provider before adding a model
+
+:::info
+
+This release will be live on 03/16/2025
+
+:::
+
+
+
+## Known Issues
+- 🚨 Known issue on Azure OpenAI - We don't recommend upgrading if you use Azure OpenAI. This version failed our Azure OpenAI load test
+
+
+## Docker Run LiteLLM Proxy
+
+```
+docker run \
+-e STORE_MODEL_IN_DB=True \
+-p 4000:4000 \
+ghcr.io/berriai/litellm:main-v1.63.11-stable
+```
+
+## Demo Instance
+
+Here's a Demo Instance to test changes:
+- Instance: https://demo.litellm.ai/
+- Login Credentials:
+  - Username: admin
+  - Password: sk-1234
+
+
+
+## New Models / Updated Models
+
+- Image Generation support for Amazon Nova Canvas [Getting Started](https://docs.litellm.ai/docs/providers/bedrock#image-generation)
+- Add pricing for Jamba new models [PR](https://github.com/BerriAI/litellm/pull/9032/files)
+- Add pricing for Amazon EU models [PR](https://github.com/BerriAI/litellm/pull/9056/files)
+- Add Bedrock Deepseek R1 model pricing [PR](https://github.com/BerriAI/litellm/pull/9108/files)
+- Update Gemini pricing: Gemma 3, Flash 2 thinking update, LearnLM [PR](https://github.com/BerriAI/litellm/pull/9190/files)
+- Mark Cohere Embedding 3 models as Multimodal [PR](https://github.com/BerriAI/litellm/pull/9176/commits/c9a576ce4221fc6e50dc47cdf64ab62736c9da41)
+- Add Azure Data Zone pricing [PR](https://github.com/BerriAI/litellm/pull/9185/files#diff-19ad91c53996e178c1921cbacadf6f3bae20cfe062bd03ee6bfffb72f847ee37)
+  - LiteLLM Tracks cost for `azure/eu` and `azure/us` models
+
+
+
+## LLM Translation
+
+
+
+1. **New Endpoints**
+- [Beta] POST `/responses` API. [Getting Started](https://docs.litellm.ai/docs/response_api)
+
+2. **New LLM Providers**
+- Snowflake Cortex [Getting Started](https://docs.litellm.ai/docs/providers/snowflake)
+
+3. **New LLM Features**
+
+- Support OpenRouter `reasoning_content` on streaming [Getting Started](https://docs.litellm.ai/docs/reasoning_content)
+
+4.
**Bug Fixes** + +- OpenAI: Return `code`, `param` and `type` on bad request error [More information on litellm exceptions](https://docs.litellm.ai/docs/exception_mapping) +- Bedrock: Fix converse chunk parsing to only return empty dict on tool use [PR](https://github.com/BerriAI/litellm/pull/9166) +- Bedrock: Support extra_headers [PR](https://github.com/BerriAI/litellm/pull/9113) +- Azure: Fix Function Calling Bug & Update Default API Version to `2025-02-01-preview` [PR](https://github.com/BerriAI/litellm/pull/9191) +- Azure: Fix AI services URL [PR](https://github.com/BerriAI/litellm/pull/9185) +- Vertex AI: Handle HTTP 201 status code in response [PR](https://github.com/BerriAI/litellm/pull/9193) +- Perplexity: Fix incorrect streaming response [PR](https://github.com/BerriAI/litellm/pull/9081) +- Triton: Fix streaming completions bug [PR](https://github.com/BerriAI/litellm/pull/8386) +- Deepgram: Support bytes.IO when handling audio files for transcription [PR](https://github.com/BerriAI/litellm/pull/9071) +- Ollama: Fix "system" role has become unacceptable [PR](https://github.com/BerriAI/litellm/pull/9261) +- All Providers (Streaming): Fix String `data:` stripped from entire content in streamed responses [PR](https://github.com/BerriAI/litellm/pull/9070) + + + +## Spend Tracking Improvements + +1. Support Bedrock converse cache token tracking [Getting Started](https://docs.litellm.ai/docs/completion/prompt_caching) +2. Cost Tracking for Responses API [Getting Started](https://docs.litellm.ai/docs/response_api) +3. Fix Azure Whisper cost tracking [Getting Started](https://docs.litellm.ai/docs/audio_transcription) + + +## UI + +### Re-Use Credentials on UI + +You can now onboard LLM provider credentials on LiteLLM UI. Once these credentials are added you can re-use them when adding new models [Getting Started](https://docs.litellm.ai/docs/proxy/ui_credentials) + + + + +### Test Connections before adding models + +Before adding a model you can test the connection to the LLM provider to verify you have setup your API Base + API Key correctly + + + +### General UI Improvements +1. Add Models Page + - Allow adding Cerebras, Sambanova, Perplexity, Fireworks, Openrouter, TogetherAI Models, Text-Completion OpenAI on Admin UI + - Allow adding EU OpenAI models + - Fix: Instantly show edit + deletes to models +2. Keys Page + - Fix: Instantly show newly created keys on Admin UI (don't require refresh) + - Fix: Allow clicking into Top Keys when showing users Top API Key + - Fix: Allow Filter Keys by Team Alias, Key Alias and Org + - UI Improvements: Show 100 Keys Per Page, Use full height, increase width of key alias +3. Users Page + - Fix: Show correct count of internal user keys on Users Page + - Fix: Metadata not updating in Team UI +4. Logs Page + - UI Improvements: Keep expanded log in focus on LiteLLM UI + - UI Improvements: Minor improvements to logs page + - Fix: Allow internal user to query their own logs + - Allow switching off storing Error Logs in DB [Getting Started](https://docs.litellm.ai/docs/proxy/ui_logs) +5. Sign In/Sign Out + - Fix: Correctly use `PROXY_LOGOUT_URL` when set [Getting Started](https://docs.litellm.ai/docs/proxy/self_serve#setting-custom-logout-urls) + + +## Security + +1. Support for Rotating Master Keys [Getting Started](https://docs.litellm.ai/docs/proxy/master_key_rotations) +2. 
Fix: Internal User Viewer Permissions, don't allow `internal_user_viewer` role to see `Test Key Page` or `Create Key Button` [More information on role based access controls](https://docs.litellm.ai/docs/proxy/access_control) +3. Emit audit logs on All user + model Create/Update/Delete endpoints [Getting Started](https://docs.litellm.ai/docs/proxy/multiple_admins) +4. JWT + - Support multiple JWT OIDC providers [Getting Started](https://docs.litellm.ai/docs/proxy/token_auth) + - Fix JWT access with Groups not working when team is assigned All Proxy Models access +5. Using K/V pairs in 1 AWS Secret [Getting Started](https://docs.litellm.ai/docs/secret#using-kv-pairs-in-1-aws-secret) + + +## Logging Integrations + +1. Prometheus: Track Azure LLM API latency metric [Getting Started](https://docs.litellm.ai/docs/proxy/prometheus#request-latency-metrics) +2. Athina: Added tags, user_feedback and model_options to additional_keys which can be sent to Athina [Getting Started](https://docs.litellm.ai/docs/observability/athina_integration) + + +## Performance / Reliability improvements + +1. Redis + litellm router - Fix Redis cluster mode for litellm router [PR](https://github.com/BerriAI/litellm/pull/9010) + + +## General Improvements + +1. OpenWebUI Integration - display `thinking` tokens +- Guide on getting started with LiteLLM x OpenWebUI. [Getting Started](https://docs.litellm.ai/docs/tutorials/openweb_ui) +- Display `thinking` tokens on OpenWebUI (Bedrock, Anthropic, Deepseek) [Getting Started](https://docs.litellm.ai/docs/tutorials/openweb_ui#render-thinking-content-on-openweb-ui) + + + + +## Complete Git Diff + +[Here's the complete git diff](https://github.com/BerriAI/litellm/compare/v1.63.2-stable...v1.63.11-stable) \ No newline at end of file diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js index cf4f14b202..a92ceb3b05 100644 --- a/docs/my-website/sidebars.js +++ b/docs/my-website/sidebars.js @@ -101,7 +101,9 @@ const sidebars = { "proxy/admin_ui_sso", "proxy/self_serve", "proxy/public_teams", - "proxy/custom_sso" + "proxy/custom_sso", + "proxy/ui_credentials", + "proxy/ui_logs" ], }, { @@ -231,6 +233,7 @@ const sidebars = { "providers/sambanova", "providers/custom_llm_server", "providers/petals", + "providers/snowflake" ], }, { @@ -273,7 +276,7 @@ const sidebars = { items: [ { type: "category", - label: "Chat", + label: "/chat/completions", link: { type: "generated-index", title: "Chat Completions", @@ -286,12 +289,13 @@ const sidebars = { "completion/usage", ], }, + "response_api", "text_completion", "embedding/supported_embedding", "anthropic_unified", { type: "category", - label: "Image", + label: "/images", items: [ "image_generation", "image_variations", @@ -299,7 +303,7 @@ const sidebars = { }, { type: "category", - label: "Audio", + label: "/audio", "items": [ "audio_transcription", "text_to_speech", @@ -361,8 +365,12 @@ const sidebars = { ], }, { - type: "doc", - id: "proxy/prompt_management" + type: "category", + label: "[Beta] Prompt Management", + items: [ + "proxy/prompt_management", + "proxy/custom_prompt_management" + ], }, { type: "category", diff --git a/enterprise/enterprise_hooks/aporia_ai.py b/enterprise/enterprise_hooks/aporia_ai.py index d258f00233..2b427bea5c 100644 --- a/enterprise/enterprise_hooks/aporia_ai.py +++ b/enterprise/enterprise_hooks/aporia_ai.py @@ -163,7 +163,7 @@ class AporiaGuardrail(CustomGuardrail): pass - async def async_moderation_hook( ### 👈 KEY CHANGE ### + async def async_moderation_hook( self, data: dict, 
user_api_key_dict: UserAPIKeyAuth, @@ -173,6 +173,7 @@ class AporiaGuardrail(CustomGuardrail): "image_generation", "moderation", "audio_transcription", + "responses", ], ): from litellm.proxy.common_utils.callback_utils import ( diff --git a/enterprise/enterprise_hooks/google_text_moderation.py b/enterprise/enterprise_hooks/google_text_moderation.py index af5ea35987..fe26a03207 100644 --- a/enterprise/enterprise_hooks/google_text_moderation.py +++ b/enterprise/enterprise_hooks/google_text_moderation.py @@ -94,6 +94,7 @@ class _ENTERPRISE_GoogleTextModeration(CustomLogger): "image_generation", "moderation", "audio_transcription", + "responses", ], ): """ diff --git a/enterprise/enterprise_hooks/llama_guard.py b/enterprise/enterprise_hooks/llama_guard.py index 8abbc996d3..2c53fafa5b 100644 --- a/enterprise/enterprise_hooks/llama_guard.py +++ b/enterprise/enterprise_hooks/llama_guard.py @@ -107,6 +107,7 @@ class _ENTERPRISE_LlamaGuard(CustomLogger): "image_generation", "moderation", "audio_transcription", + "responses", ], ): """ diff --git a/enterprise/enterprise_hooks/llm_guard.py b/enterprise/enterprise_hooks/llm_guard.py index 1b639b8a08..078b8e216e 100644 --- a/enterprise/enterprise_hooks/llm_guard.py +++ b/enterprise/enterprise_hooks/llm_guard.py @@ -126,6 +126,7 @@ class _ENTERPRISE_LLMGuard(CustomLogger): "image_generation", "moderation", "audio_transcription", + "responses", ], ): """ diff --git a/enterprise/enterprise_hooks/openai_moderation.py b/enterprise/enterprise_hooks/openai_moderation.py index 47506a00c4..1db932c853 100644 --- a/enterprise/enterprise_hooks/openai_moderation.py +++ b/enterprise/enterprise_hooks/openai_moderation.py @@ -31,7 +31,7 @@ class _ENTERPRISE_OpenAI_Moderation(CustomLogger): #### CALL HOOKS - proxy only #### - async def async_moderation_hook( ### 👈 KEY CHANGE ### + async def async_moderation_hook( self, data: dict, user_api_key_dict: UserAPIKeyAuth, @@ -41,6 +41,7 @@ class _ENTERPRISE_OpenAI_Moderation(CustomLogger): "image_generation", "moderation", "audio_transcription", + "responses", ], ): text = "" diff --git a/litellm/__init__.py b/litellm/__init__.py index d66707f8b3..762a058c7e 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -8,12 +8,14 @@ import os from typing import Callable, List, Optional, Dict, Union, Any, Literal, get_args from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler from litellm.caching.caching import Cache, DualCache, RedisCache, InMemoryCache +from litellm.caching.llm_caching_handler import LLMClientCache from litellm.types.llms.bedrock import COHERE_EMBEDDING_INPUT_TYPES from litellm.types.utils import ( ImageObject, BudgetConfig, all_litellm_params, all_litellm_params as _litellm_completion_params, + CredentialItem, ) # maintain backwards compatibility for root param from litellm._logging import ( set_verbose, @@ -180,6 +182,7 @@ cloudflare_api_key: Optional[str] = None baseten_key: Optional[str] = None aleph_alpha_key: Optional[str] = None nlp_cloud_key: Optional[str] = None +snowflake_key: Optional[str] = None common_cloud_provider_auth_params: dict = { "params": ["project", "region_name", "token"], "providers": ["vertex_ai", "bedrock", "watsonx", "azure", "vertex_ai_beta"], @@ -189,15 +192,17 @@ ssl_verify: Union[str, bool] = True ssl_certificate: Optional[str] = None disable_streaming_logging: bool = False disable_add_transform_inline_image_block: bool = False -in_memory_llm_clients_cache: InMemoryCache = InMemoryCache() +in_memory_llm_clients_cache: LLMClientCache = 
LLMClientCache() safe_memory_mode: bool = False enable_azure_ad_token_refresh: Optional[bool] = False ### DEFAULT AZURE API VERSION ### -AZURE_DEFAULT_API_VERSION = "2024-08-01-preview" # this is updated to the latest +AZURE_DEFAULT_API_VERSION = "2025-02-01-preview" # this is updated to the latest ### DEFAULT WATSONX API VERSION ### WATSONX_DEFAULT_API_VERSION = "2024-03-13" ### COHERE EMBEDDINGS DEFAULT TYPE ### COHERE_DEFAULT_EMBEDDING_INPUT_TYPE: COHERE_EMBEDDING_INPUT_TYPES = "search_document" +### CREDENTIALS ### +credential_list: List[CredentialItem] = [] ### GUARDRAILS ### llamaguard_model_name: Optional[str] = None openai_moderations_model_name: Optional[str] = None @@ -412,6 +417,7 @@ cerebras_models: List = [] galadriel_models: List = [] sambanova_models: List = [] assemblyai_models: List = [] +snowflake_models: List = [] def is_bedrock_pricing_only_model(key: str) -> bool: @@ -565,6 +571,8 @@ def add_known_models(): assemblyai_models.append(key) elif value.get("litellm_provider") == "jina_ai": jina_ai_models.append(key) + elif value.get("litellm_provider") == "snowflake": + snowflake_models.append(key) add_known_models() @@ -594,6 +602,7 @@ ollama_models = ["llama2"] maritalk_models = ["maritalk"] + model_list = ( open_ai_chat_completion_models + open_ai_text_completion_models @@ -638,6 +647,7 @@ model_list = ( + azure_text_models + assemblyai_models + jina_ai_models + + snowflake_models ) model_list_set = set(model_list) @@ -693,6 +703,7 @@ models_by_provider: dict = { "sambanova": sambanova_models, "assemblyai": assemblyai_models, "jina_ai": jina_ai_models, + "snowflake": snowflake_models, } # mapping for those models which have larger equivalents @@ -809,6 +820,7 @@ from .llms.databricks.embed.transformation import DatabricksEmbeddingConfig from .llms.predibase.chat.transformation import PredibaseConfig from .llms.replicate.chat.transformation import ReplicateConfig from .llms.cohere.completion.transformation import CohereTextConfig as CohereConfig +from .llms.snowflake.chat.transformation import SnowflakeConfig from .llms.cohere.rerank.transformation import CohereRerankConfig from .llms.cohere.rerank_v2.transformation import CohereRerankV2Config from .llms.azure_ai.rerank.transformation import AzureAIRerankConfig @@ -899,6 +911,7 @@ from .llms.bedrock.chat.invoke_transformations.base_invoke_transformation import from .llms.bedrock.image.amazon_stability1_transformation import AmazonStabilityConfig from .llms.bedrock.image.amazon_stability3_transformation import AmazonStability3Config +from .llms.bedrock.image.amazon_nova_canvas_transformation import AmazonNovaCanvasConfig from .llms.bedrock.embed.amazon_titan_g1_transformation import AmazonTitanG1Config from .llms.bedrock.embed.amazon_titan_multimodal_transformation import ( AmazonTitanMultimodalEmbeddingG1Config, @@ -921,11 +934,14 @@ from .llms.groq.chat.transformation import GroqChatConfig from .llms.voyage.embedding.transformation import VoyageEmbeddingConfig from .llms.azure_ai.chat.transformation import AzureAIStudioConfig from .llms.mistral.mistral_chat_transformation import MistralConfig +from .llms.openai.responses.transformation import OpenAIResponsesAPIConfig from .llms.openai.chat.o_series_transformation import ( OpenAIOSeriesConfig as OpenAIO1Config, # maintain backwards compatibility OpenAIOSeriesConfig, ) +from .llms.snowflake.chat.transformation import SnowflakeConfig + openaiOSeriesConfig = OpenAIOSeriesConfig() from .llms.openai.chat.gpt_transformation import ( OpenAIGPTConfig, @@ -1010,6 +1026,7 @@ from 
.batches.main import * from .batch_completion.main import * # type: ignore from .rerank_api.main import * from .llms.anthropic.experimental_pass_through.messages.handler import * +from .responses.main import * from .realtime_api.main import _arealtime from .fine_tuning.main import * from .files.main import * diff --git a/litellm/_redis.py b/litellm/_redis.py index 1e03993c20..5b2f85b1af 100644 --- a/litellm/_redis.py +++ b/litellm/_redis.py @@ -182,9 +182,7 @@ def init_redis_cluster(redis_kwargs) -> redis.RedisCluster: "REDIS_CLUSTER_NODES environment variable is not valid JSON. Please ensure it's properly formatted." ) - verbose_logger.debug( - "init_redis_cluster: startup nodes are being initialized." - ) + verbose_logger.debug("init_redis_cluster: startup nodes are being initialized.") from redis.cluster import ClusterNode args = _get_redis_cluster_kwargs() @@ -307,7 +305,6 @@ def get_redis_async_client( return _init_async_redis_sentinel(redis_kwargs) return async_redis.Redis( - socket_timeout=5, **redis_kwargs, ) diff --git a/litellm/assistants/main.py b/litellm/assistants/main.py index acb37b1e6f..28f4518f15 100644 --- a/litellm/assistants/main.py +++ b/litellm/assistants/main.py @@ -15,6 +15,7 @@ import litellm from litellm.types.router import GenericLiteLLMParams from litellm.utils import ( exception_type, + get_litellm_params, get_llm_provider, get_secret, supports_httpx_timeout, @@ -86,6 +87,7 @@ def get_assistants( optional_params = GenericLiteLLMParams( api_key=api_key, api_base=api_base, api_version=api_version, **kwargs ) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -169,6 +171,7 @@ def get_assistants( max_retries=optional_params.max_retries, client=client, aget_assistants=aget_assistants, # type: ignore + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -270,6 +273,7 @@ def create_assistants( optional_params = GenericLiteLLMParams( api_key=api_key, api_base=api_base, api_version=api_version, **kwargs ) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -371,6 +375,7 @@ def create_assistants( client=client, async_create_assistants=async_create_assistants, create_assistant_data=create_assistant_data, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -445,6 +450,8 @@ def delete_assistant( api_key=api_key, api_base=api_base, api_version=api_version, **kwargs ) + litellm_params_dict = get_litellm_params(**kwargs) + async_delete_assistants: Optional[bool] = kwargs.pop( "async_delete_assistants", None ) @@ -544,6 +551,7 @@ def delete_assistant( max_retries=optional_params.max_retries, client=client, async_delete_assistants=async_delete_assistants, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -639,6 +647,7 @@ def create_thread( """ acreate_thread = kwargs.get("acreate_thread", None) optional_params = GenericLiteLLMParams(**kwargs) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -731,6 +740,7 @@ def create_thread( max_retries=optional_params.max_retries, client=client, acreate_thread=acreate_thread, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -795,7 +805,7 @@ def get_thread( """Get the thread 
object, given a thread_id""" aget_thread = kwargs.pop("aget_thread", None) optional_params = GenericLiteLLMParams(**kwargs) - + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 # set timeout for 10 minutes by default @@ -884,6 +894,7 @@ def get_thread( max_retries=optional_params.max_retries, client=client, aget_thread=aget_thread, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -972,6 +983,7 @@ def add_message( _message_data = MessageData( role=role, content=content, attachments=attachments, metadata=metadata ) + litellm_params_dict = get_litellm_params(**kwargs) optional_params = GenericLiteLLMParams(**kwargs) message_data = get_optional_params_add_message( @@ -1068,6 +1080,7 @@ def add_message( max_retries=optional_params.max_retries, client=client, a_add_message=a_add_message, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -1139,6 +1152,7 @@ def get_messages( ) -> SyncCursorPage[OpenAIMessage]: aget_messages = kwargs.pop("aget_messages", None) optional_params = GenericLiteLLMParams(**kwargs) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -1225,6 +1239,7 @@ def get_messages( max_retries=optional_params.max_retries, client=client, aget_messages=aget_messages, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -1337,6 +1352,7 @@ def run_thread( """Run a given thread + assistant.""" arun_thread = kwargs.pop("arun_thread", None) optional_params = GenericLiteLLMParams(**kwargs) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -1437,6 +1453,7 @@ def run_thread( max_retries=optional_params.max_retries, client=client, arun_thread=arun_thread, + litellm_params=litellm_params_dict, ) # type: ignore else: raise litellm.exceptions.BadRequestError( diff --git a/litellm/batches/main.py b/litellm/batches/main.py index 2f4800043c..1ddcafce4c 100644 --- a/litellm/batches/main.py +++ b/litellm/batches/main.py @@ -111,6 +111,7 @@ def create_batch( proxy_server_request = kwargs.get("proxy_server_request", None) model_info = kwargs.get("model_info", None) _is_async = kwargs.pop("acreate_batch", False) is True + litellm_params = get_litellm_params(**kwargs) litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj", None) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -217,6 +218,7 @@ def create_batch( timeout=timeout, max_retries=optional_params.max_retries, create_batch_data=_create_batch_request, + litellm_params=litellm_params, ) elif custom_llm_provider == "vertex_ai": api_base = optional_params.api_base or "" @@ -320,15 +322,12 @@ def retrieve_batch( """ try: optional_params = GenericLiteLLMParams(**kwargs) - litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj", None) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 litellm_params = get_litellm_params( custom_llm_provider=custom_llm_provider, - litellm_call_id=kwargs.get("litellm_call_id", None), - litellm_trace_id=kwargs.get("litellm_trace_id"), - litellm_metadata=kwargs.get("litellm_metadata"), + **kwargs, ) litellm_logging_obj.update_environment_variables( model=None, 
@@ -424,6 +423,7 @@ def retrieve_batch( timeout=timeout, max_retries=optional_params.max_retries, retrieve_batch_data=_retrieve_batch_request, + litellm_params=litellm_params, ) elif custom_llm_provider == "vertex_ai": api_base = optional_params.api_base or "" @@ -526,6 +526,10 @@ def list_batches( try: # set API KEY optional_params = GenericLiteLLMParams(**kwargs) + litellm_params = get_litellm_params( + custom_llm_provider=custom_llm_provider, + **kwargs, + ) api_key = ( optional_params.api_key or litellm.api_key # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there @@ -603,6 +607,7 @@ def list_batches( api_version=api_version, timeout=timeout, max_retries=optional_params.max_retries, + litellm_params=litellm_params, ) else: raise litellm.exceptions.BadRequestError( @@ -678,6 +683,10 @@ def cancel_batch( """ try: optional_params = GenericLiteLLMParams(**kwargs) + litellm_params = get_litellm_params( + custom_llm_provider=custom_llm_provider, + **kwargs, + ) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 # set timeout for 10 minutes by default @@ -765,6 +774,7 @@ def cancel_batch( timeout=timeout, max_retries=optional_params.max_retries, cancel_batch_data=_cancel_batch_request, + litellm_params=litellm_params, ) else: raise litellm.exceptions.BadRequestError( diff --git a/litellm/caching/caching_handler.py b/litellm/caching/caching_handler.py index 2a958c9eee..09fabf1c12 100644 --- a/litellm/caching/caching_handler.py +++ b/litellm/caching/caching_handler.py @@ -790,6 +790,7 @@ class LLMCachingHandler: - Else append the chunk to self.async_streaming_chunks """ + complete_streaming_response: Optional[ Union[ModelResponse, TextCompletionResponse] ] = _assemble_complete_response_from_streaming_chunks( @@ -800,7 +801,6 @@ class LLMCachingHandler: streaming_chunks=self.async_streaming_chunks, is_async=True, ) - # if a complete_streaming_response is assembled, add it to the cache if complete_streaming_response is not None: await self.async_set_cache( diff --git a/litellm/caching/llm_caching_handler.py b/litellm/caching/llm_caching_handler.py new file mode 100644 index 0000000000..429634b7b1 --- /dev/null +++ b/litellm/caching/llm_caching_handler.py @@ -0,0 +1,40 @@ +""" +Add the event loop to the cache key, to prevent event loop closed errors. +""" + +import asyncio + +from .in_memory_cache import InMemoryCache + + +class LLMClientCache(InMemoryCache): + + def update_cache_key_with_event_loop(self, key): + """ + Add the event loop to the cache key, to prevent event loop closed errors. + If none, use the key as is. 
+ """ + try: + event_loop = asyncio.get_event_loop() + stringified_event_loop = str(id(event_loop)) + return f"{key}-{stringified_event_loop}" + except Exception: # handle no current event loop + return key + + def set_cache(self, key, value, **kwargs): + key = self.update_cache_key_with_event_loop(key) + return super().set_cache(key, value, **kwargs) + + async def async_set_cache(self, key, value, **kwargs): + key = self.update_cache_key_with_event_loop(key) + return await super().async_set_cache(key, value, **kwargs) + + def get_cache(self, key, **kwargs): + key = self.update_cache_key_with_event_loop(key) + + return super().get_cache(key, **kwargs) + + async def async_get_cache(self, key, **kwargs): + key = self.update_cache_key_with_event_loop(key) + + return await super().async_get_cache(key, **kwargs) diff --git a/litellm/caching/redis_cache.py b/litellm/caching/redis_cache.py index 66245e7476..0571ac9f15 100644 --- a/litellm/caching/redis_cache.py +++ b/litellm/caching/redis_cache.py @@ -54,6 +54,7 @@ class RedisCache(BaseCache): redis_flush_size: Optional[int] = 100, namespace: Optional[str] = None, startup_nodes: Optional[List] = None, # for redis-cluster + socket_timeout: Optional[float] = 5.0, # default 5 second timeout **kwargs, ): @@ -70,6 +71,9 @@ class RedisCache(BaseCache): redis_kwargs["password"] = password if startup_nodes is not None: redis_kwargs["startup_nodes"] = startup_nodes + if socket_timeout is not None: + redis_kwargs["socket_timeout"] = socket_timeout + ### HEALTH MONITORING OBJECT ### if kwargs.get("service_logger_obj", None) is not None and isinstance( kwargs["service_logger_obj"], ServiceLogging @@ -556,6 +560,7 @@ class RedisCache(BaseCache): ## LOGGING ## end_time = time.time() _duration = end_time - start_time + asyncio.create_task( self.service_logger_obj.async_service_success_hook( service=ServiceTypes.REDIS, diff --git a/litellm/constants.py b/litellm/constants.py index 0288c45e40..eb59858d43 100644 --- a/litellm/constants.py +++ b/litellm/constants.py @@ -7,6 +7,7 @@ DEFAULT_MAX_RETRIES = 2 DEFAULT_FAILURE_THRESHOLD_PERCENT = ( 0.5 # default cooldown a deployment if 50% of requests fail in a given minute ) +DEFAULT_REDIS_SYNC_INTERVAL = 1 DEFAULT_COOLDOWN_TIME_SECONDS = 5 DEFAULT_REPLICATE_POLLING_RETRIES = 5 DEFAULT_REPLICATE_POLLING_DELAY_SECONDS = 1 @@ -18,6 +19,7 @@ SINGLE_DEPLOYMENT_TRAFFIC_FAILURE_THRESHOLD = 1000 # Minimum number of requests REPEATED_STREAMING_CHUNK_LIMIT = 100 # catch if model starts looping the same chunk while streaming. Uses high default to prevent false positives. 
#### Networking settings #### request_timeout: float = 6000 # time in seconds +STREAM_SSE_DONE_STRING: str = "[DONE]" LITELLM_CHAT_PROVIDERS = [ "openai", diff --git a/litellm/cost_calculator.py b/litellm/cost_calculator.py index 1d10fa1f9e..e17a94c87e 100644 --- a/litellm/cost_calculator.py +++ b/litellm/cost_calculator.py @@ -44,7 +44,12 @@ from litellm.llms.vertex_ai.cost_calculator import cost_router as google_cost_ro from litellm.llms.vertex_ai.image_generation.cost_calculator import ( cost_calculator as vertex_ai_image_cost_calculator, ) -from litellm.types.llms.openai import HttpxBinaryResponseContent +from litellm.responses.utils import ResponseAPILoggingUtils +from litellm.types.llms.openai import ( + HttpxBinaryResponseContent, + ResponseAPIUsage, + ResponsesAPIResponse, +) from litellm.types.rerank import RerankBilledUnits, RerankResponse from litellm.types.utils import ( CallTypesLiteral, @@ -464,6 +469,13 @@ def _get_usage_object( return usage_obj +def _is_known_usage_objects(usage_obj): + """Returns True if the usage obj is a known Usage type""" + return isinstance(usage_obj, litellm.Usage) or isinstance( + usage_obj, ResponseAPIUsage + ) + + def _infer_call_type( call_type: Optional[CallTypesLiteral], completion_response: Any ) -> Optional[CallTypesLiteral]: @@ -573,9 +585,7 @@ def completion_cost( # noqa: PLR0915 base_model=base_model, ) - verbose_logger.debug( - f"completion_response _select_model_name_for_cost_calc: {model}" - ) + verbose_logger.info(f"selected model name for cost calculation: {model}") if completion_response is not None and ( isinstance(completion_response, BaseModel) @@ -587,8 +597,8 @@ def completion_cost( # noqa: PLR0915 ) else: usage_obj = getattr(completion_response, "usage", {}) - if isinstance(usage_obj, BaseModel) and not isinstance( - usage_obj, litellm.Usage + if isinstance(usage_obj, BaseModel) and not _is_known_usage_objects( + usage_obj=usage_obj ): setattr( completion_response, @@ -601,6 +611,14 @@ def completion_cost( # noqa: PLR0915 _usage = usage_obj.model_dump() else: _usage = usage_obj + + if ResponseAPILoggingUtils._is_response_api_usage(_usage): + _usage = ( + ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage( + _usage + ).model_dump() + ) + # get input/output tokens from completion_response prompt_tokens = _usage.get("prompt_tokens", 0) completion_tokens = _usage.get("completion_tokens", 0) @@ -790,6 +808,23 @@ def completion_cost( # noqa: PLR0915 raise e +def get_response_cost_from_hidden_params( + hidden_params: Union[dict, BaseModel] +) -> Optional[float]: + if isinstance(hidden_params, BaseModel): + _hidden_params_dict = hidden_params.model_dump() + else: + _hidden_params_dict = hidden_params + + additional_headers = _hidden_params_dict.get("additional_headers", {}) + if additional_headers and "x-litellm-response-cost" in additional_headers: + response_cost = additional_headers["x-litellm-response-cost"] + if response_cost is None: + return None + return float(additional_headers["x-litellm-response-cost"]) + return None + + def response_cost_calculator( response_object: Union[ ModelResponse, @@ -799,6 +834,7 @@ def response_cost_calculator( TextCompletionResponse, HttpxBinaryResponseContent, RerankResponse, + ResponsesAPIResponse, ], model: str, custom_llm_provider: Optional[str], @@ -825,7 +861,7 @@ def response_cost_calculator( base_model: Optional[str] = None, custom_pricing: Optional[bool] = None, prompt: str = "", -) -> Optional[float]: +) -> float: """ Returns - float or None: cost of response @@ -837,6 
+873,14 @@ def response_cost_calculator( else: if isinstance(response_object, BaseModel): response_object._hidden_params["optional_params"] = optional_params + + if hasattr(response_object, "_hidden_params"): + provider_response_cost = get_response_cost_from_hidden_params( + response_object._hidden_params + ) + if provider_response_cost is not None: + return provider_response_cost + response_cost = completion_cost( completion_response=response_object, model=model, diff --git a/litellm/files/main.py b/litellm/files/main.py index e49066e84b..db9a11ced1 100644 --- a/litellm/files/main.py +++ b/litellm/files/main.py @@ -25,7 +25,7 @@ from litellm.types.llms.openai import ( HttpxBinaryResponseContent, ) from litellm.types.router import * -from litellm.utils import supports_httpx_timeout +from litellm.utils import get_litellm_params, supports_httpx_timeout ####### ENVIRONMENT VARIABLES ################### openai_files_instance = OpenAIFilesAPI() @@ -546,6 +546,7 @@ def create_file( try: _is_async = kwargs.pop("acreate_file", False) is True optional_params = GenericLiteLLMParams(**kwargs) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -630,6 +631,7 @@ def create_file( timeout=timeout, max_retries=optional_params.max_retries, create_file_data=_create_file_request, + litellm_params=litellm_params_dict, ) elif custom_llm_provider == "vertex_ai": api_base = optional_params.api_base or "" diff --git a/litellm/integrations/arize/_utils.py b/litellm/integrations/arize/_utils.py index 9921d47aff..487304cce4 100644 --- a/litellm/integrations/arize/_utils.py +++ b/litellm/integrations/arize/_utils.py @@ -1,31 +1,37 @@ -import json from typing import TYPE_CHECKING, Any, Optional from litellm._logging import verbose_logger +from litellm.litellm_core_utils.safe_json_dumps import safe_dumps from litellm.types.utils import StandardLoggingPayload if TYPE_CHECKING: from opentelemetry.trace import Span as _Span + Span = _Span else: Span = Any def set_attributes(span: Span, kwargs, response_obj): - from openinference.semconv.trace import ( + from litellm.integrations._types.open_inference import ( MessageAttributes, OpenInferenceSpanKindValues, SpanAttributes, ) try: - litellm_params = kwargs.get("litellm_params", {}) or {} + standard_logging_payload: Optional[StandardLoggingPayload] = kwargs.get( + "standard_logging_object" + ) ############################################# ############ LLM CALL METADATA ############## ############################################# - metadata = litellm_params.get("metadata", {}) or {} - span.set_attribute(SpanAttributes.METADATA, str(metadata)) + + if standard_logging_payload and ( + metadata := standard_logging_payload["metadata"] + ): + span.set_attribute(SpanAttributes.METADATA, safe_dumps(metadata)) ############################################# ########## LLM Request Attributes ########### @@ -62,13 +68,12 @@ def set_attributes(span: Span, kwargs, response_obj): msg.get("content", ""), ) - standard_logging_payload: Optional[StandardLoggingPayload] = kwargs.get( - "standard_logging_object" - ) - if standard_logging_payload and (model_params := standard_logging_payload["model_parameters"]): + if standard_logging_payload and ( + model_params := standard_logging_payload["model_parameters"] + ): # The Generative AI Provider: Azure, OpenAI, etc. 
span.set_attribute( - SpanAttributes.LLM_INVOCATION_PARAMETERS, json.dumps(model_params) + SpanAttributes.LLM_INVOCATION_PARAMETERS, safe_dumps(model_params) ) if model_params.get("user"): @@ -80,7 +85,7 @@ def set_attributes(span: Span, kwargs, response_obj): ########## LLM Response Attributes ########## # https://docs.arize.com/arize/large-language-models/tracing/semantic-conventions ############################################# - if hasattr(response_obj, 'get'): + if hasattr(response_obj, "get"): for choice in response_obj.get("choices", []): response_message = choice.get("message", {}) span.set_attribute( diff --git a/litellm/integrations/arize/arize.py b/litellm/integrations/arize/arize.py index 652957e1ee..7a0fb785a7 100644 --- a/litellm/integrations/arize/arize.py +++ b/litellm/integrations/arize/arize.py @@ -3,31 +3,38 @@ arize AI is OTEL compatible this file has Arize ai specific helper functions """ -import os -from typing import TYPE_CHECKING, Any +import os +from datetime import datetime +from typing import TYPE_CHECKING, Any, Optional, Union + from litellm.integrations.arize import _utils +from litellm.integrations.opentelemetry import OpenTelemetry from litellm.types.integrations.arize import ArizeConfig +from litellm.types.services import ServiceLoggerPayload if TYPE_CHECKING: - from litellm.types.integrations.arize import Protocol as _Protocol from opentelemetry.trace import Span as _Span + from litellm.types.integrations.arize import Protocol as _Protocol + Protocol = _Protocol Span = _Span else: Protocol = Any Span = Any - -class ArizeLogger: +class ArizeLogger(OpenTelemetry): + + def set_attributes(self, span: Span, kwargs, response_obj: Optional[Any]): + ArizeLogger.set_arize_attributes(span, kwargs, response_obj) + return @staticmethod def set_arize_attributes(span: Span, kwargs, response_obj): _utils.set_attributes(span, kwargs, response_obj) return - @staticmethod def get_arize_config() -> ArizeConfig: @@ -43,11 +50,6 @@ class ArizeLogger: space_key = os.environ.get("ARIZE_SPACE_KEY") api_key = os.environ.get("ARIZE_API_KEY") - if not space_key: - raise ValueError("ARIZE_SPACE_KEY not found in environment variables") - if not api_key: - raise ValueError("ARIZE_API_KEY not found in environment variables") - grpc_endpoint = os.environ.get("ARIZE_ENDPOINT") http_endpoint = os.environ.get("ARIZE_HTTP_ENDPOINT") @@ -55,13 +57,13 @@ class ArizeLogger: protocol: Protocol = "otlp_grpc" if grpc_endpoint: - protocol="otlp_grpc" - endpoint=grpc_endpoint + protocol = "otlp_grpc" + endpoint = grpc_endpoint elif http_endpoint: - protocol="otlp_http" - endpoint=http_endpoint + protocol = "otlp_http" + endpoint = http_endpoint else: - protocol="otlp_grpc" + protocol = "otlp_grpc" endpoint = "https://otlp.arize.com/v1" return ArizeConfig( @@ -71,4 +73,33 @@ class ArizeLogger: endpoint=endpoint, ) + async def async_service_success_hook( + self, + payload: ServiceLoggerPayload, + parent_otel_span: Optional[Span] = None, + start_time: Optional[Union[datetime, float]] = None, + end_time: Optional[Union[datetime, float]] = None, + event_metadata: Optional[dict] = None, + ): + """Arize is used mainly for LLM I/O tracing, sending router+caching metrics adds bloat to arize logs""" + pass + async def async_service_failure_hook( + self, + payload: ServiceLoggerPayload, + error: Optional[str] = "", + parent_otel_span: Optional[Span] = None, + start_time: Optional[Union[datetime, float]] = None, + end_time: Optional[Union[float, datetime]] = None, + event_metadata: Optional[dict] = None, + ): + 
"""Arize is used mainly for LLM I/O tracing, sending router+caching metrics adds bloat to arize logs""" + pass + + def create_litellm_proxy_request_started_span( + self, + start_time: datetime, + headers: dict, + ): + """Arize is used mainly for LLM I/O tracing, sending Proxy Server Request adds bloat to arize logs""" + pass diff --git a/litellm/integrations/custom_logger.py b/litellm/integrations/custom_logger.py index 457c0537bd..6f1ec88d01 100644 --- a/litellm/integrations/custom_logger.py +++ b/litellm/integrations/custom_logger.py @@ -1,7 +1,16 @@ #### What this does #### # On success, logs events to Promptlayer import traceback -from typing import TYPE_CHECKING, Any, List, Literal, Optional, Tuple, Union +from typing import ( + TYPE_CHECKING, + Any, + AsyncGenerator, + List, + Literal, + Optional, + Tuple, + Union, +) from pydantic import BaseModel @@ -14,6 +23,7 @@ from litellm.types.utils import ( EmbeddingResponse, ImageResponse, ModelResponse, + ModelResponseStream, StandardCallbackDynamicParams, StandardLoggingPayload, ) @@ -239,6 +249,7 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac "image_generation", "moderation", "audio_transcription", + "responses", ], ) -> Any: pass @@ -250,6 +261,15 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac ) -> Any: pass + async def async_post_call_streaming_iterator_hook( + self, + user_api_key_dict: UserAPIKeyAuth, + response: Any, + request_data: dict, + ) -> AsyncGenerator[ModelResponseStream, None]: + async for item in response: + yield item + #### SINGLE-USE #### - https://docs.litellm.ai/docs/observability/custom_callback#using-your-custom-callback-function def log_input_event(self, model, messages, kwargs, print_verbose, callback_func): diff --git a/litellm/integrations/custom_prompt_management.py b/litellm/integrations/custom_prompt_management.py new file mode 100644 index 0000000000..5b34ef0c34 --- /dev/null +++ b/litellm/integrations/custom_prompt_management.py @@ -0,0 +1,49 @@ +from typing import List, Optional, Tuple + +from litellm.integrations.custom_logger import CustomLogger +from litellm.integrations.prompt_management_base import ( + PromptManagementBase, + PromptManagementClient, +) +from litellm.types.llms.openai import AllMessageValues +from litellm.types.utils import StandardCallbackDynamicParams + + +class CustomPromptManagement(CustomLogger, PromptManagementBase): + def get_chat_completion_prompt( + self, + model: str, + messages: List[AllMessageValues], + non_default_params: dict, + prompt_id: str, + prompt_variables: Optional[dict], + dynamic_callback_params: StandardCallbackDynamicParams, + ) -> Tuple[str, List[AllMessageValues], dict]: + """ + Returns: + - model: str - the model to use (can be pulled from prompt management tool) + - messages: List[AllMessageValues] - the messages to use (can be pulled from prompt management tool) + - non_default_params: dict - update with any optional params (e.g. temperature, max_tokens, etc.) 
to use (can be pulled from prompt management tool) + """ + return model, messages, non_default_params + + @property + def integration_name(self) -> str: + return "custom-prompt-management" + + def should_run_prompt_management( + self, + prompt_id: str, + dynamic_callback_params: StandardCallbackDynamicParams, + ) -> bool: + return True + + def _compile_prompt_helper( + self, + prompt_id: str, + prompt_variables: Optional[dict], + dynamic_callback_params: StandardCallbackDynamicParams, + ) -> PromptManagementClient: + raise NotImplementedError( + "Custom prompt management does not support compile prompt helper" + ) diff --git a/litellm/integrations/opentelemetry.py b/litellm/integrations/opentelemetry.py index 0ec7358037..1572eb81f5 100644 --- a/litellm/integrations/opentelemetry.py +++ b/litellm/integrations/opentelemetry.py @@ -10,6 +10,7 @@ from litellm.types.services import ServiceLoggerPayload from litellm.types.utils import ( ChatCompletionMessageToolCall, Function, + StandardCallbackDynamicParams, StandardLoggingPayload, ) @@ -311,6 +312,8 @@ class OpenTelemetry(CustomLogger): ) _parent_context, parent_otel_span = self._get_span_context(kwargs) + self._add_dynamic_span_processor_if_needed(kwargs) + # Span 1: Requst sent to litellm SDK span = self.tracer.start_span( name=self._get_span_name(kwargs), @@ -341,6 +344,45 @@ class OpenTelemetry(CustomLogger): if parent_otel_span is not None: parent_otel_span.end(end_time=self._to_ns(datetime.now())) + def _add_dynamic_span_processor_if_needed(self, kwargs): + """ + Helper method to add a span processor with dynamic headers if needed. + + This allows for per-request configuration of telemetry exporters by + extracting headers from standard_callback_dynamic_params. + """ + from opentelemetry import trace + + standard_callback_dynamic_params: Optional[StandardCallbackDynamicParams] = ( + kwargs.get("standard_callback_dynamic_params") + ) + if not standard_callback_dynamic_params: + return + + # Extract headers from dynamic params + dynamic_headers = {} + + # Handle Arize headers + if standard_callback_dynamic_params.get("arize_space_key"): + dynamic_headers["space_key"] = standard_callback_dynamic_params.get( + "arize_space_key" + ) + if standard_callback_dynamic_params.get("arize_api_key"): + dynamic_headers["api_key"] = standard_callback_dynamic_params.get( + "arize_api_key" + ) + + # Only create a span processor if we have headers to use + if len(dynamic_headers) > 0: + from opentelemetry.sdk.trace import TracerProvider + + provider = trace.get_tracer_provider() + if isinstance(provider, TracerProvider): + span_processor = self._get_span_processor( + dynamic_headers=dynamic_headers + ) + provider.add_span_processor(span_processor) + def _handle_failure(self, kwargs, response_obj, start_time, end_time): from opentelemetry.trace import Status, StatusCode @@ -443,14 +485,12 @@ class OpenTelemetry(CustomLogger): self, span: Span, kwargs, response_obj: Optional[Any] ): try: - if self.callback_name == "arize": - from litellm.integrations.arize.arize import ArizeLogger - ArizeLogger.set_arize_attributes(span, kwargs, response_obj) - return - elif self.callback_name == "arize_phoenix": + if self.callback_name == "arize_phoenix": from litellm.integrations.arize.arize_phoenix import ArizePhoenixLogger - ArizePhoenixLogger.set_arize_phoenix_attributes(span, kwargs, response_obj) + ArizePhoenixLogger.set_arize_phoenix_attributes( + span, kwargs, response_obj + ) return elif self.callback_name == "langtrace": from litellm.integrations.langtrace import 
LangtraceAttributes @@ -779,7 +819,7 @@ class OpenTelemetry(CustomLogger): carrier = {"traceparent": traceparent} return TraceContextTextMapPropagator().extract(carrier=carrier), None - def _get_span_processor(self): + def _get_span_processor(self, dynamic_headers: Optional[dict] = None): from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( OTLPSpanExporter as OTLPSpanExporterGRPC, ) @@ -799,10 +839,9 @@ class OpenTelemetry(CustomLogger): self.OTEL_ENDPOINT, self.OTEL_HEADERS, ) - _split_otel_headers = {} - if self.OTEL_HEADERS is not None and isinstance(self.OTEL_HEADERS, str): - _split_otel_headers = self.OTEL_HEADERS.split("=") - _split_otel_headers = {_split_otel_headers[0]: _split_otel_headers[1]} + _split_otel_headers = OpenTelemetry._get_headers_dictionary( + headers=dynamic_headers or self.OTEL_HEADERS + ) if isinstance(self.OTEL_EXPORTER, SpanExporter): verbose_logger.debug( @@ -844,6 +883,25 @@ class OpenTelemetry(CustomLogger): ) return BatchSpanProcessor(ConsoleSpanExporter()) + @staticmethod + def _get_headers_dictionary(headers: Optional[Union[str, dict]]) -> Dict[str, str]: + """ + Convert a string or dictionary of headers into a dictionary of headers. + """ + _split_otel_headers: Dict[str, str] = {} + if headers: + if isinstance(headers, str): + # when passed HEADERS="x-honeycomb-team=B85YgLm96******" + # Split only on first '=' occurrence + parts = headers.split("=", 1) + if len(parts) == 2: + _split_otel_headers = {parts[0]: parts[1]} + else: + _split_otel_headers = {} + elif isinstance(headers, dict): + _split_otel_headers = headers + return _split_otel_headers + async def async_management_endpoint_success_hook( self, logging_payload: ManagementEndpointLoggingPayload, @@ -948,3 +1006,18 @@ class OpenTelemetry(CustomLogger): ) management_endpoint_span.set_status(Status(StatusCode.ERROR)) management_endpoint_span.end(end_time=_end_time_ns) + + def create_litellm_proxy_request_started_span( + self, + start_time: datetime, + headers: dict, + ) -> Optional[Span]: + """ + Create a span for the received proxy server request. 
+ """ + return self.tracer.start_span( + name="Received Proxy Server Request", + start_time=self._to_ns(start_time), + context=self.get_traceparent_from_header(headers=headers), + kind=self.span_kind.SERVER, + ) diff --git a/litellm/litellm_core_utils/credential_accessor.py b/litellm/litellm_core_utils/credential_accessor.py new file mode 100644 index 0000000000..d87dcc116b --- /dev/null +++ b/litellm/litellm_core_utils/credential_accessor.py @@ -0,0 +1,34 @@ +"""Utils for accessing credentials.""" + +from typing import List + +import litellm +from litellm.types.utils import CredentialItem + + +class CredentialAccessor: + @staticmethod + def get_credential_values(credential_name: str) -> dict: + """Safe accessor for credentials.""" + if not litellm.credential_list: + return {} + for credential in litellm.credential_list: + if credential.credential_name == credential_name: + return credential.credential_values.copy() + return {} + + @staticmethod + def upsert_credentials(credentials: List[CredentialItem]): + """Add a credential to the list of credentials.""" + + credential_names = [cred.credential_name for cred in litellm.credential_list] + + for credential in credentials: + if credential.credential_name in credential_names: + # Find and replace the existing credential in the list + for i, existing_cred in enumerate(litellm.credential_list): + if existing_cred.credential_name == credential.credential_name: + litellm.credential_list[i] = credential + break + else: + litellm.credential_list.append(credential) diff --git a/litellm/litellm_core_utils/exception_mapping_utils.py b/litellm/litellm_core_utils/exception_mapping_utils.py index 7a0cffab7b..54d87cc42e 100644 --- a/litellm/litellm_core_utils/exception_mapping_utils.py +++ b/litellm/litellm_core_utils/exception_mapping_utils.py @@ -127,7 +127,7 @@ def exception_type( # type: ignore # noqa: PLR0915 completion_kwargs={}, extra_kwargs={}, ): - + """Maps an LLM Provider Exception to OpenAI Exception Format""" if any( isinstance(original_exception, exc_type) for exc_type in litellm.LITELLM_EXCEPTION_TYPES diff --git a/litellm/litellm_core_utils/get_litellm_params.py b/litellm/litellm_core_utils/get_litellm_params.py index cf62375f33..4f2f43f0de 100644 --- a/litellm/litellm_core_utils/get_litellm_params.py +++ b/litellm/litellm_core_utils/get_litellm_params.py @@ -58,6 +58,8 @@ def get_litellm_params( async_call: Optional[bool] = None, ssl_verify: Optional[bool] = None, merge_reasoning_content_in_choices: Optional[bool] = None, + api_version: Optional[str] = None, + max_retries: Optional[int] = None, **kwargs, ) -> dict: litellm_params = { @@ -99,5 +101,14 @@ def get_litellm_params( "async_call": async_call, "ssl_verify": ssl_verify, "merge_reasoning_content_in_choices": merge_reasoning_content_in_choices, + "api_version": api_version, + "azure_ad_token": kwargs.get("azure_ad_token"), + "tenant_id": kwargs.get("tenant_id"), + "client_id": kwargs.get("client_id"), + "client_secret": kwargs.get("client_secret"), + "azure_username": kwargs.get("azure_username"), + "azure_password": kwargs.get("azure_password"), + "max_retries": max_retries, + "timeout": kwargs.get("timeout"), } return litellm_params diff --git a/litellm/litellm_core_utils/get_llm_provider_logic.py b/litellm/litellm_core_utils/get_llm_provider_logic.py index a64e7dd700..037351d0e6 100644 --- a/litellm/litellm_core_utils/get_llm_provider_logic.py +++ b/litellm/litellm_core_utils/get_llm_provider_logic.py @@ -129,17 +129,15 @@ def get_llm_provider( # noqa: PLR0915 model, 
custom_llm_provider ) - if custom_llm_provider: - if ( - model.split("/")[0] == custom_llm_provider - ): # handle scenario where model="azure/*" and custom_llm_provider="azure" - model = model.replace("{}/".format(custom_llm_provider), "") - - return model, custom_llm_provider, dynamic_api_key, api_base + if custom_llm_provider and ( + model.split("/")[0] != custom_llm_provider + ): # handle scenario where model="azure/*" and custom_llm_provider="azure" + model = custom_llm_provider + "/" + model if api_key and api_key.startswith("os.environ/"): dynamic_api_key = get_secret_str(api_key) # check if llm provider part of model name + if ( model.split("/", 1)[0] in litellm.provider_list and model.split("/", 1)[0] not in litellm.model_list_set @@ -571,6 +569,14 @@ def _get_openai_compatible_provider_info( # noqa: PLR0915 or "https://api.galadriel.com/v1" ) # type: ignore dynamic_api_key = api_key or get_secret_str("GALADRIEL_API_KEY") + elif custom_llm_provider == "snowflake": + api_base = ( + api_base + or get_secret_str("SNOWFLAKE_API_BASE") + or f"https://{get_secret('SNOWFLAKE_ACCOUNT_ID')}.snowflakecomputing.com/api/v2/cortex/inference:complete" + ) # type: ignore + dynamic_api_key = api_key or get_secret_str("SNOWFLAKE_JWT") + if api_base is not None and not isinstance(api_base, str): raise Exception("api base needs to be a string. api_base={}".format(api_base)) if dynamic_api_key is not None and not isinstance(dynamic_api_key, str): diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py index a3d9a57a49..3e694220a5 100644 --- a/litellm/litellm_core_utils/litellm_logging.py +++ b/litellm/litellm_core_utils/litellm_logging.py @@ -29,6 +29,7 @@ from litellm.batches.batch_utils import _handle_completed_batch from litellm.caching.caching import DualCache, InMemoryCache from litellm.caching.caching_handler import LLMCachingHandler from litellm.cost_calculator import _select_model_name_for_cost_calc +from litellm.integrations.arize.arize import ArizeLogger from litellm.integrations.custom_guardrail import CustomGuardrail from litellm.integrations.custom_logger import CustomLogger from litellm.integrations.mlflow import MlflowLogger @@ -39,11 +40,14 @@ from litellm.litellm_core_utils.redact_messages import ( redact_message_input_output_from_custom_logger, redact_message_input_output_from_logging, ) +from litellm.responses.utils import ResponseAPILoggingUtils from litellm.types.llms.openai import ( AllMessageValues, Batch, FineTuningJob, HttpxBinaryResponseContent, + ResponseCompletedEvent, + ResponsesAPIResponse, ) from litellm.types.rerank import RerankResponse from litellm.types.router import SPECIAL_MODEL_INFO_PARAMS @@ -73,11 +77,11 @@ from litellm.types.utils import ( from litellm.utils import _get_base_model_from_metadata, executor, print_verbose from ..integrations.argilla import ArgillaLogger -from ..integrations.arize.arize import ArizeLogger from ..integrations.arize.arize_phoenix import ArizePhoenixLogger from ..integrations.athina import AthinaLogger from ..integrations.azure_storage.azure_storage import AzureBlobStorageLogger from ..integrations.braintrust_logging import BraintrustLogger +from ..integrations.custom_prompt_management import CustomPromptManagement from ..integrations.datadog.datadog import DataDogLogger from ..integrations.datadog.datadog_llm_obs import DataDogLLMObsLogger from ..integrations.dynamodb import DyanmoDBLogger @@ -107,7 +111,6 @@ from .exception_mapping_utils import _get_response_headers from 
.initialize_dynamic_callback_params import ( initialize_standard_callback_dynamic_params as _initialize_standard_callback_dynamic_params, ) -from .logging_utils import _assemble_complete_response_from_streaming_chunks from .specialty_caches.dynamic_logging_cache import DynamicLoggingCache try: @@ -427,34 +430,58 @@ class Logging(LiteLLMLoggingBaseClass): prompt_variables: Optional[dict], ) -> Tuple[str, List[AllMessageValues], dict]: - for ( - custom_logger_compatible_callback - ) in litellm._known_custom_logger_compatible_callbacks: - if model.startswith(custom_logger_compatible_callback): + custom_logger = self.get_custom_logger_for_prompt_management(model) + if custom_logger: + model, messages, non_default_params = ( + custom_logger.get_chat_completion_prompt( + model=model, + messages=messages, + non_default_params=non_default_params, + prompt_id=prompt_id, + prompt_variables=prompt_variables, + dynamic_callback_params=self.standard_callback_dynamic_params, + ) + ) + self.messages = messages + return model, messages, non_default_params + + def get_custom_logger_for_prompt_management( + self, model: str + ) -> Optional[CustomLogger]: + """ + Get a custom logger for prompt management based on model name or available callbacks. + + Args: + model: The model name to check for prompt management integration + + Returns: + A CustomLogger instance if one is found, None otherwise + """ + # First check if model starts with a known custom logger compatible callback + for callback_name in litellm._known_custom_logger_compatible_callbacks: + if model.startswith(callback_name): custom_logger = _init_custom_logger_compatible_class( - logging_integration=custom_logger_compatible_callback, + logging_integration=callback_name, internal_usage_cache=None, llm_router=None, ) + if custom_logger is not None: + self.model_call_details["prompt_integration"] = model.split("/")[0] + return custom_logger - if custom_logger is None: - continue - old_name = model + # Then check for any registered CustomPromptManagement loggers + prompt_management_loggers = ( + litellm.logging_callback_manager.get_custom_loggers_for_type( + callback_type=CustomPromptManagement + ) + ) - model, messages, non_default_params = ( - custom_logger.get_chat_completion_prompt( - model=model, - messages=messages, - non_default_params=non_default_params, - prompt_id=prompt_id, - prompt_variables=prompt_variables, - dynamic_callback_params=self.standard_callback_dynamic_params, - ) - ) - self.model_call_details["prompt_integration"] = old_name.split("/")[0] - self.messages = messages + if prompt_management_loggers: + logger = prompt_management_loggers[0] + self.model_call_details["prompt_integration"] = logger.__class__.__name__ + return logger - return model, messages, non_default_params + return None def _get_raw_request_body(self, data: Optional[Union[dict, str]]) -> dict: if data is None: @@ -716,25 +743,9 @@ class Logging(LiteLLMLoggingBaseClass): Masks the headers of the request sent from LiteLLM """ - sensitive_keywords = [ - "authorization", - "token", - "key", - "secret", - ] - return { - k: ( - (v[:-44] + "*" * 44) - if (isinstance(v, str) and len(v) > 44) - else "*****" - ) - for k, v in headers.items() - if not ignore_sensitive_headers - or not any( - sensitive_keyword in k.lower() - for sensitive_keyword in sensitive_keywords - ) - } + return _get_masked_values( + headers, ignore_sensitive_values=ignore_sensitive_headers + ) def post_call( self, original_response, input=None, api_key=None, additional_args={} @@ -851,6 +862,8 @@ 
class Logging(LiteLLMLoggingBaseClass): RerankResponse, Batch, FineTuningJob, + ResponsesAPIResponse, + ResponseCompletedEvent, ], cache_hit: Optional[bool] = None, ) -> Optional[float]: @@ -1000,7 +1013,7 @@ class Logging(LiteLLMLoggingBaseClass): standard_logging_object is None and result is not None and self.stream is not True - ): # handle streaming separately + ): if ( isinstance(result, ModelResponse) or isinstance(result, ModelResponseStream) @@ -1012,6 +1025,7 @@ class Logging(LiteLLMLoggingBaseClass): or isinstance(result, RerankResponse) or isinstance(result, FineTuningJob) or isinstance(result, LiteLLMBatch) + or isinstance(result, ResponsesAPIResponse) ): ## HIDDEN PARAMS ## hidden_params = getattr(result, "_hidden_params", {}) @@ -1111,7 +1125,7 @@ class Logging(LiteLLMLoggingBaseClass): ## BUILD COMPLETE STREAMED RESPONSE complete_streaming_response: Optional[ - Union[ModelResponse, TextCompletionResponse] + Union[ModelResponse, TextCompletionResponse, ResponsesAPIResponse] ] = None if "complete_streaming_response" in self.model_call_details: return # break out of this. @@ -1633,7 +1647,7 @@ class Logging(LiteLLMLoggingBaseClass): if "async_complete_streaming_response" in self.model_call_details: return # break out of this. complete_streaming_response: Optional[ - Union[ModelResponse, TextCompletionResponse] + Union[ModelResponse, TextCompletionResponse, ResponsesAPIResponse] ] = self._get_assembled_streaming_response( result=result, start_time=start_time, @@ -2343,28 +2357,24 @@ class Logging(LiteLLMLoggingBaseClass): def _get_assembled_streaming_response( self, - result: Union[ModelResponse, TextCompletionResponse, ModelResponseStream, Any], + result: Union[ + ModelResponse, + TextCompletionResponse, + ModelResponseStream, + ResponseCompletedEvent, + Any, + ], start_time: datetime.datetime, end_time: datetime.datetime, is_async: bool, streaming_chunks: List[Any], - ) -> Optional[Union[ModelResponse, TextCompletionResponse]]: + ) -> Optional[Union[ModelResponse, TextCompletionResponse, ResponsesAPIResponse]]: if isinstance(result, ModelResponse): return result elif isinstance(result, TextCompletionResponse): return result - elif isinstance(result, ModelResponseStream): - complete_streaming_response: Optional[ - Union[ModelResponse, TextCompletionResponse] - ] = _assemble_complete_response_from_streaming_chunks( - result=result, - start_time=start_time, - end_time=end_time, - request_kwargs=self.model_call_details, - streaming_chunks=streaming_chunks, - is_async=is_async, - ) - return complete_streaming_response + elif isinstance(result, ResponseCompletedEvent): + return result.response return None def _handle_anthropic_messages_response_logging(self, result: Any) -> ModelResponse: @@ -2399,6 +2409,58 @@ class Logging(LiteLLMLoggingBaseClass): return result +def _get_masked_values( + sensitive_object: dict, + ignore_sensitive_values: bool = False, + mask_all_values: bool = False, + unmasked_length: int = 4, + number_of_asterisks: Optional[int] = 4, +) -> dict: + """ + Internal debugging helper function + + Masks the headers of the request sent from LiteLLM + + Args: + masked_length: Optional length for the masked portion (number of *). If set, will use exactly this many * + regardless of original string length. The total length will be unmasked_length + masked_length. 
+ """ + sensitive_keywords = [ + "authorization", + "token", + "key", + "secret", + ] + return { + k: ( + ( + v[: unmasked_length // 2] + + "*" * number_of_asterisks + + v[-unmasked_length // 2 :] + ) + if ( + isinstance(v, str) + and len(v) > unmasked_length + and number_of_asterisks is not None + ) + else ( + ( + v[: unmasked_length // 2] + + "*" * (len(v) - unmasked_length) + + v[-unmasked_length // 2 :] + ) + if (isinstance(v, str) and len(v) > unmasked_length) + else "*****" + ) + ) + for k, v in sensitive_object.items() + if not ignore_sensitive_values + or not any( + sensitive_keyword in k.lower() for sensitive_keyword in sensitive_keywords + ) + } + + def set_callbacks(callback_list, function_id=None): # noqa: PLR0915 """ Globally sets the callback client @@ -2621,13 +2683,13 @@ def _init_custom_logger_compatible_class( # noqa: PLR0915 ) for callback in _in_memory_loggers: if ( - isinstance(callback, OpenTelemetry) + isinstance(callback, ArizeLogger) and callback.callback_name == "arize" ): return callback # type: ignore - _otel_logger = OpenTelemetry(config=otel_config, callback_name="arize") - _in_memory_loggers.append(_otel_logger) - return _otel_logger # type: ignore + _arize_otel_logger = ArizeLogger(config=otel_config, callback_name="arize") + _in_memory_loggers.append(_arize_otel_logger) + return _arize_otel_logger # type: ignore elif logging_integration == "arize_phoenix": from litellm.integrations.opentelemetry import ( OpenTelemetry, @@ -2860,15 +2922,13 @@ def get_custom_logger_compatible_class( # noqa: PLR0915 if isinstance(callback, OpenTelemetry): return callback elif logging_integration == "arize": - from litellm.integrations.opentelemetry import OpenTelemetry - if "ARIZE_SPACE_KEY" not in os.environ: raise ValueError("ARIZE_SPACE_KEY not found in environment variables") if "ARIZE_API_KEY" not in os.environ: raise ValueError("ARIZE_API_KEY not found in environment variables") for callback in _in_memory_loggers: if ( - isinstance(callback, OpenTelemetry) + isinstance(callback, ArizeLogger) and callback.callback_name == "arize" ): return callback @@ -3111,6 +3171,12 @@ class StandardLoggingPayloadSetup: elif isinstance(usage, Usage): return usage elif isinstance(usage, dict): + if ResponseAPILoggingUtils._is_response_api_usage(usage): + return ( + ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage( + usage + ) + ) return Usage(**usage) raise ValueError(f"usage is required, got={usage} of type {type(usage)}") @@ -3215,6 +3281,7 @@ class StandardLoggingPayloadSetup: additional_headers=None, litellm_overhead_time_ms=None, batch_models=None, + litellm_model_name=None, ) if hidden_params is not None: for key in StandardLoggingHiddenParams.__annotations__.keys(): @@ -3329,6 +3396,7 @@ def get_standard_logging_object_payload( response_cost=None, litellm_overhead_time_ms=None, batch_models=None, + litellm_model_name=None, ) ) @@ -3614,6 +3682,7 @@ def create_dummy_standard_logging_payload() -> StandardLoggingPayload: additional_headers=None, litellm_overhead_time_ms=None, batch_models=None, + litellm_model_name=None, ) # Convert numeric values to appropriate types diff --git a/litellm/litellm_core_utils/llm_response_utils/response_metadata.py b/litellm/litellm_core_utils/llm_response_utils/response_metadata.py index 03595e27a4..84c80174f9 100644 --- a/litellm/litellm_core_utils/llm_response_utils/response_metadata.py +++ b/litellm/litellm_core_utils/llm_response_utils/response_metadata.py @@ -44,6 +44,7 @@ class ResponseMetadata: "additional_headers": 
process_response_headers( self._get_value_from_hidden_params("additional_headers") or {} ), + "litellm_model_name": model, } self._update_hidden_params(new_params) diff --git a/litellm/litellm_core_utils/logging_callback_manager.py b/litellm/litellm_core_utils/logging_callback_manager.py index a20e826c43..c57a2401b7 100644 --- a/litellm/litellm_core_utils/logging_callback_manager.py +++ b/litellm/litellm_core_utils/logging_callback_manager.py @@ -1,4 +1,4 @@ -from typing import Callable, List, Set, Union +from typing import Callable, List, Set, Type, Union import litellm from litellm._logging import verbose_logger @@ -86,21 +86,20 @@ class LoggingCallbackManager: callback=callback, parent_list=litellm._async_failure_callback ) - def remove_callback_from_list_by_object( - self, callback_list, obj - ): + def remove_callback_from_list_by_object(self, callback_list, obj): """ Remove callbacks that are methods of a particular object (e.g., router cleanup) """ - if not isinstance(callback_list, list): # Not list -> do nothing + if not isinstance(callback_list, list): # Not list -> do nothing return - - remove_list=[c for c in callback_list if hasattr(c, '__self__') and c.__self__ == obj] + + remove_list = [ + c for c in callback_list if hasattr(c, "__self__") and c.__self__ == obj + ] for c in remove_list: callback_list.remove(c) - def _add_string_callback_to_list( self, callback: str, parent_list: List[Union[CustomLogger, Callable, str]] ): @@ -254,3 +253,11 @@ class LoggingCallbackManager: ): matched_callbacks.add(callback) return matched_callbacks + + def get_custom_loggers_for_type( + self, callback_type: Type[CustomLogger] + ) -> List[CustomLogger]: + """ + Get all custom loggers that are instances of the given class type + """ + return [c for c in self._get_all_callbacks() if isinstance(c, callback_type)] diff --git a/litellm/litellm_core_utils/logging_utils.py b/litellm/litellm_core_utils/logging_utils.py index 6782435af6..c7512ea146 100644 --- a/litellm/litellm_core_utils/logging_utils.py +++ b/litellm/litellm_core_utils/logging_utils.py @@ -77,6 +77,10 @@ def _assemble_complete_response_from_streaming_chunks( complete_streaming_response: Optional[ Union[ModelResponse, TextCompletionResponse] ] = None + + if isinstance(result, ModelResponse): + return result + if result.choices[0].finish_reason is not None: # if it's the last chunk streaming_chunks.append(result) try: diff --git a/litellm/litellm_core_utils/prompt_templates/common_utils.py b/litellm/litellm_core_utils/prompt_templates/common_utils.py index 6ce8faa5c6..c8745f5119 100644 --- a/litellm/litellm_core_utils/prompt_templates/common_utils.py +++ b/litellm/litellm_core_utils/prompt_templates/common_utils.py @@ -77,6 +77,16 @@ def convert_content_list_to_str(message: AllMessageValues) -> str: return texts +def get_str_from_messages(messages: List[AllMessageValues]) -> str: + """ + Converts a list of messages to a string + """ + text = "" + for message in messages: + text += convert_content_list_to_str(message=message) + return text + + def is_non_content_values_set(message: AllMessageValues) -> bool: ignore_keys = ["content", "role", "name"] return any( diff --git a/litellm/litellm_core_utils/prompt_templates/factory.py b/litellm/litellm_core_utils/prompt_templates/factory.py index df7aa2cbd0..28e09d7ac8 100644 --- a/litellm/litellm_core_utils/prompt_templates/factory.py +++ b/litellm/litellm_core_utils/prompt_templates/factory.py @@ -166,148 +166,108 @@ def convert_to_ollama_image(openai_image_url: str): ) +def 
_handle_ollama_system_message( + messages: list, prompt: str, msg_i: int +) -> Tuple[str, int]: + system_content_str = "" + ## MERGE CONSECUTIVE SYSTEM CONTENT ## + while msg_i < len(messages) and messages[msg_i]["role"] == "system": + msg_content = convert_content_list_to_str(messages[msg_i]) + system_content_str += msg_content + + msg_i += 1 + + return system_content_str, msg_i + + def ollama_pt( - model, messages + model: str, messages: list ) -> Union[ str, OllamaVisionModelObject ]: # https://github.com/ollama/ollama/blob/af4cf55884ac54b9e637cd71dadfe9b7a5685877/docs/modelfile.md#template - if "instruct" in model: - prompt = custom_prompt( - role_dict={ - "system": {"pre_message": "### System:\n", "post_message": "\n"}, - "user": { - "pre_message": "### User:\n", - "post_message": "\n", - }, - "assistant": { - "pre_message": "### Response:\n", - "post_message": "\n", - }, - }, - final_prompt_value="### Response:", - messages=messages, + user_message_types = {"user", "tool", "function"} + msg_i = 0 + images = [] + prompt = "" + while msg_i < len(messages): + init_msg_i = msg_i + user_content_str = "" + ## MERGE CONSECUTIVE USER CONTENT ## + while msg_i < len(messages) and messages[msg_i]["role"] in user_message_types: + msg_content = messages[msg_i].get("content") + if msg_content: + if isinstance(msg_content, list): + for m in msg_content: + if m.get("type", "") == "image_url": + if isinstance(m["image_url"], str): + images.append(m["image_url"]) + elif isinstance(m["image_url"], dict): + images.append(m["image_url"]["url"]) + elif m.get("type", "") == "text": + user_content_str += m["text"] + else: + # Tool message content will always be a string + user_content_str += msg_content + + msg_i += 1 + + if user_content_str: + prompt += f"### User:\n{user_content_str}\n\n" + + system_content_str, msg_i = _handle_ollama_system_message( + messages, prompt, msg_i ) - else: - user_message_types = {"user", "tool", "function"} - msg_i = 0 - images = [] - prompt = "" - while msg_i < len(messages): - init_msg_i = msg_i - user_content_str = "" - ## MERGE CONSECUTIVE USER CONTENT ## - while ( - msg_i < len(messages) and messages[msg_i]["role"] in user_message_types - ): - msg_content = messages[msg_i].get("content") - if msg_content: - if isinstance(msg_content, list): - for m in msg_content: - if m.get("type", "") == "image_url": - if isinstance(m["image_url"], str): - images.append(m["image_url"]) - elif isinstance(m["image_url"], dict): - images.append(m["image_url"]["url"]) - elif m.get("type", "") == "text": - user_content_str += m["text"] - else: - # Tool message content will always be a string - user_content_str += msg_content + if system_content_str: + prompt += f"### System:\n{system_content_str}\n\n" - msg_i += 1 + assistant_content_str = "" + ## MERGE CONSECUTIVE ASSISTANT CONTENT ## + while msg_i < len(messages) and messages[msg_i]["role"] == "assistant": + assistant_content_str += convert_content_list_to_str(messages[msg_i]) + msg_i += 1 - if user_content_str: - prompt += f"### User:\n{user_content_str}\n\n" + tool_calls = messages[msg_i].get("tool_calls") + ollama_tool_calls = [] + if tool_calls: + for call in tool_calls: + call_id: str = call["id"] + function_name: str = call["function"]["name"] + arguments = json.loads(call["function"]["arguments"]) - assistant_content_str = "" - ## MERGE CONSECUTIVE ASSISTANT CONTENT ## - while msg_i < len(messages) and messages[msg_i]["role"] == "assistant": - msg_content = messages[msg_i].get("content") - if msg_content: - if 
isinstance(msg_content, list): - for m in msg_content: - if m.get("type", "") == "text": - assistant_content_str += m["text"] - elif isinstance(msg_content, str): - # Tool message content will always be a string - assistant_content_str += msg_content - - tool_calls = messages[msg_i].get("tool_calls") - ollama_tool_calls = [] - if tool_calls: - for call in tool_calls: - call_id: str = call["id"] - function_name: str = call["function"]["name"] - arguments = json.loads(call["function"]["arguments"]) - - ollama_tool_calls.append( - { - "id": call_id, - "type": "function", - "function": { - "name": function_name, - "arguments": arguments, - }, - } - ) - - if ollama_tool_calls: - assistant_content_str += ( - f"Tool Calls: {json.dumps(ollama_tool_calls, indent=2)}" + ollama_tool_calls.append( + { + "id": call_id, + "type": "function", + "function": { + "name": function_name, + "arguments": arguments, + }, + } ) - msg_i += 1 - - if assistant_content_str: - prompt += f"### Assistant:\n{assistant_content_str}\n\n" - - if msg_i == init_msg_i: # prevent infinite loops - raise litellm.BadRequestError( - message=BAD_MESSAGE_ERROR_STR + f"passed in {messages[msg_i]}", - model=model, - llm_provider="ollama", + if ollama_tool_calls: + assistant_content_str += ( + f"Tool Calls: {json.dumps(ollama_tool_calls, indent=2)}" ) - # prompt = "" - # images = [] - # for message in messages: - # if isinstance(message["content"], str): - # prompt += message["content"] - # elif isinstance(message["content"], list): - # # see https://docs.litellm.ai/docs/providers/openai#openai-vision-models - # for element in message["content"]: - # if isinstance(element, dict): - # if element["type"] == "text": - # prompt += element["text"] - # elif element["type"] == "image_url": - # base64_image = convert_to_ollama_image( - # element["image_url"]["url"] - # ) - # images.append(base64_image) - # if "tool_calls" in message: - # tool_calls = [] + msg_i += 1 - # for call in message["tool_calls"]: - # call_id: str = call["id"] - # function_name: str = call["function"]["name"] - # arguments = json.loads(call["function"]["arguments"]) + if assistant_content_str: + prompt += f"### Assistant:\n{assistant_content_str}\n\n" - # tool_calls.append( - # { - # "id": call_id, - # "type": "function", - # "function": {"name": function_name, "arguments": arguments}, - # } - # ) + if msg_i == init_msg_i: # prevent infinite loops + raise litellm.BadRequestError( + message=BAD_MESSAGE_ERROR_STR + f"passed in {messages[msg_i]}", + model=model, + llm_provider="ollama", + ) - # prompt += f"### Assistant:\nTool Calls: {json.dumps(tool_calls, indent=2)}\n\n" + response_dict: OllamaVisionModelObject = { + "prompt": prompt, + "images": images, + } - # elif "tool_call_id" in message: - # prompt += f"### User:\n{message['content']}\n\n" - - return {"prompt": prompt, "images": images} - - return prompt + return response_dict def mistral_instruct_pt(messages): diff --git a/litellm/litellm_core_utils/streaming_chunk_builder_utils.py b/litellm/litellm_core_utils/streaming_chunk_builder_utils.py index e78b10c289..7a5ee3e41e 100644 --- a/litellm/litellm_core_utils/streaming_chunk_builder_utils.py +++ b/litellm/litellm_core_utils/streaming_chunk_builder_utils.py @@ -13,6 +13,7 @@ from litellm.types.utils import ( Function, FunctionCall, ModelResponse, + ModelResponseStream, PromptTokensDetails, Usage, ) @@ -319,8 +320,12 @@ class ChunkProcessor: usage_chunk: Optional[Usage] = None if "usage" in chunk: usage_chunk = chunk["usage"] - elif isinstance(chunk, ModelResponse) 
and hasattr(chunk, "_hidden_params"): + elif ( + isinstance(chunk, ModelResponse) + or isinstance(chunk, ModelResponseStream) + ) and hasattr(chunk, "_hidden_params"): usage_chunk = chunk._hidden_params.get("usage", None) + if usage_chunk is not None: usage_chunk_dict = self._usage_chunk_calculation_helper(usage_chunk) if ( diff --git a/litellm/litellm_core_utils/streaming_handler.py b/litellm/litellm_core_utils/streaming_handler.py index 5d5a8bf256..56e64d1859 100644 --- a/litellm/litellm_core_utils/streaming_handler.py +++ b/litellm/litellm_core_utils/streaming_handler.py @@ -898,6 +898,8 @@ class CustomStreamWrapper: return model_response # Default - return StopIteration + if hasattr(model_response, "usage"): + self.chunks.append(model_response) raise StopIteration # flush any remaining holding chunk if len(self.holding_chunk) > 0: @@ -1470,6 +1472,24 @@ class CustomStreamWrapper: """ self.logging_loop = loop + def cache_streaming_response(self, processed_chunk, cache_hit: bool): + """ + Caches the streaming response + """ + if not cache_hit and self.logging_obj._llm_caching_handler is not None: + self.logging_obj._llm_caching_handler._sync_add_streaming_response_to_cache( + processed_chunk + ) + + async def async_cache_streaming_response(self, processed_chunk, cache_hit: bool): + """ + Caches the streaming response + """ + if not cache_hit and self.logging_obj._llm_caching_handler is not None: + await self.logging_obj._llm_caching_handler._add_streaming_response_to_cache( + processed_chunk + ) + def run_success_logging_and_cache_storage(self, processed_chunk, cache_hit: bool): """ Runs success logging in a thread and adds the response to the cache @@ -1501,12 +1521,6 @@ class CustomStreamWrapper: ## SYNC LOGGING self.logging_obj.success_handler(processed_chunk, None, None, cache_hit) - ## Sync store in cache - if self.logging_obj._llm_caching_handler is not None: - self.logging_obj._llm_caching_handler._sync_add_streaming_response_to_cache( - processed_chunk - ) - def finish_reason_handler(self): model_response = self.model_response_creator() _finish_reason = self.received_finish_reason or self.intermittent_finish_reason @@ -1553,10 +1567,11 @@ class CustomStreamWrapper: if response is None: continue ## LOGGING - threading.Thread( - target=self.run_success_logging_and_cache_storage, - args=(response, cache_hit), - ).start() # log response + executor.submit( + self.run_success_logging_and_cache_storage, + response, + cache_hit, + ) # log response choice = response.choices[0] if isinstance(choice, StreamingChoices): self.response_uptil_now += choice.delta.get("content", "") or "" @@ -1600,13 +1615,27 @@ class CustomStreamWrapper: "usage", getattr(complete_streaming_response, "usage"), ) - - ## LOGGING - threading.Thread( - target=self.logging_obj.success_handler, - args=(response, None, None, cache_hit), - ).start() # log response - + self.cache_streaming_response( + processed_chunk=complete_streaming_response.model_copy( + deep=True + ), + cache_hit=cache_hit, + ) + executor.submit( + self.logging_obj.success_handler, + complete_streaming_response.model_copy(deep=True), + None, + None, + cache_hit, + ) + else: + executor.submit( + self.logging_obj.success_handler, + response, + None, + None, + cache_hit, + ) if self.sent_stream_usage is False and self.send_stream_usage is True: self.sent_stream_usage = True return response @@ -1618,10 +1647,11 @@ class CustomStreamWrapper: usage = calculate_total_usage(chunks=self.chunks) processed_chunk._hidden_params["usage"] = usage ## LOGGING - 
threading.Thread( - target=self.run_success_logging_and_cache_storage, - args=(processed_chunk, cache_hit), - ).start() # log response + executor.submit( + self.run_success_logging_and_cache_storage, + processed_chunk, + cache_hit, + ) # log response return processed_chunk except Exception as e: traceback_exception = traceback.format_exc() @@ -1690,13 +1720,6 @@ class CustomStreamWrapper: if processed_chunk is None: continue - if self.logging_obj._llm_caching_handler is not None: - asyncio.create_task( - self.logging_obj._llm_caching_handler._add_streaming_response_to_cache( - processed_chunk=cast(ModelResponse, processed_chunk), - ) - ) - choice = processed_chunk.choices[0] if isinstance(choice, StreamingChoices): self.response_uptil_now += choice.delta.get("content", "") or "" @@ -1767,6 +1790,14 @@ class CustomStreamWrapper: "usage", getattr(complete_streaming_response, "usage"), ) + asyncio.create_task( + self.async_cache_streaming_response( + processed_chunk=complete_streaming_response.model_copy( + deep=True + ), + cache_hit=cache_hit, + ) + ) if self.sent_stream_usage is False and self.send_stream_usage is True: self.sent_stream_usage = True return response diff --git a/litellm/llms/aiohttp_openai/chat/transformation.py b/litellm/llms/aiohttp_openai/chat/transformation.py index 625704dbea..212db1853b 100644 --- a/litellm/llms/aiohttp_openai/chat/transformation.py +++ b/litellm/llms/aiohttp_openai/chat/transformation.py @@ -29,6 +29,7 @@ class AiohttpOpenAIChatConfig(OpenAILikeChatConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/azure/assistants.py b/litellm/llms/azure/assistants.py index 2f67b5506f..2e8c78b259 100644 --- a/litellm/llms/azure/assistants.py +++ b/litellm/llms/azure/assistants.py @@ -1,4 +1,4 @@ -from typing import Coroutine, Iterable, Literal, Optional, Union +from typing import Any, Coroutine, Dict, Iterable, Literal, Optional, Union import httpx from openai import AsyncAzureOpenAI, AzureOpenAI @@ -18,10 +18,10 @@ from ...types.llms.openai import ( SyncCursorPage, Thread, ) -from ..base import BaseLLM +from .common_utils import BaseAzureLLM -class AzureAssistantsAPI(BaseLLM): +class AzureAssistantsAPI(BaseAzureLLM): def __init__(self) -> None: super().__init__() @@ -34,18 +34,18 @@ class AzureAssistantsAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AzureOpenAI] = None, + litellm_params: Optional[dict] = None, ) -> AzureOpenAI: - received_args = locals() if client is None: - data = {} - for k, v in received_args.items(): - if k == "self" or k == "client": - pass - elif k == "api_base" and v is not None: - data["azure_endpoint"] = v - elif v is not None: - data[k] = v - azure_openai_client = AzureOpenAI(**data) # type: ignore + azure_client_params = self.initialize_azure_sdk_client( + litellm_params=litellm_params or {}, + api_key=api_key, + api_base=api_base, + model_name="", + api_version=api_version, + is_async=False, + ) + azure_openai_client = AzureOpenAI(**azure_client_params) # type: ignore else: azure_openai_client = client @@ -60,18 +60,19 @@ class AzureAssistantsAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AsyncAzureOpenAI] = None, + litellm_params: Optional[dict] = None, ) -> AsyncAzureOpenAI: - received_args = locals() if client is None: - data = {} - for k, v in received_args.items(): - if k == "self" or k == "client": - pass - elif k == 
"api_base" and v is not None: - data["azure_endpoint"] = v - elif v is not None: - data[k] = v - azure_openai_client = AsyncAzureOpenAI(**data) + azure_client_params = self.initialize_azure_sdk_client( + litellm_params=litellm_params or {}, + api_key=api_key, + api_base=api_base, + model_name="", + api_version=api_version, + is_async=True, + ) + + azure_openai_client = AsyncAzureOpenAI(**azure_client_params) # azure_openai_client = AsyncAzureOpenAI(**data) # type: ignore else: azure_openai_client = client @@ -89,6 +90,7 @@ class AzureAssistantsAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], + litellm_params: Optional[dict] = None, ) -> AsyncCursorPage[Assistant]: azure_openai_client = self.async_get_azure_client( api_key=api_key, @@ -98,6 +100,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = await azure_openai_client.beta.assistants.list() @@ -146,6 +149,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client=None, aget_assistants=None, + litellm_params: Optional[dict] = None, ): if aget_assistants is not None and aget_assistants is True: return self.async_get_assistants( @@ -156,6 +160,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) azure_openai_client = self.get_azure_client( api_key=api_key, @@ -165,6 +170,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries=max_retries, client=client, api_version=api_version, + litellm_params=litellm_params, ) response = azure_openai_client.beta.assistants.list() @@ -184,6 +190,7 @@ class AzureAssistantsAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AsyncAzureOpenAI] = None, + litellm_params: Optional[dict] = None, ) -> OpenAIMessage: openai_client = self.async_get_azure_client( api_key=api_key, @@ -193,6 +200,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) thread_message: OpenAIMessage = await openai_client.beta.threads.messages.create( # type: ignore @@ -222,6 +230,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], a_add_message: Literal[True], + litellm_params: Optional[dict] = None, ) -> Coroutine[None, None, OpenAIMessage]: ... @@ -238,6 +247,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AzureOpenAI], a_add_message: Optional[Literal[False]], + litellm_params: Optional[dict] = None, ) -> OpenAIMessage: ... 
@@ -255,6 +265,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client=None, a_add_message: Optional[bool] = None, + litellm_params: Optional[dict] = None, ): if a_add_message is not None and a_add_message is True: return self.a_add_message( @@ -267,6 +278,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) openai_client = self.get_azure_client( api_key=api_key, @@ -300,6 +312,7 @@ class AzureAssistantsAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AsyncAzureOpenAI] = None, + litellm_params: Optional[dict] = None, ) -> AsyncCursorPage[OpenAIMessage]: openai_client = self.async_get_azure_client( api_key=api_key, @@ -309,6 +322,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = await openai_client.beta.threads.messages.list(thread_id=thread_id) @@ -329,6 +343,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], aget_messages: Literal[True], + litellm_params: Optional[dict] = None, ) -> Coroutine[None, None, AsyncCursorPage[OpenAIMessage]]: ... @@ -344,6 +359,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AzureOpenAI], aget_messages: Optional[Literal[False]], + litellm_params: Optional[dict] = None, ) -> SyncCursorPage[OpenAIMessage]: ... @@ -360,6 +376,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client=None, aget_messages=None, + litellm_params: Optional[dict] = None, ): if aget_messages is not None and aget_messages is True: return self.async_get_messages( @@ -371,6 +388,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) openai_client = self.get_azure_client( api_key=api_key, @@ -380,6 +398,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = openai_client.beta.threads.messages.list(thread_id=thread_id) @@ -399,6 +418,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], messages: Optional[Iterable[OpenAICreateThreadParamsMessage]], + litellm_params: Optional[dict] = None, ) -> Thread: openai_client = self.async_get_azure_client( api_key=api_key, @@ -408,6 +428,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) data = {} @@ -435,6 +456,7 @@ class AzureAssistantsAPI(BaseLLM): messages: Optional[Iterable[OpenAICreateThreadParamsMessage]], client: Optional[AsyncAzureOpenAI], acreate_thread: Literal[True], + litellm_params: Optional[dict] = None, ) -> Coroutine[None, None, Thread]: ... @@ -451,6 +473,7 @@ class AzureAssistantsAPI(BaseLLM): messages: Optional[Iterable[OpenAICreateThreadParamsMessage]], client: Optional[AzureOpenAI], acreate_thread: Optional[Literal[False]], + litellm_params: Optional[dict] = None, ) -> Thread: ... 
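
The paired stub signatures ending in "..." above (and repeated below for create_thread, get_thread, and run_thread) are presumably @overload declarations in the full file: a Literal[True] flag maps to a Coroutine return type, Optional[Literal[False]] to the plain value, so type checkers see precise return types for both sync and async callers of the one dispatching method. A self-contained sketch of the pattern with hypothetical names (fetch_thread standing in for the real methods):

    from typing import Coroutine, Literal, Optional, overload


    class ThreadsAPI:
        @overload
        def fetch_thread(
            self, thread_id: str, aget_thread: Literal[True]
        ) -> Coroutine[None, None, dict]: ...

        @overload
        def fetch_thread(
            self, thread_id: str, aget_thread: Optional[Literal[False]] = None
        ) -> dict: ...

        def fetch_thread(self, thread_id: str, aget_thread: Optional[bool] = None):
            # runtime dispatch mirrors the hunks in this file: the async
            # branch returns an un-awaited coroutine for the caller to await
            if aget_thread is not None and aget_thread is True:
                return self._afetch_thread(thread_id)
            return {"id": thread_id}

        async def _afetch_thread(self, thread_id: str) -> dict:
            return {"id": thread_id}

The "aget_thread is not None and aget_thread is True" guard matches the dispatch style used throughout this diff.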
@@ -468,6 +491,7 @@ class AzureAssistantsAPI(BaseLLM): messages: Optional[Iterable[OpenAICreateThreadParamsMessage]], client=None, acreate_thread=None, + litellm_params: Optional[dict] = None, ): """ Here's an example: @@ -490,6 +514,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries=max_retries, client=client, messages=messages, + litellm_params=litellm_params, ) azure_openai_client = self.get_azure_client( api_key=api_key, @@ -499,6 +524,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) data = {} @@ -521,6 +547,7 @@ class AzureAssistantsAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], + litellm_params: Optional[dict] = None, ) -> Thread: openai_client = self.async_get_azure_client( api_key=api_key, @@ -530,6 +557,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = await openai_client.beta.threads.retrieve(thread_id=thread_id) @@ -550,6 +578,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], aget_thread: Literal[True], + litellm_params: Optional[dict] = None, ) -> Coroutine[None, None, Thread]: ... @@ -565,6 +594,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AzureOpenAI], aget_thread: Optional[Literal[False]], + litellm_params: Optional[dict] = None, ) -> Thread: ... @@ -581,6 +611,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client=None, aget_thread=None, + litellm_params: Optional[dict] = None, ): if aget_thread is not None and aget_thread is True: return self.async_get_thread( @@ -592,6 +623,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) openai_client = self.get_azure_client( api_key=api_key, @@ -601,6 +633,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = openai_client.beta.threads.retrieve(thread_id=thread_id) @@ -618,7 +651,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -629,6 +662,7 @@ class AzureAssistantsAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], + litellm_params: Optional[dict] = None, ) -> Run: openai_client = self.async_get_azure_client( api_key=api_key, @@ -638,6 +672,7 @@ class AzureAssistantsAPI(BaseLLM): api_version=api_version, azure_ad_token=azure_ad_token, client=client, + litellm_params=litellm_params, ) response = await openai_client.beta.threads.runs.create_and_poll( # type: ignore @@ -645,7 +680,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id=assistant_id, additional_instructions=additional_instructions, instructions=instructions, - metadata=metadata, + metadata=metadata, # type: ignore model=model, tools=tools, ) @@ -659,12 +694,13 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], tools: Optional[Iterable[AssistantToolParam]], event_handler: Optional[AssistantEventHandler], + 
litellm_params: Optional[dict] = None, ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]: - data = { + data: Dict[str, Any] = { "thread_id": thread_id, "assistant_id": assistant_id, "additional_instructions": additional_instructions, @@ -684,12 +720,13 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], tools: Optional[Iterable[AssistantToolParam]], event_handler: Optional[AssistantEventHandler], + litellm_params: Optional[dict] = None, ) -> AssistantStreamManager[AssistantEventHandler]: - data = { + data: Dict[str, Any] = { "thread_id": thread_id, "assistant_id": assistant_id, "additional_instructions": additional_instructions, @@ -711,7 +748,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -733,7 +770,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -756,7 +793,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -769,6 +806,7 @@ class AzureAssistantsAPI(BaseLLM): client=None, arun_thread=None, event_handler: Optional[AssistantEventHandler] = None, + litellm_params: Optional[dict] = None, ): if arun_thread is not None and arun_thread is True: if stream is not None and stream is True: @@ -780,6 +818,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) return self.async_run_thread_stream( client=azure_client, @@ -791,13 +830,14 @@ class AzureAssistantsAPI(BaseLLM): model=model, tools=tools, event_handler=event_handler, + litellm_params=litellm_params, ) return self.arun_thread( thread_id=thread_id, assistant_id=assistant_id, additional_instructions=additional_instructions, instructions=instructions, - metadata=metadata, + metadata=metadata, # type: ignore model=model, stream=stream, tools=tools, @@ -808,6 +848,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) openai_client = self.get_azure_client( api_key=api_key, @@ -817,6 +858,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) if stream is not None and stream is True: @@ -830,6 +872,7 @@ class AzureAssistantsAPI(BaseLLM): model=model, tools=tools, event_handler=event_handler, + litellm_params=litellm_params, ) response = openai_client.beta.threads.runs.create_and_poll( # type: ignore @@ -837,7 +880,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id=assistant_id, additional_instructions=additional_instructions, instructions=instructions, - metadata=metadata, + metadata=metadata, # type: ignore model=model, tools=tools, ) @@ -855,6 +898,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], 
create_assistant_data: dict, + litellm_params: Optional[dict] = None, ) -> Assistant: azure_openai_client = self.async_get_azure_client( api_key=api_key, @@ -864,6 +908,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = await azure_openai_client.beta.assistants.create( @@ -882,6 +927,7 @@ class AzureAssistantsAPI(BaseLLM): create_assistant_data: dict, client=None, async_create_assistants=None, + litellm_params: Optional[dict] = None, ): if async_create_assistants is not None and async_create_assistants is True: return self.async_create_assistants( @@ -893,6 +939,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries=max_retries, client=client, create_assistant_data=create_assistant_data, + litellm_params=litellm_params, ) azure_openai_client = self.get_azure_client( api_key=api_key, @@ -902,6 +949,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = azure_openai_client.beta.assistants.create(**create_assistant_data) @@ -918,6 +966,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], assistant_id: str, + litellm_params: Optional[dict] = None, ): azure_openai_client = self.async_get_azure_client( api_key=api_key, @@ -927,6 +976,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = await azure_openai_client.beta.assistants.delete( @@ -945,6 +995,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, async_delete_assistants: Optional[bool] = None, client=None, + litellm_params: Optional[dict] = None, ): if async_delete_assistants is not None and async_delete_assistants is True: return self.async_delete_assistant( @@ -956,6 +1007,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries=max_retries, client=client, assistant_id=assistant_id, + litellm_params=litellm_params, ) azure_openai_client = self.get_azure_client( api_key=api_key, @@ -965,6 +1017,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = azure_openai_client.beta.assistants.delete(assistant_id=assistant_id) diff --git a/litellm/llms/azure/audio_transcriptions.py b/litellm/llms/azure/audio_transcriptions.py index 94793295ca..be7d0fa30d 100644 --- a/litellm/llms/azure/audio_transcriptions.py +++ b/litellm/llms/azure/audio_transcriptions.py @@ -1,20 +1,20 @@ import uuid -from typing import Any, Optional +from typing import Any, Coroutine, Optional, Union from openai import AsyncAzureOpenAI, AzureOpenAI from pydantic import BaseModel -import litellm from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_name from litellm.types.utils import FileTypes -from litellm.utils import TranscriptionResponse, convert_to_model_response_object - -from .azure import ( - AzureChatCompletion, - get_azure_ad_token_from_oidc, - select_azure_base_url_or_endpoint, +from litellm.utils import ( + TranscriptionResponse, + convert_to_model_response_object, + extract_duration_from_srt_or_vtt, ) +from .azure import AzureChatCompletion +from .common_utils import AzureOpenAIError + class AzureAudioTranscription(AzureChatCompletion): def audio_transcriptions( @@ -32,32 +32,12 @@ class AzureAudioTranscription(AzureChatCompletion): client=None, azure_ad_token: Optional[str] = None, atranscription: bool = False, - ) -> TranscriptionResponse: + 
litellm_params: Optional[dict] = None, + ) -> Union[TranscriptionResponse, Coroutine[Any, Any, TranscriptionResponse]]: data = {"model": model, "file": audio_file, **optional_params} - # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "timeout": timeout, - } - - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - - if max_retries is not None: - azure_client_params["max_retries"] = max_retries - if atranscription is True: - return self.async_audio_transcriptions( # type: ignore + return self.async_audio_transcriptions( audio_file=audio_file, data=data, model_response=model_response, @@ -65,14 +45,26 @@ class AzureAudioTranscription(AzureChatCompletion): api_key=api_key, api_base=api_base, client=client, - azure_client_params=azure_client_params, max_retries=max_retries, logging_obj=logging_obj, + model=model, + litellm_params=litellm_params, + ) + + azure_client = self.get_azure_openai_client( + api_version=api_version, + api_base=api_base, + api_key=api_key, + model=model, + _is_async=False, + client=client, + litellm_params=litellm_params, + ) + if not isinstance(azure_client, AzureOpenAI): + raise AzureOpenAIError( + status_code=500, + message="azure_client is not an instance of AzureOpenAI", ) - if client is None: - azure_client = AzureOpenAI(http_client=litellm.client_session, **azure_client_params) # type: ignore - else: - azure_client = client ## LOGGING logging_obj.pre_call( @@ -109,25 +101,34 @@ class AzureAudioTranscription(AzureChatCompletion): async def async_audio_transcriptions( self, audio_file: FileTypes, + model: str, data: dict, model_response: TranscriptionResponse, timeout: float, - azure_client_params: dict, logging_obj: Any, + api_version: Optional[str] = None, api_key: Optional[str] = None, api_base: Optional[str] = None, client=None, max_retries=None, - ): + litellm_params: Optional[dict] = None, + ) -> TranscriptionResponse: response = None try: - if client is None: - async_azure_client = AsyncAzureOpenAI( - **azure_client_params, - http_client=litellm.aclient_session, + async_azure_client = self.get_azure_openai_client( + api_version=api_version, + api_base=api_base, + api_key=api_key, + model=model, + _is_async=True, + client=client, + litellm_params=litellm_params, + ) + if not isinstance(async_azure_client, AsyncAzureOpenAI): + raise AzureOpenAIError( + status_code=500, + message="async_azure_client is not an instance of AsyncAzureOpenAI", ) - else: - async_azure_client = client ## LOGGING logging_obj.pre_call( @@ -156,6 +157,8 @@ class AzureAudioTranscription(AzureChatCompletion): stringified_response = response.model_dump() else: stringified_response = TranscriptionResponse(text=response).model_dump() + duration = extract_duration_from_srt_or_vtt(response) + stringified_response["duration"] = duration ## LOGGING logging_obj.post_call( @@ -178,7 +181,12 @@ class AzureAudioTranscription(AzureChatCompletion): model_response_object=model_response, hidden_params=hidden_params, response_type="audio_transcription", - ) # type: ignore + ) + if not isinstance(response, TranscriptionResponse): + raise AzureOpenAIError( + status_code=500, + message="response is not 
an instance of TranscriptionResponse", + ) return response except Exception as e: ## LOGGING diff --git a/litellm/llms/azure/azure.py b/litellm/llms/azure/azure.py index dcd5af7b96..03c5cc09eb 100644 --- a/litellm/llms/azure/azure.py +++ b/litellm/llms/azure/azure.py @@ -1,16 +1,15 @@ import asyncio import json -import os import time -from typing import Any, Callable, Dict, List, Literal, Optional, Union +from typing import Any, Callable, Coroutine, Dict, List, Optional, Union import httpx # type: ignore from openai import APITimeoutError, AsyncAzureOpenAI, AzureOpenAI import litellm -from litellm.caching.caching import DualCache from litellm.constants import DEFAULT_MAX_RETRIES from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj +from litellm.litellm_core_utils.logging_utils import track_llm_api_timing from litellm.llms.custom_httpx.http_handler import ( AsyncHTTPHandler, HTTPHandler, @@ -25,15 +24,18 @@ from litellm.types.utils import ( from litellm.utils import ( CustomStreamWrapper, convert_to_model_response_object, - get_secret, modify_url, ) from ...types.llms.openai import HttpxBinaryResponseContent from ..base import BaseLLM -from .common_utils import AzureOpenAIError, process_azure_headers - -azure_ad_cache = DualCache() +from .common_utils import ( + AzureOpenAIError, + BaseAzureLLM, + get_azure_ad_token_from_oidc, + process_azure_headers, + select_azure_base_url_or_endpoint, +) class AzureOpenAIAssistantsAPIConfig: @@ -98,93 +100,6 @@ class AzureOpenAIAssistantsAPIConfig: return optional_params -def select_azure_base_url_or_endpoint(azure_client_params: dict): - azure_endpoint = azure_client_params.get("azure_endpoint", None) - if azure_endpoint is not None: - # see : https://github.com/openai/openai-python/blob/3d61ed42aba652b547029095a7eb269ad4e1e957/src/openai/lib/azure.py#L192 - if "/openai/deployments" in azure_endpoint: - # this is base_url, not an azure_endpoint - azure_client_params["base_url"] = azure_endpoint - azure_client_params.pop("azure_endpoint") - - return azure_client_params - - -def get_azure_ad_token_from_oidc(azure_ad_token: str): - azure_client_id = os.getenv("AZURE_CLIENT_ID", None) - azure_tenant_id = os.getenv("AZURE_TENANT_ID", None) - azure_authority_host = os.getenv( - "AZURE_AUTHORITY_HOST", "https://login.microsoftonline.com" - ) - - if azure_client_id is None or azure_tenant_id is None: - raise AzureOpenAIError( - status_code=422, - message="AZURE_CLIENT_ID and AZURE_TENANT_ID must be set", - ) - - oidc_token = get_secret(azure_ad_token) - - if oidc_token is None: - raise AzureOpenAIError( - status_code=401, - message="OIDC token could not be retrieved from secret manager.", - ) - - azure_ad_token_cache_key = json.dumps( - { - "azure_client_id": azure_client_id, - "azure_tenant_id": azure_tenant_id, - "azure_authority_host": azure_authority_host, - "oidc_token": oidc_token, - } - ) - - azure_ad_token_access_token = azure_ad_cache.get_cache(azure_ad_token_cache_key) - if azure_ad_token_access_token is not None: - return azure_ad_token_access_token - - client = litellm.module_level_client - req_token = client.post( - f"{azure_authority_host}/{azure_tenant_id}/oauth2/v2.0/token", - data={ - "client_id": azure_client_id, - "grant_type": "client_credentials", - "scope": "https://cognitiveservices.azure.com/.default", - "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", - "client_assertion": oidc_token, - }, - ) - - if req_token.status_code != 200: - raise AzureOpenAIError( - 
status_code=req_token.status_code, - message=req_token.text, - ) - - azure_ad_token_json = req_token.json() - azure_ad_token_access_token = azure_ad_token_json.get("access_token", None) - azure_ad_token_expires_in = azure_ad_token_json.get("expires_in", None) - - if azure_ad_token_access_token is None: - raise AzureOpenAIError( - status_code=422, message="Azure AD Token access_token not returned" - ) - - if azure_ad_token_expires_in is None: - raise AzureOpenAIError( - status_code=422, message="Azure AD Token expires_in not returned" - ) - - azure_ad_cache.set_cache( - key=azure_ad_token_cache_key, - value=azure_ad_token_access_token, - ttl=azure_ad_token_expires_in, - ) - - return azure_ad_token_access_token - - def _check_dynamic_azure_params( azure_client_params: dict, azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]], @@ -206,7 +121,7 @@ def _check_dynamic_azure_params( return False -class AzureChatCompletion(BaseLLM): +class AzureChatCompletion(BaseAzureLLM, BaseLLM): def __init__(self) -> None: super().__init__() @@ -226,52 +141,6 @@ class AzureChatCompletion(BaseLLM): return headers - def _get_sync_azure_client( - self, - api_version: Optional[str], - api_base: Optional[str], - api_key: Optional[str], - azure_ad_token: Optional[str], - azure_ad_token_provider: Optional[Callable], - model: str, - max_retries: int, - timeout: Union[float, httpx.Timeout], - client: Optional[Any], - client_type: Literal["sync", "async"], - ): - # init AzureOpenAI Client - azure_client_params: Dict[str, Any] = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider - if client is None: - if client_type == "sync": - azure_client = AzureOpenAI(**azure_client_params) # type: ignore - elif client_type == "async": - azure_client = AsyncAzureOpenAI(**azure_client_params) # type: ignore - else: - azure_client = client - if api_version is not None and isinstance(azure_client._custom_query, dict): - # set api_version to version passed by user - azure_client._custom_query.setdefault("api-version", api_version) - - return azure_client - def make_sync_azure_openai_chat_completion_request( self, azure_client: AzureOpenAI, @@ -294,11 +163,13 @@ class AzureChatCompletion(BaseLLM): except Exception as e: raise e + @track_llm_api_timing() async def make_azure_openai_chat_completion_request( self, azure_client: AsyncAzureOpenAI, data: dict, timeout: Union[float, httpx.Timeout], + logging_obj: LiteLLMLoggingObj, ): """ Helper to: @@ -360,37 +231,18 @@ class AzureChatCompletion(BaseLLM): ### CHECK IF CLOUDFLARE AI GATEWAY ### ### if so - set the model as part of the base url if "gateway.ai.cloudflare.com" in api_base: - ## build base url - assume api base includes resource name - if client is None: - if not api_base.endswith("/"): - api_base += "/" - api_base += f"{model}" - - azure_client_params = { - "api_version": api_version, - "base_url": f"{api_base}", - "http_client": litellm.client_session, - 
"max_retries": max_retries, - "timeout": timeout, - } - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc( - azure_ad_token - ) - - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = ( - azure_ad_token_provider - ) - - if acompletion is True: - client = AsyncAzureOpenAI(**azure_client_params) - else: - client = AzureOpenAI(**azure_client_params) + client = self._init_azure_client_for_cloudflare_ai_gateway( + api_base=api_base, + model=model, + api_version=api_version, + max_retries=max_retries, + timeout=timeout, + api_key=api_key, + azure_ad_token=azure_ad_token, + azure_ad_token_provider=azure_ad_token_provider, + acompletion=acompletion, + client=client, + ) data = {"model": None, "messages": messages, **optional_params} else: @@ -417,6 +269,7 @@ class AzureChatCompletion(BaseLLM): timeout=timeout, client=client, max_retries=max_retries, + litellm_params=litellm_params, ) else: return self.acompletion( @@ -434,6 +287,7 @@ class AzureChatCompletion(BaseLLM): logging_obj=logging_obj, max_retries=max_retries, convert_tool_call_to_json_mode=json_mode, + litellm_params=litellm_params, ) elif "stream" in optional_params and optional_params["stream"] is True: return self.streaming( @@ -449,6 +303,7 @@ class AzureChatCompletion(BaseLLM): timeout=timeout, client=client, max_retries=max_retries, + litellm_params=litellm_params, ) else: ## LOGGING @@ -470,43 +325,15 @@ class AzureChatCompletion(BaseLLM): status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params + azure_client = self.get_azure_openai_client( + api_version=api_version, + api_base=api_base, + api_key=api_key, + model=model, + client=client, + _is_async=False, + litellm_params=litellm_params, ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = ( - azure_ad_token_provider - ) - - if ( - client is None - or not isinstance(client, AzureOpenAI) - or dynamic_params - ): - azure_client = AzureOpenAI(**azure_client_params) - else: - azure_client = client - if api_version is not None and isinstance( - azure_client._custom_query, dict - ): - # set api_version to version passed by user - azure_client._custom_query.setdefault( - "api-version", api_version - ) if not isinstance(azure_client, AzureOpenAI): raise AzureOpenAIError( status_code=500, @@ -566,36 +393,22 @@ class AzureChatCompletion(BaseLLM): azure_ad_token_provider: Optional[Callable] = None, convert_tool_call_to_json_mode: Optional[bool] = None, client=None, # this is the AsyncAzureOpenAI + litellm_params: Optional[dict] = {}, ): response = None try: - # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": 
litellm.aclient_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider - # setting Azure client - if client is None or dynamic_params: - azure_client = AsyncAzureOpenAI(**azure_client_params) - else: - azure_client = client - + azure_client = self.get_azure_openai_client( + api_version=api_version, + api_base=api_base, + api_key=api_key, + model=model, + client=client, + _is_async=True, + litellm_params=litellm_params, + ) + if not isinstance(azure_client, AsyncAzureOpenAI): + raise ValueError("Azure client is not an instance of AsyncAzureOpenAI") ## LOGGING logging_obj.pre_call( input=data["messages"], @@ -615,6 +428,7 @@ class AzureChatCompletion(BaseLLM): azure_client=azure_client, data=data, timeout=timeout, + logging_obj=logging_obj, ) logging_obj.model_call_details["response_headers"] = headers @@ -680,6 +494,7 @@ class AzureChatCompletion(BaseLLM): azure_ad_token: Optional[str] = None, azure_ad_token_provider: Optional[Callable] = None, client=None, + litellm_params: Optional[dict] = {}, ): # init AzureOpenAI Client azure_client_params = { @@ -702,10 +517,20 @@ class AzureChatCompletion(BaseLLM): elif azure_ad_token_provider is not None: azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider - if client is None or dynamic_params: - azure_client = AzureOpenAI(**azure_client_params) - else: - azure_client = client + azure_client = self.get_azure_openai_client( + api_version=api_version, + api_base=api_base, + api_key=api_key, + model=model, + client=client, + _is_async=False, + litellm_params=litellm_params, + ) + if not isinstance(azure_client, AzureOpenAI): + raise AzureOpenAIError( + status_code=500, + message="azure_client is not an instance of AzureOpenAI", + ) ## LOGGING logging_obj.pre_call( input=data["messages"], @@ -747,32 +572,21 @@ class AzureChatCompletion(BaseLLM): azure_ad_token: Optional[str] = None, azure_ad_token_provider: Optional[Callable] = None, client=None, + litellm_params: Optional[dict] = {}, ): try: - # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.aclient_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params + azure_client = self.get_azure_openai_client( + api_version=api_version, + api_base=api_base, + api_key=api_key, + model=model, + client=client, + _is_async=True, + litellm_params=litellm_params, ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider - if client is None or dynamic_params: - azure_client = AsyncAzureOpenAI(**azure_client_params) - else: - azure_client = client + if not 
isinstance(azure_client, AsyncAzureOpenAI): + raise ValueError("Azure client is not an instance of AsyncAzureOpenAI") + ## LOGGING logging_obj.pre_call( input=data["messages"], @@ -792,6 +606,7 @@ class AzureChatCompletion(BaseLLM): azure_client=azure_client, data=data, timeout=timeout, + logging_obj=logging_obj, ) logging_obj.model_call_details["response_headers"] = headers @@ -822,21 +637,36 @@ class AzureChatCompletion(BaseLLM): async def aembedding( self, + model: str, data: dict, model_response: EmbeddingResponse, - azure_client_params: dict, input: list, logging_obj: LiteLLMLoggingObj, + api_base: str, api_key: Optional[str] = None, + api_version: Optional[str] = None, client: Optional[AsyncAzureOpenAI] = None, - timeout=None, - ): + timeout: Optional[Union[float, httpx.Timeout]] = None, + max_retries: Optional[int] = None, + azure_ad_token: Optional[str] = None, + azure_ad_token_provider: Optional[Callable] = None, + litellm_params: Optional[dict] = {}, + ) -> EmbeddingResponse: response = None try: - if client is None: - openai_aclient = AsyncAzureOpenAI(**azure_client_params) - else: - openai_aclient = client + + openai_aclient = self.get_azure_openai_client( + api_version=api_version, + api_base=api_base, + api_key=api_key, + model=model, + _is_async=True, + client=client, + litellm_params=litellm_params, + ) + if not isinstance(openai_aclient, AsyncAzureOpenAI): + raise ValueError("Azure client is not an instance of AsyncAzureOpenAI") + raw_response = await openai_aclient.embeddings.with_raw_response.create( **data, timeout=timeout ) @@ -850,13 +680,19 @@ class AzureChatCompletion(BaseLLM): additional_args={"complete_input_dict": data}, original_response=stringified_response, ) - return convert_to_model_response_object( + embedding_response = convert_to_model_response_object( response_object=stringified_response, model_response_object=model_response, hidden_params={"headers": headers}, _response_headers=process_azure_headers(headers), response_type="embedding", ) + if not isinstance(embedding_response, EmbeddingResponse): + raise AzureOpenAIError( + status_code=500, + message="embedding_response is not an instance of EmbeddingResponse", + ) + return embedding_response except Exception as e: ## LOGGING logging_obj.post_call( @@ -884,7 +720,8 @@ class AzureChatCompletion(BaseLLM): client=None, aembedding=None, headers: Optional[dict] = None, - ) -> EmbeddingResponse: + litellm_params: Optional[dict] = None, + ) -> Union[EmbeddingResponse, Coroutine[Any, Any, EmbeddingResponse]]: if headers: optional_params["extra_headers"] = headers if self._client_session is None: @@ -893,35 +730,6 @@ class AzureChatCompletion(BaseLLM): data = {"model": model, "input": input, **optional_params} if max_retries is None: max_retries = litellm.DEFAULT_MAX_RETRIES - if not isinstance(max_retries, int): - raise AzureOpenAIError( - status_code=422, message="max retries must be an int" - ) - - # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if aembedding: - azure_client_params["http_client"] = litellm.aclient_session - else: - azure_client_params["http_client"] = litellm.client_session - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = 
get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider - ## LOGGING logging_obj.pre_call( input=input, @@ -933,20 +741,33 @@ class AzureChatCompletion(BaseLLM): ) if aembedding is True: - return self.aembedding( # type: ignore + return self.aembedding( data=data, input=input, + model=model, logging_obj=logging_obj, api_key=api_key, model_response=model_response, - azure_client_params=azure_client_params, timeout=timeout, client=client, + litellm_params=litellm_params, + api_base=api_base, ) - if client is None: - azure_client = AzureOpenAI(**azure_client_params) # type: ignore - else: - azure_client = client + azure_client = self.get_azure_openai_client( + api_version=api_version, + api_base=api_base, + api_key=api_key, + model=model, + _is_async=False, + client=client, + litellm_params=litellm_params, + ) + if not isinstance(azure_client, AzureOpenAI): + raise AzureOpenAIError( + status_code=500, + message="azure_client is not an instance of AzureOpenAI", + ) + ## COMPLETION CALL raw_response = azure_client.embeddings.with_raw_response.create(**data, timeout=timeout) # type: ignore headers = dict(raw_response.headers) @@ -1281,6 +1102,7 @@ class AzureChatCompletion(BaseLLM): azure_ad_token_provider: Optional[Callable] = None, client=None, aimg_generation=None, + litellm_params: Optional[dict] = None, ) -> ImageResponse: try: if model and len(model) > 0: @@ -1305,25 +1127,14 @@ class AzureChatCompletion(BaseLLM): ) # init AzureOpenAI Client - azure_client_params: Dict[str, Any] = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params + azure_client_params: Dict[str, Any] = self.initialize_azure_sdk_client( + litellm_params=litellm_params or {}, + api_key=api_key, + model_name=model or "", + api_version=api_version, + api_base=api_base, + is_async=False, ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider - if aimg_generation is True: return self.aimage_generation(data=data, input=input, logging_obj=logging_obj, model_response=model_response, api_key=api_key, client=client, azure_client_params=azure_client_params, timeout=timeout, headers=headers) # type: ignore @@ -1386,6 +1197,7 @@ class AzureChatCompletion(BaseLLM): azure_ad_token_provider: Optional[Callable] = None, aspeech: Optional[bool] = None, client=None, + litellm_params: Optional[dict] = None, ) -> HttpxBinaryResponseContent: max_retries = optional_params.pop("max_retries", 2) @@ -1404,19 +1216,17 @@ class AzureChatCompletion(BaseLLM): max_retries=max_retries, timeout=timeout, client=client, + litellm_params=litellm_params, ) # type: ignore - azure_client: AzureOpenAI = self._get_sync_azure_client( + azure_client: AzureOpenAI = self.get_azure_openai_client( api_base=api_base, api_version=api_version, api_key=api_key, - azure_ad_token=azure_ad_token, - azure_ad_token_provider=azure_ad_token_provider, model=model, - max_retries=max_retries, - 
timeout=timeout, + _is_async=False, client=client, - client_type="sync", + litellm_params=litellm_params, ) # type: ignore response = azure_client.audio.speech.create( @@ -1441,19 +1251,17 @@ class AzureChatCompletion(BaseLLM): max_retries: int, timeout: Union[float, httpx.Timeout], client=None, + litellm_params: Optional[dict] = None, ) -> HttpxBinaryResponseContent: - azure_client: AsyncAzureOpenAI = self._get_sync_azure_client( + azure_client: AsyncAzureOpenAI = self.get_azure_openai_client( api_base=api_base, api_version=api_version, api_key=api_key, - azure_ad_token=azure_ad_token, - azure_ad_token_provider=azure_ad_token_provider, model=model, - max_retries=max_retries, - timeout=timeout, + _is_async=True, client=client, - client_type="async", + litellm_params=litellm_params, ) # type: ignore azure_response = await azure_client.audio.speech.create( diff --git a/litellm/llms/azure/batches/handler.py b/litellm/llms/azure/batches/handler.py index d36ae648ab..1b93c526d5 100644 --- a/litellm/llms/azure/batches/handler.py +++ b/litellm/llms/azure/batches/handler.py @@ -6,7 +6,6 @@ from typing import Any, Coroutine, Optional, Union, cast import httpx -import litellm from litellm.llms.azure.azure import AsyncAzureOpenAI, AzureOpenAI from litellm.types.llms.openai import ( Batch, @@ -16,8 +15,10 @@ from litellm.types.llms.openai import ( ) from litellm.types.utils import LiteLLMBatch +from ..common_utils import BaseAzureLLM -class AzureBatchesAPI: + +class AzureBatchesAPI(BaseAzureLLM): """ Azure methods to support for batches - create_batch() @@ -29,38 +30,6 @@ class AzureBatchesAPI: def __init__(self) -> None: super().__init__() - def get_azure_openai_client( - self, - api_key: Optional[str], - api_base: Optional[str], - timeout: Union[float, httpx.Timeout], - max_retries: Optional[int], - api_version: Optional[str] = None, - client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, - _is_async: bool = False, - ) -> Optional[Union[AzureOpenAI, AsyncAzureOpenAI]]: - received_args = locals() - openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None - if client is None: - data = {} - for k, v in received_args.items(): - if k == "self" or k == "client" or k == "_is_async": - pass - elif k == "api_base" and v is not None: - data["azure_endpoint"] = v - elif v is not None: - data[k] = v - if "api_version" not in data: - data["api_version"] = litellm.AZURE_DEFAULT_API_VERSION - if _is_async is True: - openai_client = AsyncAzureOpenAI(**data) - else: - openai_client = AzureOpenAI(**data) # type: ignore - else: - openai_client = client - - return openai_client - async def acreate_batch( self, create_batch_data: CreateBatchRequest, @@ -79,16 +48,16 @@ class AzureBatchesAPI: timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: Optional[dict] = None, ) -> Union[LiteLLMBatch, Coroutine[Any, Any, LiteLLMBatch]]: azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( self.get_azure_openai_client( api_key=api_key, api_base=api_base, - timeout=timeout, api_version=api_version, - max_retries=max_retries, client=client, _is_async=_is_async, + litellm_params=litellm_params or {}, ) ) if azure_client is None: @@ -125,16 +94,16 @@ class AzureBatchesAPI: timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AzureOpenAI] = None, + litellm_params: Optional[dict] = None, ): azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( self.get_azure_openai_client( 
api_key=api_key, api_base=api_base, api_version=api_version, - timeout=timeout, - max_retries=max_retries, client=client, _is_async=_is_async, + litellm_params=litellm_params or {}, ) ) if azure_client is None: @@ -173,16 +142,16 @@ class AzureBatchesAPI: timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AzureOpenAI] = None, + litellm_params: Optional[dict] = None, ): azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( self.get_azure_openai_client( api_key=api_key, api_base=api_base, api_version=api_version, - timeout=timeout, - max_retries=max_retries, client=client, _is_async=_is_async, + litellm_params=litellm_params or {}, ) ) if azure_client is None: @@ -212,16 +181,16 @@ class AzureBatchesAPI: after: Optional[str] = None, limit: Optional[int] = None, client: Optional[AzureOpenAI] = None, + litellm_params: Optional[dict] = None, ): azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( self.get_azure_openai_client( api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, api_version=api_version, client=client, _is_async=_is_async, + litellm_params=litellm_params or {}, ) ) if azure_client is None: diff --git a/litellm/llms/azure/chat/gpt_transformation.py b/litellm/llms/azure/chat/gpt_transformation.py index 7aa4fffab5..ee85517e66 100644 --- a/litellm/llms/azure/chat/gpt_transformation.py +++ b/litellm/llms/azure/chat/gpt_transformation.py @@ -99,6 +99,8 @@ class AzureOpenAIConfig(BaseConfig): "extra_headers", "parallel_tool_calls", "prediction", + "modalities", + "audio", ] def _is_response_format_supported_model(self, model: str) -> bool: diff --git a/litellm/llms/azure/chat/o_series_handler.py b/litellm/llms/azure/chat/o_series_handler.py index a2042b3e2a..2f3e9e6399 100644 --- a/litellm/llms/azure/chat/o_series_handler.py +++ b/litellm/llms/azure/chat/o_series_handler.py @@ -4,50 +4,69 @@ Handler file for calls to Azure OpenAI's o1/o3 family of models Written separately to handle faking streaming for o1 and o3 models. 
""" -from typing import Optional, Union +from typing import Any, Callable, Optional, Union import httpx -from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI + +from litellm.types.utils import ModelResponse from ...openai.openai import OpenAIChatCompletion -from ..common_utils import get_azure_openai_client +from ..common_utils import BaseAzureLLM -class AzureOpenAIO1ChatCompletion(OpenAIChatCompletion): - def _get_openai_client( +class AzureOpenAIO1ChatCompletion(BaseAzureLLM, OpenAIChatCompletion): + def completion( self, - is_async: bool, + model_response: ModelResponse, + timeout: Union[float, httpx.Timeout], + optional_params: dict, + litellm_params: dict, + logging_obj: Any, + model: Optional[str] = None, + messages: Optional[list] = None, + print_verbose: Optional[Callable] = None, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, - timeout: Union[float, httpx.Timeout] = httpx.Timeout(None), - max_retries: Optional[int] = 2, + dynamic_params: Optional[bool] = None, + azure_ad_token: Optional[str] = None, + acompletion: bool = False, + logger_fn=None, + headers: Optional[dict] = None, + custom_prompt_dict: dict = {}, + client=None, organization: Optional[str] = None, - client: Optional[ - Union[OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI] - ] = None, - ) -> Optional[ - Union[ - OpenAI, - AsyncOpenAI, - AzureOpenAI, - AsyncAzureOpenAI, - ] - ]: - - # Override to use Azure-specific client initialization - if not isinstance(client, AzureOpenAI) and not isinstance( - client, AsyncAzureOpenAI - ): - client = None - - return get_azure_openai_client( + custom_llm_provider: Optional[str] = None, + drop_params: Optional[bool] = None, + ): + client = self.get_azure_openai_client( + litellm_params=litellm_params, api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, - organization=organization, api_version=api_version, client=client, - _is_async=is_async, + _is_async=acompletion, + ) + return super().completion( + model_response=model_response, + timeout=timeout, + optional_params=optional_params, + litellm_params=litellm_params, + logging_obj=logging_obj, + model=model, + messages=messages, + print_verbose=print_verbose, + api_key=api_key, + api_base=api_base, + api_version=api_version, + dynamic_params=dynamic_params, + azure_ad_token=azure_ad_token, + acompletion=acompletion, + logger_fn=logger_fn, + headers=headers, + custom_prompt_dict=custom_prompt_dict, + client=client, + organization=organization, + custom_llm_provider=custom_llm_provider, + drop_params=drop_params, ) diff --git a/litellm/llms/azure/common_utils.py b/litellm/llms/azure/common_utils.py index 43f3480ed6..71092c8b99 100644 --- a/litellm/llms/azure/common_utils.py +++ b/litellm/llms/azure/common_utils.py @@ -1,13 +1,22 @@ -from typing import Callable, Optional, Union +import json +import os +from typing import Any, Callable, Dict, Optional, Union import httpx from openai import AsyncAzureOpenAI, AzureOpenAI import litellm from litellm._logging import verbose_logger +from litellm.caching.caching import DualCache from litellm.llms.base_llm.chat.transformation import BaseLLMException +from litellm.llms.openai.common_utils import BaseOpenAILLM +from litellm.secret_managers.get_azure_ad_token_provider import ( + get_azure_ad_token_provider, +) from litellm.secret_managers.main import get_secret_str +azure_ad_cache = DualCache() + class AzureOpenAIError(BaseLLMException): def __init__( @@ -29,39 +38,6 @@ class 
AzureOpenAIError(BaseLLMException): ) -def get_azure_openai_client( - api_key: Optional[str], - api_base: Optional[str], - timeout: Union[float, httpx.Timeout], - max_retries: Optional[int], - api_version: Optional[str] = None, - organization: Optional[str] = None, - client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, - _is_async: bool = False, -) -> Optional[Union[AzureOpenAI, AsyncAzureOpenAI]]: - received_args = locals() - openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None - if client is None: - data = {} - for k, v in received_args.items(): - if k == "self" or k == "client" or k == "_is_async": - pass - elif k == "api_base" and v is not None: - data["azure_endpoint"] = v - elif v is not None: - data[k] = v - if "api_version" not in data: - data["api_version"] = litellm.AZURE_DEFAULT_API_VERSION - if _is_async is True: - openai_client = AsyncAzureOpenAI(**data) - else: - openai_client = AzureOpenAI(**data) # type: ignore - else: - openai_client = client - - return openai_client - - def process_azure_headers(headers: Union[httpx.Headers, dict]) -> dict: openai_headers = {} if "x-ratelimit-limit-requests" in headers: @@ -180,3 +156,271 @@ def get_azure_ad_token_from_username_password( verbose_logger.debug("token_provider %s", token_provider) return token_provider + + +def get_azure_ad_token_from_oidc(azure_ad_token: str): + azure_client_id = os.getenv("AZURE_CLIENT_ID", None) + azure_tenant_id = os.getenv("AZURE_TENANT_ID", None) + azure_authority_host = os.getenv( + "AZURE_AUTHORITY_HOST", "https://login.microsoftonline.com" + ) + + if azure_client_id is None or azure_tenant_id is None: + raise AzureOpenAIError( + status_code=422, + message="AZURE_CLIENT_ID and AZURE_TENANT_ID must be set", + ) + + oidc_token = get_secret_str(azure_ad_token) + + if oidc_token is None: + raise AzureOpenAIError( + status_code=401, + message="OIDC token could not be retrieved from secret manager.", + ) + + azure_ad_token_cache_key = json.dumps( + { + "azure_client_id": azure_client_id, + "azure_tenant_id": azure_tenant_id, + "azure_authority_host": azure_authority_host, + "oidc_token": oidc_token, + } + ) + + azure_ad_token_access_token = azure_ad_cache.get_cache(azure_ad_token_cache_key) + if azure_ad_token_access_token is not None: + return azure_ad_token_access_token + + client = litellm.module_level_client + req_token = client.post( + f"{azure_authority_host}/{azure_tenant_id}/oauth2/v2.0/token", + data={ + "client_id": azure_client_id, + "grant_type": "client_credentials", + "scope": "https://cognitiveservices.azure.com/.default", + "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", + "client_assertion": oidc_token, + }, + ) + + if req_token.status_code != 200: + raise AzureOpenAIError( + status_code=req_token.status_code, + message=req_token.text, + ) + + azure_ad_token_json = req_token.json() + azure_ad_token_access_token = azure_ad_token_json.get("access_token", None) + azure_ad_token_expires_in = azure_ad_token_json.get("expires_in", None) + + if azure_ad_token_access_token is None: + raise AzureOpenAIError( + status_code=422, message="Azure AD Token access_token not returned" + ) + + if azure_ad_token_expires_in is None: + raise AzureOpenAIError( + status_code=422, message="Azure AD Token expires_in not returned" + ) + + azure_ad_cache.set_cache( + key=azure_ad_token_cache_key, + value=azure_ad_token_access_token, + ttl=azure_ad_token_expires_in, + ) + + return azure_ad_token_access_token + + +def 
select_azure_base_url_or_endpoint(azure_client_params: dict): + azure_endpoint = azure_client_params.get("azure_endpoint", None) + if azure_endpoint is not None: + # see : https://github.com/openai/openai-python/blob/3d61ed42aba652b547029095a7eb269ad4e1e957/src/openai/lib/azure.py#L192 + if "/openai/deployments" in azure_endpoint: + # this is base_url, not an azure_endpoint + azure_client_params["base_url"] = azure_endpoint + azure_client_params.pop("azure_endpoint") + + return azure_client_params + + +class BaseAzureLLM(BaseOpenAILLM): + def get_azure_openai_client( + self, + api_key: Optional[str], + api_base: Optional[str], + api_version: Optional[str] = None, + client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: Optional[dict] = None, + _is_async: bool = False, + model: Optional[str] = None, + ) -> Optional[Union[AzureOpenAI, AsyncAzureOpenAI]]: + openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None + client_initialization_params: dict = locals() + if client is None: + cached_client = self.get_cached_openai_client( + client_initialization_params=client_initialization_params, + client_type="azure", + ) + if cached_client: + if isinstance(cached_client, AzureOpenAI) or isinstance( + cached_client, AsyncAzureOpenAI + ): + return cached_client + + azure_client_params = self.initialize_azure_sdk_client( + litellm_params=litellm_params or {}, + api_key=api_key, + api_base=api_base, + model_name=model, + api_version=api_version, + is_async=_is_async, + ) + if _is_async is True: + openai_client = AsyncAzureOpenAI(**azure_client_params) + else: + openai_client = AzureOpenAI(**azure_client_params) # type: ignore + else: + openai_client = client + if api_version is not None and isinstance( + openai_client._custom_query, dict + ): + # set api_version to version passed by user + openai_client._custom_query.setdefault("api-version", api_version) + + # save client in-memory cache + self.set_cached_openai_client( + openai_client=openai_client, + client_initialization_params=client_initialization_params, + client_type="azure", + ) + return openai_client + + def initialize_azure_sdk_client( + self, + litellm_params: dict, + api_key: Optional[str], + api_base: Optional[str], + model_name: Optional[str], + api_version: Optional[str], + is_async: bool, + ) -> dict: + + azure_ad_token_provider: Optional[Callable[[], str]] = None + # If we have api_key, then we have higher priority + azure_ad_token = litellm_params.get("azure_ad_token") + tenant_id = litellm_params.get("tenant_id") + client_id = litellm_params.get("client_id") + client_secret = litellm_params.get("client_secret") + azure_username = litellm_params.get("azure_username") + azure_password = litellm_params.get("azure_password") + max_retries = litellm_params.get("max_retries") + timeout = litellm_params.get("timeout") + if not api_key and tenant_id and client_id and client_secret: + verbose_logger.debug("Using Azure AD Token Provider for Azure Auth") + azure_ad_token_provider = get_azure_ad_token_from_entrata_id( + tenant_id=tenant_id, + client_id=client_id, + client_secret=client_secret, + ) + if azure_username and azure_password and client_id: + azure_ad_token_provider = get_azure_ad_token_from_username_password( + azure_username=azure_username, + azure_password=azure_password, + client_id=client_id, + ) + + if azure_ad_token is not None and azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) + elif ( + not api_key + and azure_ad_token_provider is None 
+ and litellm.enable_azure_ad_token_refresh is True + ): + try: + azure_ad_token_provider = get_azure_ad_token_provider() + except ValueError: + verbose_logger.debug("Azure AD Token Provider could not be used.") + if api_version is None: + api_version = os.getenv( + "AZURE_API_VERSION", litellm.AZURE_DEFAULT_API_VERSION + ) + + _api_key = api_key + if _api_key is not None and isinstance(_api_key, str): + # only show first 8 chars of api_key + _api_key = _api_key[:8] + "*" * 15 + verbose_logger.debug( + f"Initializing Azure OpenAI Client for {model_name}, Api Base: {str(api_base)}, Api Key:{_api_key}" + ) + azure_client_params = { + "api_key": api_key, + "azure_endpoint": api_base, + "api_version": api_version, + "azure_ad_token": azure_ad_token, + "azure_ad_token_provider": azure_ad_token_provider, + } + # init http client + SSL Verification settings + if is_async is True: + azure_client_params["http_client"] = self._get_async_http_client() + else: + azure_client_params["http_client"] = self._get_sync_http_client() + + if max_retries is not None: + azure_client_params["max_retries"] = max_retries + if timeout is not None: + azure_client_params["timeout"] = timeout + + if azure_ad_token_provider is not None: + azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider + # this decides if we should set azure_endpoint or base_url on Azure OpenAI Client + # required to support GPT-4 vision enhancements, since base_url needs to be set on Azure OpenAI Client + + azure_client_params = select_azure_base_url_or_endpoint( + azure_client_params=azure_client_params + ) + + return azure_client_params + + def _init_azure_client_for_cloudflare_ai_gateway( + self, + api_base: str, + model: str, + api_version: str, + max_retries: int, + timeout: Union[float, httpx.Timeout], + api_key: Optional[str], + azure_ad_token: Optional[str], + azure_ad_token_provider: Optional[Callable[[], str]], + acompletion: bool, + client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + ) -> Union[AzureOpenAI, AsyncAzureOpenAI]: + ## build base url - assume api base includes resource name + if client is None: + if not api_base.endswith("/"): + api_base += "/" + api_base += f"{model}" + + azure_client_params: Dict[str, Any] = { + "api_version": api_version, + "base_url": f"{api_base}", + "http_client": litellm.client_session, + "max_retries": max_retries, + "timeout": timeout, + } + if api_key is not None: + azure_client_params["api_key"] = api_key + elif azure_ad_token is not None: + if azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) + + azure_client_params["azure_ad_token"] = azure_ad_token + if azure_ad_token_provider is not None: + azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider + + if acompletion is True: + client = AsyncAzureOpenAI(**azure_client_params) # type: ignore + else: + client = AzureOpenAI(**azure_client_params) # type: ignore + return client diff --git a/litellm/llms/azure/completion/handler.py b/litellm/llms/azure/completion/handler.py index fafa5665bb..8301c4d617 100644 --- a/litellm/llms/azure/completion/handler.py +++ b/litellm/llms/azure/completion/handler.py @@ -2,30 +2,16 @@ from typing import Any, Callable, Optional from openai import AsyncAzureOpenAI, AzureOpenAI -import litellm from litellm.litellm_core_utils.prompt_templates.factory import prompt_factory from litellm.utils import CustomStreamWrapper, ModelResponse, TextCompletionResponse -from ...base import BaseLLM from
...openai.completion.transformation import OpenAITextCompletionConfig -from ..common_utils import AzureOpenAIError +from ..common_utils import AzureOpenAIError, BaseAzureLLM openai_text_completion_config = OpenAITextCompletionConfig() -def select_azure_base_url_or_endpoint(azure_client_params: dict): - azure_endpoint = azure_client_params.get("azure_endpoint", None) - if azure_endpoint is not None: - # see : https://github.com/openai/openai-python/blob/3d61ed42aba652b547029095a7eb269ad4e1e957/src/openai/lib/azure.py#L192 - if "/openai/deployments" in azure_endpoint: - # this is base_url, not an azure_endpoint - azure_client_params["base_url"] = azure_endpoint - azure_client_params.pop("azure_endpoint") - - return azure_client_params - - -class AzureTextCompletion(BaseLLM): +class AzureTextCompletion(BaseAzureLLM): def __init__(self) -> None: super().__init__() @@ -60,7 +46,6 @@ class AzureTextCompletion(BaseLLM): headers: Optional[dict] = None, client=None, ): - super().completion() try: if model is None or messages is None: raise AzureOpenAIError( @@ -76,27 +61,18 @@ class AzureTextCompletion(BaseLLM): ### if so - set the model as part of the base url if "gateway.ai.cloudflare.com" in api_base: ## build base url - assume api base includes resource name - if client is None: - if not api_base.endswith("/"): - api_base += "/" - api_base += f"{model}" - - azure_client_params = { - "api_version": api_version, - "base_url": f"{api_base}", - "http_client": litellm.client_session, - "max_retries": max_retries, - "timeout": timeout, - } - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - azure_client_params["azure_ad_token"] = azure_ad_token - - if acompletion is True: - client = AsyncAzureOpenAI(**azure_client_params) - else: - client = AzureOpenAI(**azure_client_params) + client = self._init_azure_client_for_cloudflare_ai_gateway( + api_key=api_key, + api_version=api_version, + api_base=api_base, + model=model, + client=client, + max_retries=max_retries, + timeout=timeout, + azure_ad_token=azure_ad_token, + azure_ad_token_provider=azure_ad_token_provider, + acompletion=acompletion, + ) data = {"model": None, "prompt": prompt, **optional_params} else: @@ -118,6 +94,7 @@ class AzureTextCompletion(BaseLLM): azure_ad_token=azure_ad_token, timeout=timeout, client=client, + litellm_params=litellm_params, ) else: return self.acompletion( @@ -132,6 +109,7 @@ class AzureTextCompletion(BaseLLM): client=client, logging_obj=logging_obj, max_retries=max_retries, + litellm_params=litellm_params, ) elif "stream" in optional_params and optional_params["stream"] is True: return self.streaming( @@ -165,33 +143,21 @@ class AzureTextCompletion(BaseLLM): status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": max_retries, - "timeout": timeout, - "azure_ad_token_provider": azure_ad_token_provider, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params + azure_client = self.get_azure_openai_client( + api_key=api_key, + api_base=api_base, + api_version=api_version, + client=client, + litellm_params=litellm_params, + _is_async=False, + model=model, ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - azure_client_params["azure_ad_token"] = azure_ad_token - if client is 
None: - azure_client = AzureOpenAI(**azure_client_params) - else: - azure_client = client - if api_version is not None and isinstance( - azure_client._custom_query, dict - ): - # set api_version to version passed by user - azure_client._custom_query.setdefault( - "api-version", api_version - ) + + if not isinstance(azure_client, AzureOpenAI): + raise AzureOpenAIError( + status_code=500, + message="azure_client is not an instance of AzureOpenAI", + ) raw_response = azure_client.completions.with_raw_response.create( **data, timeout=timeout @@ -240,36 +206,27 @@ class AzureTextCompletion(BaseLLM): max_retries: int, azure_ad_token: Optional[str] = None, client=None, # this is the AsyncAzureOpenAI + litellm_params: dict = {}, ): response = None try: # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - azure_client_params["azure_ad_token"] = azure_ad_token - # setting Azure client - if client is None: - azure_client = AsyncAzureOpenAI(**azure_client_params) - else: - azure_client = client - if api_version is not None and isinstance( - azure_client._custom_query, dict - ): - # set api_version to version passed by user - azure_client._custom_query.setdefault("api-version", api_version) + azure_client = self.get_azure_openai_client( + api_version=api_version, + api_base=api_base, + api_key=api_key, + model=model, + _is_async=True, + client=client, + litellm_params=litellm_params, + ) + if not isinstance(azure_client, AsyncAzureOpenAI): + raise AzureOpenAIError( + status_code=500, + message="azure_client is not an instance of AsyncAzureOpenAI", + ) + ## LOGGING logging_obj.pre_call( input=data["prompt"], @@ -312,6 +269,7 @@ class AzureTextCompletion(BaseLLM): timeout: Any, azure_ad_token: Optional[str] = None, client=None, + litellm_params: dict = {}, ): max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): @@ -319,28 +277,21 @@ class AzureTextCompletion(BaseLLM): status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params + azure_client = self.get_azure_openai_client( + api_version=api_version, + api_base=api_base, + api_key=api_key, + model=model, + _is_async=False, + client=client, + litellm_params=litellm_params, ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - azure_client_params["azure_ad_token"] = azure_ad_token - if client is None: - azure_client = AzureOpenAI(**azure_client_params) - else: - azure_client = client - if api_version is not None and isinstance(azure_client._custom_query, dict): - # set api_version to version passed by user - azure_client._custom_query.setdefault("api-version", api_version) + if not isinstance(azure_client, AzureOpenAI): + raise AzureOpenAIError( + status_code=500, + message="azure_client is not an instance of AzureOpenAI", + ) + ## LOGGING logging_obj.pre_call( 
input=data["prompt"], @@ -375,33 +326,24 @@ class AzureTextCompletion(BaseLLM): timeout: Any, azure_ad_token: Optional[str] = None, client=None, + litellm_params: dict = {}, ): try: # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": data.pop("max_retries", 2), - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params + azure_client = self.get_azure_openai_client( + api_version=api_version, + api_base=api_base, + api_key=api_key, + model=model, + _is_async=True, + client=client, + litellm_params=litellm_params, ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - azure_client_params["azure_ad_token"] = azure_ad_token - if client is None: - azure_client = AsyncAzureOpenAI(**azure_client_params) - else: - azure_client = client - if api_version is not None and isinstance( - azure_client._custom_query, dict - ): - # set api_version to version passed by user - azure_client._custom_query.setdefault("api-version", api_version) + if not isinstance(azure_client, AsyncAzureOpenAI): + raise AzureOpenAIError( + status_code=500, + message="azure_client is not an instance of AsyncAzureOpenAI", + ) ## LOGGING logging_obj.pre_call( input=data["prompt"], diff --git a/litellm/llms/azure/files/handler.py b/litellm/llms/azure/files/handler.py index f442af855e..d45ac9a315 100644 --- a/litellm/llms/azure/files/handler.py +++ b/litellm/llms/azure/files/handler.py @@ -5,13 +5,12 @@ from openai import AsyncAzureOpenAI, AzureOpenAI from openai.types.file_deleted import FileDeleted from litellm._logging import verbose_logger -from litellm.llms.base import BaseLLM from litellm.types.llms.openai import * -from ..common_utils import get_azure_openai_client +from ..common_utils import BaseAzureLLM -class AzureOpenAIFilesAPI(BaseLLM): +class AzureOpenAIFilesAPI(BaseAzureLLM): """ AzureOpenAI methods to support for batches - create_file() @@ -45,14 +44,15 @@ class AzureOpenAIFilesAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: Optional[dict] = None, ) -> Union[FileObject, Coroutine[Any, Any, FileObject]]: + openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( - get_azure_openai_client( + self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, api_version=api_version, - timeout=timeout, - max_retries=max_retries, client=client, _is_async=_is_async, ) @@ -91,17 +91,16 @@ class AzureOpenAIFilesAPI(BaseLLM): max_retries: Optional[int], api_version: Optional[str] = None, client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: Optional[dict] = None, ) -> Union[ HttpxBinaryResponseContent, Coroutine[Any, Any, HttpxBinaryResponseContent] ]: openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( - get_azure_openai_client( + self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, - timeout=timeout, api_version=api_version, - max_retries=max_retries, - organization=None, client=client, _is_async=_is_async, ) @@ -144,14 +143,13 @@ class AzureOpenAIFilesAPI(BaseLLM): max_retries: Optional[int], api_version: Optional[str] = None, client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: 
Optional[dict] = None, ): openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( - get_azure_openai_client( + self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, - organization=None, api_version=api_version, client=client, _is_async=_is_async, @@ -197,14 +195,13 @@ class AzureOpenAIFilesAPI(BaseLLM): organization: Optional[str] = None, api_version: Optional[str] = None, client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: Optional[dict] = None, ): openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( - get_azure_openai_client( + self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, - organization=organization, api_version=api_version, client=client, _is_async=_is_async, @@ -252,14 +249,13 @@ class AzureOpenAIFilesAPI(BaseLLM): purpose: Optional[str] = None, api_version: Optional[str] = None, client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: Optional[dict] = None, ): openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( - get_azure_openai_client( + self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, - organization=None, # openai param api_version=api_version, client=client, _is_async=_is_async, diff --git a/litellm/llms/azure/fine_tuning/handler.py b/litellm/llms/azure/fine_tuning/handler.py index c34b181eff..3d7cc336fb 100644 --- a/litellm/llms/azure/fine_tuning/handler.py +++ b/litellm/llms/azure/fine_tuning/handler.py @@ -3,11 +3,11 @@ from typing import Optional, Union import httpx from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI -from litellm.llms.azure.files.handler import get_azure_openai_client +from litellm.llms.azure.common_utils import BaseAzureLLM from litellm.llms.openai.fine_tuning.handler import OpenAIFineTuningAPI -class AzureOpenAIFineTuningAPI(OpenAIFineTuningAPI): +class AzureOpenAIFineTuningAPI(OpenAIFineTuningAPI, BaseAzureLLM): """ AzureOpenAI methods to support fine tuning, inherits from OpenAIFineTuningAPI. 
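Every handler in this stack now sources its SDK client through BaseAzureLLM.get_azure_openai_client, which checks the in-memory client cache before constructing anything. A minimal sketch of the intended reuse behaviour (the key, endpoint, and version values below are invented):

# hedged sketch - identical initialization params should yield the cached client
from litellm.llms.azure.common_utils import BaseAzureLLM

base = BaseAzureLLM()
params = dict(
    api_key="example-key",  # hypothetical credential
    api_base="https://example-resource.openai.azure.com",  # hypothetical endpoint
    api_version="2024-06-01",
    litellm_params={},
    _is_async=True,
)
first = base.get_azure_openai_client(**params)
second = base.get_azure_openai_client(**params)
# the second call should be served from litellm.in_memory_llm_clients_cache
# until the _DEFAULT_TTL_FOR_HTTPX_CLIENTS TTL expires
assert first is second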
""" @@ -24,6 +24,7 @@ class AzureOpenAIFineTuningAPI(OpenAIFineTuningAPI): ] = None, _is_async: bool = False, api_version: Optional[str] = None, + litellm_params: Optional[dict] = None, ) -> Optional[ Union[ OpenAI, @@ -36,12 +37,10 @@ class AzureOpenAIFineTuningAPI(OpenAIFineTuningAPI): if isinstance(client, OpenAI) or isinstance(client, AsyncOpenAI): client = None - return get_azure_openai_client( + return self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, - organization=organization, api_version=api_version, client=client, _is_async=_is_async, diff --git a/litellm/llms/azure_ai/chat/transformation.py b/litellm/llms/azure_ai/chat/transformation.py index 46a1a6bf9c..154f345537 100644 --- a/litellm/llms/azure_ai/chat/transformation.py +++ b/litellm/llms/azure_ai/chat/transformation.py @@ -16,10 +16,23 @@ from litellm.llms.openai.openai import OpenAIConfig from litellm.secret_managers.main import get_secret_str from litellm.types.llms.openai import AllMessageValues from litellm.types.utils import ModelResponse, ProviderField -from litellm.utils import _add_path_to_api_base +from litellm.utils import _add_path_to_api_base, supports_tool_choice class AzureAIStudioConfig(OpenAIConfig): + def get_supported_openai_params(self, model: str) -> List: + model_supports_tool_choice = True # azure ai supports this by default + if not supports_tool_choice(model=f"azure_ai/{model}"): + model_supports_tool_choice = False + supported_params = super().get_supported_openai_params(model) + if not model_supports_tool_choice: + filtered_supported_params = [] + for param in supported_params: + if param != "tool_choice": + filtered_supported_params.append(param) + return filtered_supported_params + return supported_params + def validate_environment( self, headers: dict, @@ -54,6 +67,7 @@ class AzureAIStudioConfig(OpenAIConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ @@ -79,12 +93,14 @@ class AzureAIStudioConfig(OpenAIConfig): original_url = httpx.URL(api_base) # Extract api_version or use default - api_version = cast(Optional[str], optional_params.get("api_version")) + api_version = cast(Optional[str], litellm_params.get("api_version")) - # Check if 'api-version' is already present - if "api-version" not in original_url.params and api_version: - # Add api_version to optional_params - original_url.params["api-version"] = api_version + # Create a new dictionary with existing params + query_params = dict(original_url.params) + + # Add api_version if needed + if "api-version" not in query_params and api_version: + query_params["api-version"] = api_version # Add the path to the base URL if "services.ai.azure.com" in api_base: @@ -96,8 +112,7 @@ class AzureAIStudioConfig(OpenAIConfig): api_base=api_base, ending_path="/chat/completions" ) - # Convert optional_params to query parameters - query_params = original_url.params + # Use the new query_params dictionary final_url = httpx.URL(new_url).copy_with(params=query_params) return str(final_url) diff --git a/litellm/llms/base_llm/audio_transcription/transformation.py b/litellm/llms/base_llm/audio_transcription/transformation.py index 66140455d9..e550c574e2 100644 --- a/litellm/llms/base_llm/audio_transcription/transformation.py +++ b/litellm/llms/base_llm/audio_transcription/transformation.py @@ -30,6 +30,7 @@ class BaseAudioTranscriptionConfig(BaseConfig, ABC): api_base: 
Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/base_llm/chat/transformation.py b/litellm/llms/base_llm/chat/transformation.py index 8327a10464..1b5a6bc58e 100644 --- a/litellm/llms/base_llm/chat/transformation.py +++ b/litellm/llms/base_llm/chat/transformation.py @@ -270,6 +270,7 @@ class BaseConfig(ABC): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/base_llm/completion/transformation.py b/litellm/llms/base_llm/completion/transformation.py index ca258c2562..9432f02da1 100644 --- a/litellm/llms/base_llm/completion/transformation.py +++ b/litellm/llms/base_llm/completion/transformation.py @@ -31,6 +31,7 @@ class BaseTextCompletionConfig(BaseConfig, ABC): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/base_llm/embedding/transformation.py b/litellm/llms/base_llm/embedding/transformation.py index 940c6bf225..68c0a7c05a 100644 --- a/litellm/llms/base_llm/embedding/transformation.py +++ b/litellm/llms/base_llm/embedding/transformation.py @@ -45,6 +45,7 @@ class BaseEmbeddingConfig(BaseConfig, ABC): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/base_llm/image_variations/transformation.py b/litellm/llms/base_llm/image_variations/transformation.py index dcb53bea94..4d1cd6eebb 100644 --- a/litellm/llms/base_llm/image_variations/transformation.py +++ b/litellm/llms/base_llm/image_variations/transformation.py @@ -36,6 +36,7 @@ class BaseImageVariationConfig(BaseConfig, ABC): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/base_llm/responses/transformation.py b/litellm/llms/base_llm/responses/transformation.py new file mode 100644 index 0000000000..c41d63842b --- /dev/null +++ b/litellm/llms/base_llm/responses/transformation.py @@ -0,0 +1,133 @@ +import types +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any, Dict, Optional, Union + +import httpx + +from litellm.types.llms.openai import ( + ResponseInputParam, + ResponsesAPIOptionalRequestParams, + ResponsesAPIRequestParams, + ResponsesAPIResponse, + ResponsesAPIStreamingResponse, +) +from litellm.types.router import GenericLiteLLMParams + +if TYPE_CHECKING: + from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj + + from ..chat.transformation import BaseLLMException as _BaseLLMException + + LiteLLMLoggingObj = _LiteLLMLoggingObj + BaseLLMException = _BaseLLMException +else: + LiteLLMLoggingObj = Any + BaseLLMException = Any + + +class BaseResponsesAPIConfig(ABC): + def __init__(self): + pass + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not k.startswith("_abc") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + + @abstractmethod + def get_supported_openai_params(self, model: str) -> list: + pass + + @abstractmethod + def map_openai_params( + self, + response_api_optional_params: ResponsesAPIOptionalRequestParams, + model: str, + drop_params: bool, + ) -> Dict: + + pass + + @abstractmethod + def 
validate_environment( + self, + headers: dict, + model: str, + api_key: Optional[str] = None, + ) -> dict: + return {} + + @abstractmethod + def get_complete_url( + self, + api_base: Optional[str], + model: str, + stream: Optional[bool] = None, + ) -> str: + """ + OPTIONAL + + Get the complete url for the request + + Some providers need `model` in `api_base` + """ + if api_base is None: + raise ValueError("api_base is required") + return api_base + + @abstractmethod + def transform_responses_api_request( + self, + model: str, + input: Union[str, ResponseInputParam], + response_api_optional_request_params: Dict, + litellm_params: GenericLiteLLMParams, + headers: dict, + ) -> ResponsesAPIRequestParams: + pass + + @abstractmethod + def transform_response_api_response( + self, + model: str, + raw_response: httpx.Response, + logging_obj: LiteLLMLoggingObj, + ) -> ResponsesAPIResponse: + pass + + @abstractmethod + def transform_streaming_response( + self, + model: str, + parsed_chunk: dict, + logging_obj: LiteLLMLoggingObj, + ) -> ResponsesAPIStreamingResponse: + """ + Transform a parsed streaming response chunk into a ResponsesAPIStreamingResponse + """ + pass + + def get_error_class( + self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers] + ) -> BaseLLMException: + from ..chat.transformation import BaseLLMException + + return BaseLLMException( + status_code=status_code, + message=error_message, + headers=headers, + ) diff --git a/litellm/llms/bedrock/base_aws_llm.py b/litellm/llms/bedrock/base_aws_llm.py index 86b47675d4..5482d80687 100644 --- a/litellm/llms/bedrock/base_aws_llm.py +++ b/litellm/llms/bedrock/base_aws_llm.py @@ -279,16 +279,30 @@ class BaseAWSLLM: return None def _get_aws_region_name( - self, optional_params: dict, model: Optional[str] = None + self, + optional_params: dict, + model: Optional[str] = None, + model_id: Optional[str] = None, ) -> str: """ - Get the AWS region name from the environment variables + Get the AWS region name from the environment variables. + + Parameters: + optional_params (dict): Optional parameters for the model call + model (str): The model name + model_id (str): The model ID. This is the ARN of the model, if passed in as a separate param.
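An explicit aws_region_name in optional_params wins; failing that, the region is read out of the model ARN (model_id first, then model), and only then out of the environment. A tiny worked example of the ARN fallback (the ARN is invented; the layout arn:partition:service:region:account:resource is the standard one):

# region is the fourth colon-separated segment of an ARN
arn = "arn:aws:bedrock:eu-central-1:123456789012:application-inference-profile/example"
region = arn.split(":")[3]
assert region == "eu-central-1"  # what _get_aws_region_from_model_arn is expected to recover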
+ + Returns: + str: The AWS region name """ aws_region_name = optional_params.get("aws_region_name", None) ### SET REGION NAME ### if aws_region_name is None: # check model arn # - aws_region_name = self._get_aws_region_from_model_arn(model) + if model_id is not None: + aws_region_name = self._get_aws_region_from_model_arn(model_id) + else: + aws_region_name = self._get_aws_region_from_model_arn(model) # check env # litellm_aws_region_name = get_secret("AWS_REGION_NAME", None) diff --git a/litellm/llms/bedrock/chat/converse_handler.py b/litellm/llms/bedrock/chat/converse_handler.py index b70c15b3e1..a4230177b5 100644 --- a/litellm/llms/bedrock/chat/converse_handler.py +++ b/litellm/llms/bedrock/chat/converse_handler.py @@ -13,7 +13,7 @@ from litellm.llms.custom_httpx.http_handler import ( get_async_httpx_client, ) from litellm.types.utils import ModelResponse -from litellm.utils import CustomStreamWrapper, get_secret +from litellm.utils import CustomStreamWrapper from ..base_aws_llm import BaseAWSLLM, Credentials from ..common_utils import BedrockError @@ -268,23 +268,29 @@ class BedrockConverseLLM(BaseAWSLLM): ## SETUP ## stream = optional_params.pop("stream", None) - modelId = optional_params.pop("model_id", None) + unencoded_model_id = optional_params.pop("model_id", None) fake_stream = optional_params.pop("fake_stream", False) json_mode = optional_params.get("json_mode", False) - if modelId is not None: - modelId = self.encode_model_id(model_id=modelId) + if unencoded_model_id is not None: + modelId = self.encode_model_id(model_id=unencoded_model_id) else: - modelId = model + modelId = self.encode_model_id(model_id=model) if stream is True and "ai21" in modelId: fake_stream = True + ### SET REGION NAME ### + aws_region_name = self._get_aws_region_name( + optional_params=optional_params, + model=model, + model_id=unencoded_model_id, + ) + ## CREDENTIALS ## # pop aws_secret_access_key, aws_access_key_id, aws_region_name from kwargs, since completion calls fail with them aws_secret_access_key = optional_params.pop("aws_secret_access_key", None) aws_access_key_id = optional_params.pop("aws_access_key_id", None) aws_session_token = optional_params.pop("aws_session_token", None) - aws_region_name = optional_params.pop("aws_region_name", None) aws_role_name = optional_params.pop("aws_role_name", None) aws_session_name = optional_params.pop("aws_session_name", None) aws_profile_name = optional_params.pop("aws_profile_name", None) @@ -293,25 +299,7 @@ class BedrockConverseLLM(BaseAWSLLM): ) # https://bedrock-runtime.{region_name}.amazonaws.com aws_web_identity_token = optional_params.pop("aws_web_identity_token", None) aws_sts_endpoint = optional_params.pop("aws_sts_endpoint", None) - - ### SET REGION NAME ### - if aws_region_name is None: - # check env # - litellm_aws_region_name = get_secret("AWS_REGION_NAME", None) - - if litellm_aws_region_name is not None and isinstance( - litellm_aws_region_name, str - ): - aws_region_name = litellm_aws_region_name - - standard_aws_region_name = get_secret("AWS_REGION", None) - if standard_aws_region_name is not None and isinstance( - standard_aws_region_name, str - ): - aws_region_name = standard_aws_region_name - - if aws_region_name is None: - aws_region_name = "us-west-2" + optional_params.pop("aws_region_name", None) litellm_params["aws_region_name"] = ( aws_region_name # [DO NOT DELETE] important for async calls diff --git a/litellm/llms/bedrock/chat/converse_transformation.py b/litellm/llms/bedrock/chat/converse_transformation.py index 
0b0d55f23d..bb874cfe38 100644 --- a/litellm/llms/bedrock/chat/converse_transformation.py +++ b/litellm/llms/bedrock/chat/converse_transformation.py @@ -31,7 +31,7 @@ from litellm.types.llms.openai import ( ChatCompletionUserMessage, OpenAIMessageContentListBlock, ) -from litellm.types.utils import ModelResponse, Usage +from litellm.types.utils import ModelResponse, PromptTokensDetailsWrapper, Usage from litellm.utils import add_dummy_tool, has_tool_call_blocks from ..common_utils import BedrockError, BedrockModelInfo, get_bedrock_tool_name @@ -602,6 +602,33 @@ class AmazonConverseConfig(BaseConfig): thinking_blocks_list.append(_thinking_block) return thinking_blocks_list + def _transform_usage(self, usage: ConverseTokenUsageBlock) -> Usage: + input_tokens = usage["inputTokens"] + output_tokens = usage["outputTokens"] + total_tokens = usage["totalTokens"] + cache_creation_input_tokens: int = 0 + cache_read_input_tokens: int = 0 + + if "cacheReadInputTokens" in usage: + cache_read_input_tokens = usage["cacheReadInputTokens"] + input_tokens += cache_read_input_tokens + if "cacheWriteInputTokens" in usage: + cache_creation_input_tokens = usage["cacheWriteInputTokens"] + input_tokens += cache_creation_input_tokens + + prompt_tokens_details = PromptTokensDetailsWrapper( + cached_tokens=cache_read_input_tokens + ) + openai_usage = Usage( + prompt_tokens=input_tokens, + completion_tokens=output_tokens, + total_tokens=total_tokens, + prompt_tokens_details=prompt_tokens_details, + cache_creation_input_tokens=cache_creation_input_tokens, + cache_read_input_tokens=cache_read_input_tokens, + ) + return openai_usage + def _transform_response( self, model: str, @@ -730,9 +757,7 @@ class AmazonConverseConfig(BaseConfig): chat_completion_message["tool_calls"] = tools ## CALCULATING USAGE - bedrock returns usage in the headers - input_tokens = completion_response["usage"]["inputTokens"] - output_tokens = completion_response["usage"]["outputTokens"] - total_tokens = completion_response["usage"]["totalTokens"] + usage = self._transform_usage(completion_response["usage"]) model_response.choices = [ litellm.Choices( @@ -743,11 +768,7 @@ class AmazonConverseConfig(BaseConfig): ] model_response.created = int(time.time()) model_response.model = model - usage = Usage( - prompt_tokens=input_tokens, - completion_tokens=output_tokens, - total_tokens=total_tokens, - ) + setattr(model_response, "usage", usage) # Add "trace" from Bedrock guardrails - if user has opted in to returning it diff --git a/litellm/llms/bedrock/chat/invoke_handler.py b/litellm/llms/bedrock/chat/invoke_handler.py index 27289164f7..84ac592c41 100644 --- a/litellm/llms/bedrock/chat/invoke_handler.py +++ b/litellm/llms/bedrock/chat/invoke_handler.py @@ -72,6 +72,9 @@ _response_stream_shape_cache = None bedrock_tool_name_mappings: InMemoryCache = InMemoryCache( max_size_in_memory=50, default_ttl=600 ) +from litellm.llms.bedrock.chat.converse_transformation import AmazonConverseConfig + +converse_config = AmazonConverseConfig() class AmazonCohereChatConfig: @@ -1231,7 +1234,9 @@ if len(self.content_blocks) == 0: return False - if "text" in self.content_blocks[0]: + if ( + "toolUse" not in self.content_blocks[0] + ): # be explicit - only attempt this when the first content block is a toolUse block, to prevent json decoding errors on plain text return False for block in self.content_blocks: @@ -1269,10 +1274,10 @@ def converse_chunk_parser(self, chunk_data: dict) -> ModelResponseStream: try: verbose_logger.debug("\n\nRaw Chunk:
{}\n\n".format(chunk_data)) + chunk_data["usage"] = { + "inputTokens": 3, + "outputTokens": 392, + "totalTokens": 2191, + "cacheReadInputTokens": 1796, + "cacheWriteInputTokens": 0, + } text = "" tool_use: Optional[ChatCompletionToolCallChunk] = None finish_reason = "" - usage: Optional[ChatCompletionUsageBlock] = None + usage: Optional[Usage] = None provider_specific_fields: dict = {} reasoning_content: Optional[str] = None thinking_blocks: Optional[List[ChatCompletionThinkingBlock]] = None @@ -1348,11 +1360,7 @@ class AWSEventStreamDecoder: elif "stopReason" in chunk_data: finish_reason = map_finish_reason(chunk_data.get("stopReason", "stop")) elif "usage" in chunk_data: - usage = ChatCompletionUsageBlock( - prompt_tokens=chunk_data.get("inputTokens", 0), - completion_tokens=chunk_data.get("outputTokens", 0), - total_tokens=chunk_data.get("totalTokens", 0), - ) + usage = converse_config._transform_usage(chunk_data.get("usage", {})) model_response_provider_specific_fields = {} if "trace" in chunk_data: diff --git a/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py b/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py index 5414429d4c..133eb659df 100644 --- a/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py +++ b/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py @@ -76,6 +76,7 @@ class AmazonInvokeConfig(BaseConfig, BaseAWSLLM): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/bedrock/common_utils.py b/litellm/llms/bedrock/common_utils.py index 54be359897..4677a579ed 100644 --- a/litellm/llms/bedrock/common_utils.py +++ b/litellm/llms/bedrock/common_utils.py @@ -336,13 +336,7 @@ class BedrockModelInfo(BaseLLMModelInfo): return model @staticmethod - def get_base_model(model: str) -> str: - """ - Get the base model from the given model name. - - Handle model names like - "us.meta.llama3-2-11b-instruct-v1:0" -> "meta.llama3-2-11b-instruct-v1" - AND "meta.llama3-2-11b-instruct-v1:0" -> "meta.llama3-2-11b-instruct-v1" - """ + def get_non_litellm_routing_model_name(model: str) -> str: if model.startswith("bedrock/"): model = model.split("/", 1)[1] @@ -352,6 +346,18 @@ class BedrockModelInfo(BaseLLMModelInfo): if model.startswith("invoke/"): model = model.split("/", 1)[1] + return model + + @staticmethod + def get_base_model(model: str) -> str: + """ + Get the base model from the given model name. + + Handle model names like - "us.meta.llama3-2-11b-instruct-v1:0" -> "meta.llama3-2-11b-instruct-v1" + AND "meta.llama3-2-11b-instruct-v1:0" -> "meta.llama3-2-11b-instruct-v1" + """ + + model = BedrockModelInfo.get_non_litellm_routing_model_name(model=model) model = BedrockModelInfo.extract_model_name_from_arn(model) potential_region = model.split(".", 1)[0] @@ -386,12 +392,16 @@ class BedrockModelInfo(BaseLLMModelInfo): Get the bedrock route for the given model. 
""" base_model = BedrockModelInfo.get_base_model(model) + alt_model = BedrockModelInfo.get_non_litellm_routing_model_name(model=model) if "invoke/" in model: return "invoke" elif "converse_like" in model: return "converse_like" elif "converse/" in model: return "converse" - elif base_model in litellm.bedrock_converse_models: + elif ( + base_model in litellm.bedrock_converse_models + or alt_model in litellm.bedrock_converse_models + ): return "converse" return "invoke" diff --git a/litellm/llms/bedrock/image/amazon_nova_canvas_transformation.py b/litellm/llms/bedrock/image/amazon_nova_canvas_transformation.py new file mode 100644 index 0000000000..de46edb923 --- /dev/null +++ b/litellm/llms/bedrock/image/amazon_nova_canvas_transformation.py @@ -0,0 +1,106 @@ +import types +from typing import List, Optional + +from openai.types.image import Image + +from litellm.types.llms.bedrock import ( + AmazonNovaCanvasTextToImageRequest, AmazonNovaCanvasTextToImageResponse, + AmazonNovaCanvasTextToImageParams, AmazonNovaCanvasRequestBase, +) +from litellm.types.utils import ImageResponse + + +class AmazonNovaCanvasConfig: + """ + Reference: https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/model-catalog/serverless/amazon.nova-canvas-v1:0 + + """ + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + + @classmethod + def get_supported_openai_params(cls, model: Optional[str] = None) -> List: + """ + """ + return ["n", "size", "quality"] + + @classmethod + def _is_nova_model(cls, model: Optional[str] = None) -> bool: + """ + Returns True if the model is a Nova Canvas model + + Nova models follow this pattern: + + """ + if model: + if "amazon.nova-canvas" in model: + return True + return False + + @classmethod + def transform_request_body( + cls, text: str, optional_params: dict + ) -> AmazonNovaCanvasRequestBase: + """ + Transform the request body for Amazon Nova Canvas model + """ + task_type = optional_params.pop("taskType", "TEXT_IMAGE") + image_generation_config = optional_params.pop("imageGenerationConfig", {}) + image_generation_config = {**image_generation_config, **optional_params} + if task_type == "TEXT_IMAGE": + text_to_image_params = image_generation_config.pop("textToImageParams", {}) + text_to_image_params = {"text" :text, **text_to_image_params} + text_to_image_params = AmazonNovaCanvasTextToImageParams(**text_to_image_params) + return AmazonNovaCanvasTextToImageRequest(textToImageParams=text_to_image_params, taskType=task_type, + imageGenerationConfig=image_generation_config) + raise NotImplementedError(f"Task type {task_type} is not supported") + + @classmethod + def map_openai_params(cls, non_default_params: dict, optional_params: dict) -> dict: + """ + Map the OpenAI params to the Bedrock params + """ + _size = non_default_params.get("size") + if _size is not None: + width, height = _size.split("x") + optional_params["width"], optional_params["height"] = int(width), int(height) + if non_default_params.get("n") is not None: + optional_params["numberOfImages"] = non_default_params.get("n") + if non_default_params.get("quality") is not None: + if non_default_params.get("quality") in ("hd", "premium"): + optional_params["quality"] = "premium" + if non_default_params.get("quality") == "standard": + optional_params["quality"] = "standard" + return 
optional_params + + @classmethod + def transform_response_dict_to_openai_response( + cls, model_response: ImageResponse, response_dict: dict + ) -> ImageResponse: + """ + Transform the response dict to the OpenAI response + """ + + nova_response = AmazonNovaCanvasTextToImageResponse(**response_dict) + openai_images: List[Image] = [] + for _img in nova_response.get("images", []): + openai_images.append(Image(b64_json=_img)) + + model_response.data = openai_images + return model_response diff --git a/litellm/llms/bedrock/image/image_handler.py b/litellm/llms/bedrock/image/image_handler.py index 59a80b2222..8f7762e547 100644 --- a/litellm/llms/bedrock/image/image_handler.py +++ b/litellm/llms/bedrock/image/image_handler.py @@ -266,6 +266,8 @@ class BedrockImageGeneration(BaseAWSLLM): "text_prompts": [{"text": prompt, "weight": 1}], **inference_params, } + elif provider == "amazon": + return dict(litellm.AmazonNovaCanvasConfig.transform_request_body(text=prompt, optional_params=optional_params)) else: raise BedrockError( status_code=422, message=f"Unsupported model={model}, passed in" @@ -301,6 +303,7 @@ class BedrockImageGeneration(BaseAWSLLM): config_class = ( litellm.AmazonStability3Config if litellm.AmazonStability3Config._is_stability_3_model(model=model) + else litellm.AmazonNovaCanvasConfig if litellm.AmazonNovaCanvasConfig._is_nova_model(model=model) else litellm.AmazonStabilityConfig ) config_class.transform_response_dict_to_openai_response( diff --git a/litellm/llms/cloudflare/chat/transformation.py b/litellm/llms/cloudflare/chat/transformation.py index 555e3c21f4..83c7483df9 100644 --- a/litellm/llms/cloudflare/chat/transformation.py +++ b/litellm/llms/cloudflare/chat/transformation.py @@ -79,6 +79,7 @@ class CloudflareChatConfig(BaseConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: if api_base is None: diff --git a/litellm/llms/custom_httpx/aiohttp_handler.py b/litellm/llms/custom_httpx/aiohttp_handler.py index 4a9e07016f..c865fee17e 100644 --- a/litellm/llms/custom_httpx/aiohttp_handler.py +++ b/litellm/llms/custom_httpx/aiohttp_handler.py @@ -234,6 +234,7 @@ class BaseLLMAIOHTTPHandler: api_base=api_base, model=model, optional_params=optional_params, + litellm_params=litellm_params, stream=stream, ) @@ -483,6 +484,7 @@ class BaseLLMAIOHTTPHandler: api_base=api_base, model=model, optional_params=optional_params, + litellm_params=litellm_params, stream=False, ) diff --git a/litellm/llms/custom_httpx/http_handler.py b/litellm/llms/custom_httpx/http_handler.py index 736b85dc53..34d70434d5 100644 --- a/litellm/llms/custom_httpx/http_handler.py +++ b/litellm/llms/custom_httpx/http_handler.py @@ -1,5 +1,6 @@ import asyncio import os +import ssl import time from typing import TYPE_CHECKING, Any, Callable, List, Mapping, Optional, Union @@ -94,7 +95,7 @@ class AsyncHTTPHandler: event_hooks: Optional[Mapping[str, List[Callable[..., Any]]]] = None, concurrent_limit=1000, client_alias: Optional[str] = None, # name for client in logs - ssl_verify: Optional[Union[bool, str]] = None, + ssl_verify: Optional[VerifyTypes] = None, ): self.timeout = timeout self.event_hooks = event_hooks @@ -111,13 +112,33 @@ class AsyncHTTPHandler: timeout: Optional[Union[float, httpx.Timeout]], concurrent_limit: int, event_hooks: Optional[Mapping[str, List[Callable[..., Any]]]], - ssl_verify: Optional[Union[bool, str]] = None, + ssl_verify: Optional[VerifyTypes] = None, ) -> httpx.AsyncClient: # SSL certificates (a.k.a CA bundle) 
used to verify the identity of requested hosts. # /path/to/certificate.pem if ssl_verify is None: ssl_verify = os.getenv("SSL_VERIFY", litellm.ssl_verify) + + ssl_security_level = os.getenv("SSL_SECURITY_LEVEL") + + # If ssl_verify is disabled (falsy) and a lower security level is requested + if ( + not ssl_verify + and ssl_security_level + and isinstance(ssl_security_level, str) + ): + # Create a custom SSL context with reduced security level + custom_ssl_context = ssl.create_default_context() + custom_ssl_context.set_ciphers(ssl_security_level) + + # If ssl_verify is a path to a CA bundle, load it into our custom context + if isinstance(ssl_verify, str) and os.path.exists(ssl_verify): + custom_ssl_context.load_verify_locations(cafile=ssl_verify) + + # Use our custom SSL context instead of the original ssl_verify value + ssl_verify = custom_ssl_context + # An SSL certificate used by the requested host to authenticate the client. # /path/to/client.pem cert = os.getenv("SSL_CERTIFICATE", litellm.ssl_certificate) diff --git a/litellm/llms/custom_httpx/llm_http_handler.py b/litellm/llms/custom_httpx/llm_http_handler.py index 9d67fd1a85..01fe36acda 100644 --- a/litellm/llms/custom_httpx/llm_http_handler.py +++ b/litellm/llms/custom_httpx/llm_http_handler.py @@ -1,6 +1,6 @@ import io import json -from typing import TYPE_CHECKING, Any, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Coroutine, Dict, Optional, Tuple, Union import httpx # type: ignore @@ -11,13 +11,21 @@ import litellm.types.utils from litellm.llms.base_llm.chat.transformation import BaseConfig from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig from litellm.llms.base_llm.rerank.transformation import BaseRerankConfig +from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig from litellm.llms.custom_httpx.http_handler import ( AsyncHTTPHandler, HTTPHandler, _get_httpx_client, get_async_httpx_client, ) +from litellm.responses.streaming_iterator import ( + BaseResponsesAPIStreamingIterator, + ResponsesAPIStreamingIterator, + SyncResponsesAPIStreamingIterator, +) +from litellm.types.llms.openai import ResponseInputParam, ResponsesAPIResponse from litellm.types.rerank import OptionalRerankParams, RerankResponse +from litellm.types.router import GenericLiteLLMParams from litellm.types.utils import EmbeddingResponse, FileTypes, TranscriptionResponse from litellm.utils import CustomStreamWrapper, ModelResponse, ProviderConfigManager @@ -234,6 +242,7 @@ class BaseLLMHTTPHandler: model=model, optional_params=optional_params, stream=stream, + litellm_params=litellm_params, ) data = provider_config.transform_request( @@ -604,6 +613,7 @@ class BaseLLMHTTPHandler: api_base=api_base, model=model, optional_params=optional_params, + litellm_params=litellm_params, ) data = provider_config.transform_embedding_request( @@ -899,6 +909,7 @@ class BaseLLMHTTPHandler: client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, atranscription: bool = False, headers: dict = {}, + litellm_params: dict = {}, ) -> TranscriptionResponse: provider_config = ProviderConfigManager.get_provider_audio_transcription_config( model=model, provider=litellm.LlmProviders(custom_llm_provider) @@ -922,6 +933,7 @@ class BaseLLMHTTPHandler: api_base=api_base, model=model, optional_params=optional_params, + litellm_params=litellm_params, ) # Handle the audio file based on type @@ -952,8 +964,235 @@ class BaseLLMHTTPHandler: return returned_response return model_response + def response_api_handler( + self, + model: str,
+ input: Union[str, ResponseInputParam], + responses_api_provider_config: BaseResponsesAPIConfig, + response_api_optional_request_params: Dict, + custom_llm_provider: str, + litellm_params: GenericLiteLLMParams, + logging_obj: LiteLLMLoggingObj, + extra_headers: Optional[Dict[str, Any]] = None, + extra_body: Optional[Dict[str, Any]] = None, + timeout: Optional[Union[float, httpx.Timeout]] = None, + client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, + _is_async: bool = False, + ) -> Union[ + ResponsesAPIResponse, + BaseResponsesAPIStreamingIterator, + Coroutine[ + Any, Any, Union[ResponsesAPIResponse, BaseResponsesAPIStreamingIterator] + ], + ]: + """ + Handles responses API requests. + When _is_async=True, returns a coroutine instead of making the call directly. + """ + if _is_async: + # Return the async coroutine if called with _is_async=True + return self.async_response_api_handler( + model=model, + input=input, + responses_api_provider_config=responses_api_provider_config, + response_api_optional_request_params=response_api_optional_request_params, + custom_llm_provider=custom_llm_provider, + litellm_params=litellm_params, + logging_obj=logging_obj, + extra_headers=extra_headers, + extra_body=extra_body, + timeout=timeout, + client=client if isinstance(client, AsyncHTTPHandler) else None, + ) + + if client is None or not isinstance(client, HTTPHandler): + sync_httpx_client = _get_httpx_client( + params={"ssl_verify": litellm_params.get("ssl_verify", None)} + ) + else: + sync_httpx_client = client + + headers = responses_api_provider_config.validate_environment( + api_key=litellm_params.api_key, + headers=response_api_optional_request_params.get("extra_headers", {}) or {}, + model=model, + ) + + if extra_headers: + headers.update(extra_headers) + + api_base = responses_api_provider_config.get_complete_url( + api_base=litellm_params.api_base, + model=model, + ) + + data = responses_api_provider_config.transform_responses_api_request( + model=model, + input=input, + response_api_optional_request_params=response_api_optional_request_params, + litellm_params=litellm_params, + headers=headers, + ) + + ## LOGGING + logging_obj.pre_call( + input=input, + api_key="", + additional_args={ + "complete_input_dict": data, + "api_base": api_base, + "headers": headers, + }, + ) + + # Check if streaming is requested + stream = response_api_optional_request_params.get("stream", False) + + try: + if stream: + # For streaming, use stream=True in the request + response = sync_httpx_client.post( + url=api_base, + headers=headers, + data=json.dumps(data), + timeout=timeout + or response_api_optional_request_params.get("timeout"), + stream=True, + ) + + return SyncResponsesAPIStreamingIterator( + response=response, + model=model, + logging_obj=logging_obj, + responses_api_provider_config=responses_api_provider_config, + ) + else: + # For non-streaming requests + response = sync_httpx_client.post( + url=api_base, + headers=headers, + data=json.dumps(data), + timeout=timeout + or response_api_optional_request_params.get("timeout"), + ) + except Exception as e: + raise self._handle_error( + e=e, + provider_config=responses_api_provider_config, + ) + + return responses_api_provider_config.transform_response_api_response( + model=model, + raw_response=response, + logging_obj=logging_obj, + ) + + async def async_response_api_handler( + self, + model: str, + input: Union[str, ResponseInputParam], + responses_api_provider_config: BaseResponsesAPIConfig, + response_api_optional_request_params: Dict, + 
custom_llm_provider: str, + litellm_params: GenericLiteLLMParams, + logging_obj: LiteLLMLoggingObj, + extra_headers: Optional[Dict[str, Any]] = None, + extra_body: Optional[Dict[str, Any]] = None, + timeout: Optional[Union[float, httpx.Timeout]] = None, + client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, + ) -> Union[ResponsesAPIResponse, BaseResponsesAPIStreamingIterator]: + """ + Async version of the responses API handler. + Uses async HTTP client to make requests. + """ + if client is None or not isinstance(client, AsyncHTTPHandler): + async_httpx_client = get_async_httpx_client( + llm_provider=litellm.LlmProviders(custom_llm_provider), + params={"ssl_verify": litellm_params.get("ssl_verify", None)}, + ) + else: + async_httpx_client = client + + headers = responses_api_provider_config.validate_environment( + api_key=litellm_params.api_key, + headers=response_api_optional_request_params.get("extra_headers", {}) or {}, + model=model, + ) + + if extra_headers: + headers.update(extra_headers) + + api_base = responses_api_provider_config.get_complete_url( + api_base=litellm_params.api_base, + model=model, + ) + + data = responses_api_provider_config.transform_responses_api_request( + model=model, + input=input, + response_api_optional_request_params=response_api_optional_request_params, + litellm_params=litellm_params, + headers=headers, + ) + + ## LOGGING + logging_obj.pre_call( + input=input, + api_key="", + additional_args={ + "complete_input_dict": data, + "api_base": api_base, + "headers": headers, + }, + ) + + # Check if streaming is requested + stream = response_api_optional_request_params.get("stream", False) + + try: + if stream: + # For streaming, we need to use stream=True in the request + response = await async_httpx_client.post( + url=api_base, + headers=headers, + data=json.dumps(data), + timeout=timeout + or response_api_optional_request_params.get("timeout"), + stream=True, + ) + + # Return the streaming iterator + return ResponsesAPIStreamingIterator( + response=response, + model=model, + logging_obj=logging_obj, + responses_api_provider_config=responses_api_provider_config, + ) + else: + # For non-streaming, proceed as before + response = await async_httpx_client.post( + url=api_base, + headers=headers, + data=json.dumps(data), + timeout=timeout + or response_api_optional_request_params.get("timeout"), + ) + except Exception as e: + raise self._handle_error( + e=e, + provider_config=responses_api_provider_config, + ) + + return responses_api_provider_config.transform_response_api_response( + model=model, + raw_response=response, + logging_obj=logging_obj, + ) + def _handle_error( - self, e: Exception, provider_config: Union[BaseConfig, BaseRerankConfig] + self, + e: Exception, + provider_config: Union[BaseConfig, BaseRerankConfig, BaseResponsesAPIConfig], ): status_code = getattr(e, "status_code", 500) error_headers = getattr(e, "headers", None) diff --git a/litellm/llms/deepgram/audio_transcription/transformation.py b/litellm/llms/deepgram/audio_transcription/transformation.py index c8dbd688cc..06296736ea 100644 --- a/litellm/llms/deepgram/audio_transcription/transformation.py +++ b/litellm/llms/deepgram/audio_transcription/transformation.py @@ -103,6 +103,7 @@ class DeepgramAudioTranscriptionConfig(BaseAudioTranscriptionConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: if api_base is None: diff --git a/litellm/llms/deepseek/chat/transformation.py 
b/litellm/llms/deepseek/chat/transformation.py index 747129ddd8..180cf7dc69 100644 --- a/litellm/llms/deepseek/chat/transformation.py +++ b/litellm/llms/deepseek/chat/transformation.py @@ -40,6 +40,7 @@ class DeepSeekChatConfig(OpenAIGPTConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/ollama/completion/transformation.py b/litellm/llms/ollama/completion/transformation.py index 283b2a2437..b4db95cfa1 100644 --- a/litellm/llms/ollama/completion/transformation.py +++ b/litellm/llms/ollama/completion/transformation.py @@ -6,6 +6,9 @@ from typing import TYPE_CHECKING, Any, AsyncIterator, Iterator, List, Optional, from httpx._models import Headers, Response import litellm +from litellm.litellm_core_utils.prompt_templates.common_utils import ( + get_str_from_messages, +) from litellm.litellm_core_utils.prompt_templates.factory import ( convert_to_ollama_image, custom_prompt, @@ -302,6 +305,8 @@ class OllamaConfig(BaseConfig): custom_prompt_dict = ( litellm_params.get("custom_prompt_dict") or litellm.custom_prompt_dict ) + + text_completion_request = litellm_params.get("text_completion") if model in custom_prompt_dict: # check if the model has a registered custom prompt model_prompt_details = custom_prompt_dict[model] @@ -311,7 +316,9 @@ final_prompt_value=model_prompt_details["final_prompt_value"], messages=messages, ) - else: + elif text_completion_request: # handle `/completions` requests + ollama_prompt = get_str_from_messages(messages=messages) + else: # handle `/chat/completions` requests modified_prompt = ollama_pt(model=model, messages=messages) if isinstance(modified_prompt, dict): ollama_prompt, images = ( @@ -356,6 +363,7 @@ class OllamaConfig(BaseConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/openai/chat/gpt_transformation.py b/litellm/llms/openai/chat/gpt_transformation.py index 1f34d63681..c765f97979 100644 --- a/litellm/llms/openai/chat/gpt_transformation.py +++ b/litellm/llms/openai/chat/gpt_transformation.py @@ -125,6 +125,7 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig): "max_retries", "extra_headers", "parallel_tool_calls", + "audio", ] # works across all models model_specific_params = [] @@ -291,6 +292,7 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/openai/common_utils.py b/litellm/llms/openai/common_utils.py index a8412f867b..55da16d6cd 100644 --- a/litellm/llms/openai/common_utils.py +++ b/litellm/llms/openai/common_utils.py @@ -2,13 +2,17 @@ Common helpers / utils across all OpenAI endpoints """ +import hashlib import json -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union import httpx import openai +from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI +import litellm from litellm.llms.base_llm.chat.transformation import BaseLLMException +from litellm.llms.custom_httpx.http_handler import _DEFAULT_TTL_FOR_HTTPX_CLIENTS class OpenAIError(BaseLLMException): @@ -92,3 +96,113 @@ def drop_params_from_unprocessable_entity_error( new_data = {k: v for k, v in data.items() if k not in invalid_params} return new_data + + +class BaseOpenAILLM: + """ + Base class for OpenAI LLMs
with helpers for getting their httpx clients and SSL verification settings + """ + + @staticmethod + def get_cached_openai_client( + client_initialization_params: dict, client_type: Literal["openai", "azure"] + ) -> Optional[Union[OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI]]: + """Retrieves the OpenAI client from the in-memory cache based on the client initialization parameters""" + _cache_key = BaseOpenAILLM.get_openai_client_cache_key( + client_initialization_params=client_initialization_params, + client_type=client_type, + ) + _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_cache_key) + return _cached_client + + @staticmethod + def set_cached_openai_client( + openai_client: Union[OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI], + client_type: Literal["openai", "azure"], + client_initialization_params: dict, + ): + """Stores the OpenAI client in the in-memory cache for _DEFAULT_TTL_FOR_HTTPX_CLIENTS seconds""" + _cache_key = BaseOpenAILLM.get_openai_client_cache_key( + client_initialization_params=client_initialization_params, + client_type=client_type, + ) + litellm.in_memory_llm_clients_cache.set_cache( + key=_cache_key, + value=openai_client, + ttl=_DEFAULT_TTL_FOR_HTTPX_CLIENTS, + ) + + @staticmethod + def get_openai_client_cache_key( + client_initialization_params: dict, client_type: Literal["openai", "azure"] + ) -> str: + """Creates a cache key for the OpenAI client based on the client initialization parameters""" + hashed_api_key = None + if client_initialization_params.get("api_key") is not None: + hash_object = hashlib.sha256( + client_initialization_params.get("api_key", "").encode() + ) + # Hexadecimal representation of the hash + hashed_api_key = hash_object.hexdigest() + + # Create a more readable cache key using a list of key-value pairs + key_parts = [ + f"hashed_api_key={hashed_api_key}", + f"is_async={client_initialization_params.get('is_async')}", + ] + + LITELLM_CLIENT_SPECIFIC_PARAMS = [ + "timeout", + "max_retries", + "organization", + "api_base", + ] + openai_client_fields = ( + BaseOpenAILLM.get_openai_client_initialization_param_fields( + client_type=client_type + ) + + LITELLM_CLIENT_SPECIFIC_PARAMS + ) + + for param in openai_client_fields: + key_parts.append(f"{param}={client_initialization_params.get(param)}") + + _cache_key = ",".join(key_parts) + return _cache_key + + @staticmethod + def get_openai_client_initialization_param_fields( + client_type: Literal["openai", "azure"] + ) -> List[str]: + """Returns a list of fields that are used to initialize the OpenAI client""" + import inspect + + from openai import AzureOpenAI, OpenAI + + if client_type == "openai": + signature = inspect.signature(OpenAI.__init__) + else: + signature = inspect.signature(AzureOpenAI.__init__) + + # Extract parameter names, excluding 'self' + param_names = [param for param in signature.parameters if param != "self"] + return param_names + + @staticmethod + def _get_async_http_client() -> Optional[httpx.AsyncClient]: + if litellm.aclient_session is not None: + return litellm.aclient_session + + return httpx.AsyncClient( + limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100), + verify=litellm.ssl_verify, + ) + + @staticmethod + def _get_sync_http_client() -> Optional[httpx.Client]: + if litellm.client_session is not None: + return litellm.client_session + return httpx.Client( + limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100), + verify=litellm.ssl_verify, + ) diff --git a/litellm/llms/openai/fine_tuning/handler.py
b/litellm/llms/openai/fine_tuning/handler.py index b7eab8e5fd..97b237c757 100644 --- a/litellm/llms/openai/fine_tuning/handler.py +++ b/litellm/llms/openai/fine_tuning/handler.py @@ -27,6 +27,7 @@ class OpenAIFineTuningAPI: ] = None, _is_async: bool = False, api_version: Optional[str] = None, + litellm_params: Optional[dict] = None, ) -> Optional[ Union[ OpenAI, diff --git a/litellm/llms/openai/openai.py b/litellm/llms/openai/openai.py index 7935c46293..deb70b481e 100644 --- a/litellm/llms/openai/openai.py +++ b/litellm/llms/openai/openai.py @@ -1,4 +1,3 @@ -import hashlib import time import types from typing import ( @@ -33,7 +32,6 @@ from litellm.litellm_core_utils.logging_utils import track_llm_api_timing from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator from litellm.llms.base_llm.chat.transformation import BaseConfig, BaseLLMException from litellm.llms.bedrock.chat.invoke_handler import MockResponseIterator -from litellm.llms.custom_httpx.http_handler import _DEFAULT_TTL_FOR_HTTPX_CLIENTS from litellm.types.utils import ( EmbeddingResponse, ImageResponse, @@ -50,7 +48,11 @@ from litellm.utils import ( from ...types.llms.openai import * from ..base import BaseLLM from .chat.o_series_transformation import OpenAIOSeriesConfig -from .common_utils import OpenAIError, drop_params_from_unprocessable_entity_error +from .common_utils import ( + BaseOpenAILLM, + OpenAIError, + drop_params_from_unprocessable_entity_error, +) openaiOSeriesConfig = OpenAIOSeriesConfig() @@ -317,7 +319,7 @@ class OpenAIChatCompletionResponseIterator(BaseModelResponseIterator): raise e -class OpenAIChatCompletion(BaseLLM): +class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM): def __init__(self) -> None: super().__init__() @@ -343,7 +345,8 @@ class OpenAIChatCompletion(BaseLLM): max_retries: Optional[int] = DEFAULT_MAX_RETRIES, organization: Optional[str] = None, client: Optional[Union[OpenAI, AsyncOpenAI]] = None, - ): + ) -> Optional[Union[OpenAI, AsyncOpenAI]]: + client_initialization_params: Dict = locals() if client is None: if not isinstance(max_retries, int): raise OpenAIError( @@ -352,25 +355,21 @@ class OpenAIChatCompletion(BaseLLM): max_retries ), ) - # Creating a new OpenAI Client - # check in memory cache before creating a new one - # Convert the API key to bytes - hashed_api_key = None - if api_key is not None: - hash_object = hashlib.sha256(api_key.encode()) - # Hexadecimal representation of the hash - hashed_api_key = hash_object.hexdigest() + cached_client = self.get_cached_openai_client( + client_initialization_params=client_initialization_params, + client_type="openai", + ) - _cache_key = f"hashed_api_key={hashed_api_key},api_base={api_base},timeout={timeout},max_retries={max_retries},organization={organization},is_async={is_async}" - - _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_cache_key) - if _cached_client: - return _cached_client + if cached_client: + if isinstance(cached_client, OpenAI) or isinstance( + cached_client, AsyncOpenAI + ): + return cached_client if is_async: _new_client: Union[OpenAI, AsyncOpenAI] = AsyncOpenAI( api_key=api_key, base_url=api_base, - http_client=litellm.aclient_session, + http_client=OpenAIChatCompletion._get_async_http_client(), timeout=timeout, max_retries=max_retries, organization=organization, @@ -379,17 +378,17 @@ class OpenAIChatCompletion(BaseLLM): _new_client = OpenAI( api_key=api_key, base_url=api_base, - http_client=litellm.client_session, + http_client=OpenAIChatCompletion._get_sync_http_client(), 
timeout=timeout, max_retries=max_retries, organization=organization, ) ## SAVE CACHE KEY - litellm.in_memory_llm_clients_cache.set_cache( - key=_cache_key, - value=_new_client, - ttl=_DEFAULT_TTL_FOR_HTTPX_CLIENTS, + self.set_cached_openai_client( + openai_client=_new_client, + client_initialization_params=client_initialization_params, + client_type="openai", ) return _new_client @@ -2650,7 +2649,7 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -2689,12 +2688,12 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], tools: Optional[Iterable[AssistantToolParam]], event_handler: Optional[AssistantEventHandler], ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]: - data = { + data: Dict[str, Any] = { "thread_id": thread_id, "assistant_id": assistant_id, "additional_instructions": additional_instructions, @@ -2714,12 +2713,12 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], tools: Optional[Iterable[AssistantToolParam]], event_handler: Optional[AssistantEventHandler], ) -> AssistantStreamManager[AssistantEventHandler]: - data = { + data: Dict[str, Any] = { "thread_id": thread_id, "assistant_id": assistant_id, "additional_instructions": additional_instructions, @@ -2741,7 +2740,7 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -2763,7 +2762,7 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -2786,7 +2785,7 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], diff --git a/litellm/llms/openai/responses/transformation.py b/litellm/llms/openai/responses/transformation.py new file mode 100644 index 0000000000..ce4052dc19 --- /dev/null +++ b/litellm/llms/openai/responses/transformation.py @@ -0,0 +1,190 @@ +from typing import TYPE_CHECKING, Any, Dict, Optional, Union, cast + +import httpx + +import litellm +from litellm._logging import verbose_logger +from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig +from litellm.secret_managers.main import get_secret_str +from litellm.types.llms.openai import * +from litellm.types.router import GenericLiteLLMParams + +from ..common_utils import OpenAIError + +if TYPE_CHECKING: + from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj + + LiteLLMLoggingObj = _LiteLLMLoggingObj +else: + LiteLLMLoggingObj = Any + + +class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig): + def 
get_supported_openai_params(self, model: str) -> list: + """ + All OpenAI Responses API params are supported + """ + return [ + "input", + "model", + "include", + "instructions", + "max_output_tokens", + "metadata", + "parallel_tool_calls", + "previous_response_id", + "reasoning", + "store", + "stream", + "temperature", + "text", + "tool_choice", + "tools", + "top_p", + "truncation", + "user", + "extra_headers", + "extra_query", + "extra_body", + "timeout", + ] + + def map_openai_params( + self, + response_api_optional_params: ResponsesAPIOptionalRequestParams, + model: str, + drop_params: bool, + ) -> Dict: + """No mapping applied since inputs are in OpenAI spec already""" + return dict(response_api_optional_params) + + def transform_responses_api_request( + self, + model: str, + input: Union[str, ResponseInputParam], + response_api_optional_request_params: Dict, + litellm_params: GenericLiteLLMParams, + headers: dict, + ) -> ResponsesAPIRequestParams: + """No transform applied since inputs are in OpenAI spec already""" + return ResponsesAPIRequestParams( + model=model, input=input, **response_api_optional_request_params + ) + + def transform_response_api_response( + self, + model: str, + raw_response: httpx.Response, + logging_obj: LiteLLMLoggingObj, + ) -> ResponsesAPIResponse: + """No transform applied since outputs are in OpenAI spec already""" + try: + raw_response_json = raw_response.json() + except Exception: + raise OpenAIError( + message=raw_response.text, status_code=raw_response.status_code + ) + return ResponsesAPIResponse(**raw_response_json) + + def validate_environment( + self, + headers: dict, + model: str, + api_key: Optional[str] = None, + ) -> dict: + api_key = ( + api_key + or litellm.api_key + or litellm.openai_key + or get_secret_str("OPENAI_API_KEY") + ) + headers.update( + { + "Authorization": f"Bearer {api_key}", + } + ) + return headers + + def get_complete_url( + self, + api_base: Optional[str], + model: str, + stream: Optional[bool] = None, + ) -> str: + """ + Get the endpoint for OpenAI responses API + """ + api_base = ( + api_base + or litellm.api_base + or get_secret_str("OPENAI_API_BASE") + or "https://api.openai.com/v1" + ) + + # Remove trailing slashes + api_base = api_base.rstrip("/") + + return f"{api_base}/responses" + + def transform_streaming_response( + self, + model: str, + parsed_chunk: dict, + logging_obj: LiteLLMLoggingObj, + ) -> ResponsesAPIStreamingResponse: + """ + Transform a parsed streaming response chunk into a ResponsesAPIStreamingResponse + """ + # Convert the dictionary to a properly typed ResponsesAPIStreamingResponse + verbose_logger.debug("Raw OpenAI Chunk=%s", parsed_chunk) + event_type = str(parsed_chunk.get("type")) + event_pydantic_model = OpenAIResponsesAPIConfig.get_event_model_class( + event_type=event_type + ) + return event_pydantic_model(**parsed_chunk) + + @staticmethod + def get_event_model_class(event_type: str) -> Any: + """ + Returns the appropriate event model class based on the event type. 
+ + Args: + event_type (str): The type of event from the response chunk + + Returns: + Any: The corresponding event model class + + Raises: + ValueError: If the event type is unknown + """ + event_models = { + ResponsesAPIStreamEvents.RESPONSE_CREATED: ResponseCreatedEvent, + ResponsesAPIStreamEvents.RESPONSE_IN_PROGRESS: ResponseInProgressEvent, + ResponsesAPIStreamEvents.RESPONSE_COMPLETED: ResponseCompletedEvent, + ResponsesAPIStreamEvents.RESPONSE_FAILED: ResponseFailedEvent, + ResponsesAPIStreamEvents.RESPONSE_INCOMPLETE: ResponseIncompleteEvent, + ResponsesAPIStreamEvents.OUTPUT_ITEM_ADDED: OutputItemAddedEvent, + ResponsesAPIStreamEvents.OUTPUT_ITEM_DONE: OutputItemDoneEvent, + ResponsesAPIStreamEvents.CONTENT_PART_ADDED: ContentPartAddedEvent, + ResponsesAPIStreamEvents.CONTENT_PART_DONE: ContentPartDoneEvent, + ResponsesAPIStreamEvents.OUTPUT_TEXT_DELTA: OutputTextDeltaEvent, + ResponsesAPIStreamEvents.OUTPUT_TEXT_ANNOTATION_ADDED: OutputTextAnnotationAddedEvent, + ResponsesAPIStreamEvents.OUTPUT_TEXT_DONE: OutputTextDoneEvent, + ResponsesAPIStreamEvents.REFUSAL_DELTA: RefusalDeltaEvent, + ResponsesAPIStreamEvents.REFUSAL_DONE: RefusalDoneEvent, + ResponsesAPIStreamEvents.FUNCTION_CALL_ARGUMENTS_DELTA: FunctionCallArgumentsDeltaEvent, + ResponsesAPIStreamEvents.FUNCTION_CALL_ARGUMENTS_DONE: FunctionCallArgumentsDoneEvent, + ResponsesAPIStreamEvents.FILE_SEARCH_CALL_IN_PROGRESS: FileSearchCallInProgressEvent, + ResponsesAPIStreamEvents.FILE_SEARCH_CALL_SEARCHING: FileSearchCallSearchingEvent, + ResponsesAPIStreamEvents.FILE_SEARCH_CALL_COMPLETED: FileSearchCallCompletedEvent, + ResponsesAPIStreamEvents.WEB_SEARCH_CALL_IN_PROGRESS: WebSearchCallInProgressEvent, + ResponsesAPIStreamEvents.WEB_SEARCH_CALL_SEARCHING: WebSearchCallSearchingEvent, + ResponsesAPIStreamEvents.WEB_SEARCH_CALL_COMPLETED: WebSearchCallCompletedEvent, + ResponsesAPIStreamEvents.ERROR: ErrorEvent, + } + + model_class = event_models.get(cast(ResponsesAPIStreamEvents, event_type)) + if not model_class: + raise ValueError(f"Unknown event type: {event_type}") + + return model_class diff --git a/litellm/llms/openai_like/chat/handler.py b/litellm/llms/openai_like/chat/handler.py index ac886e915c..821fc9b7f1 100644 --- a/litellm/llms/openai_like/chat/handler.py +++ b/litellm/llms/openai_like/chat/handler.py @@ -230,7 +230,7 @@ class OpenAILikeChatHandler(OpenAILikeBase): logging_obj, optional_params: dict, acompletion=None, - litellm_params=None, + litellm_params: dict = {}, logger_fn=None, headers: Optional[dict] = None, timeout: Optional[Union[float, httpx.Timeout]] = None, @@ -337,7 +337,7 @@ class OpenAILikeChatHandler(OpenAILikeBase): timeout=timeout, base_model=base_model, client=client, - json_mode=json_mode + json_mode=json_mode, ) else: ## COMPLETION CALL diff --git a/litellm/llms/replicate/chat/handler.py b/litellm/llms/replicate/chat/handler.py index e7d0d383e2..f52eb2ee05 100644 --- a/litellm/llms/replicate/chat/handler.py +++ b/litellm/llms/replicate/chat/handler.py @@ -169,7 +169,10 @@ def completion( ) # for pricing this must remain right before calling api prediction_url = replicate_config.get_complete_url( - api_base=api_base, model=model, optional_params=optional_params + api_base=api_base, + model=model, + optional_params=optional_params, + litellm_params=litellm_params, ) ## COMPLETION CALL @@ -243,7 +246,10 @@ async def async_completion( ) -> Union[ModelResponse, CustomStreamWrapper]: prediction_url = replicate_config.get_complete_url( - api_base=api_base, model=model, 
optional_params=optional_params + api_base=api_base, + model=model, + optional_params=optional_params, + litellm_params=litellm_params, ) async_handler = get_async_httpx_client( llm_provider=litellm.LlmProviders.REPLICATE, diff --git a/litellm/llms/replicate/chat/transformation.py b/litellm/llms/replicate/chat/transformation.py index 39aaad6808..75cfe6ced7 100644 --- a/litellm/llms/replicate/chat/transformation.py +++ b/litellm/llms/replicate/chat/transformation.py @@ -141,6 +141,7 @@ class ReplicateConfig(BaseConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: version_id = self.model_to_version_id(model) diff --git a/litellm/llms/sagemaker/completion/handler.py b/litellm/llms/sagemaker/completion/handler.py index 4aff5f5d71..909caf73c3 100644 --- a/litellm/llms/sagemaker/completion/handler.py +++ b/litellm/llms/sagemaker/completion/handler.py @@ -213,7 +213,7 @@ class SagemakerLLM(BaseAWSLLM): sync_response = sync_handler.post( url=prepared_request.url, headers=prepared_request.headers, # type: ignore - json=data, + data=prepared_request.body, stream=stream, ) @@ -308,7 +308,7 @@ class SagemakerLLM(BaseAWSLLM): sync_response = sync_handler.post( url=prepared_request.url, headers=prepared_request.headers, # type: ignore - json=_data, + data=prepared_request.body, timeout=timeout, ) @@ -356,7 +356,7 @@ class SagemakerLLM(BaseAWSLLM): self, api_base: str, headers: dict, - data: dict, + data: str, logging_obj, client=None, ): @@ -368,7 +368,7 @@ class SagemakerLLM(BaseAWSLLM): response = await client.post( api_base, headers=headers, - json=data, + data=data, stream=True, ) @@ -440,7 +440,7 @@ class SagemakerLLM(BaseAWSLLM): completion_stream = await self.make_async_call( api_base=prepared_request.url, headers=prepared_request.headers, # type: ignore - data=data, + data=prepared_request.body, logging_obj=logging_obj, ) streaming_response = CustomStreamWrapper( @@ -522,7 +522,7 @@ class SagemakerLLM(BaseAWSLLM): response = await async_handler.post( url=prepared_request.url, headers=prepared_request.headers, # type: ignore - json=data, + data=prepared_request.body, timeout=timeout, ) diff --git a/litellm/llms/snowflake/chat/transformation.py b/litellm/llms/snowflake/chat/transformation.py new file mode 100644 index 0000000000..d3634e7950 --- /dev/null +++ b/litellm/llms/snowflake/chat/transformation.py @@ -0,0 +1,167 @@ +""" +Support for Snowflake REST API +""" + +from typing import TYPE_CHECKING, Any, List, Optional, Tuple + +import httpx + +from litellm.secret_managers.main import get_secret_str +from litellm.types.llms.openai import AllMessageValues +from litellm.types.utils import ModelResponse + +from ...openai_like.chat.transformation import OpenAIGPTConfig + +if TYPE_CHECKING: + from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj + + LiteLLMLoggingObj = _LiteLLMLoggingObj +else: + LiteLLMLoggingObj = Any + + +class SnowflakeConfig(OpenAIGPTConfig): + """ + source: https://docs.snowflake.com/en/sql-reference/functions/complete-snowflake-cortex + """ + + @classmethod + def get_config(cls): + return super().get_config() + + def get_supported_openai_params(self, model: str) -> List: + return ["temperature", "max_tokens", "top_p", "response_format"] + + def map_openai_params( + self, + non_default_params: dict, + optional_params: dict, + model: str, + drop_params: bool, + ) -> dict: + """ + If any supported_openai_params are in non_default_params, add them to optional_params, so 
they are used in the API call + + Args: + non_default_params (dict): Non-default parameters to filter. + optional_params (dict): Optional parameters to update. + model (str): Model name for parameter support check. + + Returns: + dict: Updated optional_params with supported non-default parameters. + """ + supported_openai_params = self.get_supported_openai_params(model) + for param, value in non_default_params.items(): + if param in supported_openai_params: + optional_params[param] = value + return optional_params + + def transform_response( + self, + model: str, + raw_response: httpx.Response, + model_response: ModelResponse, + logging_obj: LiteLLMLoggingObj, + request_data: dict, + messages: List[AllMessageValues], + optional_params: dict, + litellm_params: dict, + encoding: Any, + api_key: Optional[str] = None, + json_mode: Optional[bool] = None, + ) -> ModelResponse: + response_json = raw_response.json() + logging_obj.post_call( + input=messages, + api_key="", + original_response=response_json, + additional_args={"complete_input_dict": request_data}, + ) + + returned_response = ModelResponse(**response_json) + + returned_response.model = "snowflake/" + (returned_response.model or "") + + if model is not None: + returned_response._hidden_params["model"] = model + return returned_response + + def validate_environment( + self, + headers: dict, + model: str, + messages: List[AllMessageValues], + optional_params: dict, + api_key: Optional[str] = None, + api_base: Optional[str] = None, + ) -> dict: + """ + Return headers to use for Snowflake completion request + + Snowflake REST API Ref: https://docs.snowflake.com/en/user-guide/snowflake-cortex/cortex-llm-rest-api#api-reference + Expected headers: + { + "Content-Type": "application/json", + "Accept": "application/json", + "Authorization": "Bearer " + <JWT>, + "X-Snowflake-Authorization-Token-Type": "KEYPAIR_JWT" + } + """ + + if api_key is None: + raise ValueError("Missing Snowflake JWT key") + + headers.update( + { + "Content-Type": "application/json", + "Accept": "application/json", + "Authorization": "Bearer " + api_key, + "X-Snowflake-Authorization-Token-Type": "KEYPAIR_JWT", + } + ) + return headers + + def _get_openai_compatible_provider_info( + self, api_base: Optional[str], api_key: Optional[str] + ) -> Tuple[Optional[str], Optional[str]]: + api_base = ( + api_base + or get_secret_str("SNOWFLAKE_API_BASE") + or f"""https://{get_secret_str("SNOWFLAKE_ACCOUNT_ID")}.snowflakecomputing.com/api/v2/cortex/inference:complete""" + ) + dynamic_api_key = api_key or get_secret_str("SNOWFLAKE_JWT") + return api_base, dynamic_api_key + + def get_complete_url( + self, + api_base: Optional[str], + model: str, + optional_params: dict, + litellm_params: dict, + stream: Optional[bool] = None, + ) -> str: + """ + If api_base is not provided, use the default Snowflake Cortex inference endpoint.
+ """ + if not api_base: + api_base = f"""https://{get_secret_str("SNOWFLAKE_ACCOUNT_ID")}.snowflakecomputing.com/api/v2/cortex/inference:complete""" + + return api_base + + def transform_request( + self, + model: str, + messages: List[AllMessageValues], + optional_params: dict, + litellm_params: dict, + headers: dict, + ) -> dict: + stream: bool = optional_params.pop("stream", None) or False + extra_body = optional_params.pop("extra_body", {}) + return { + "model": model, + "messages": messages, + "stream": stream, + **optional_params, + **extra_body, + } diff --git a/litellm/llms/snowflake/common_utils.py b/litellm/llms/snowflake/common_utils.py new file mode 100644 index 0000000000..40c8270f95 --- /dev/null +++ b/litellm/llms/snowflake/common_utils.py @@ -0,0 +1,34 @@ +from typing import Optional + + +class SnowflakeBase: + def validate_environment( + self, + headers: dict, + JWT: Optional[str] = None, + ) -> dict: + """ + Return headers to use for Snowflake completion request + + Snowflake REST API Ref: https://docs.snowflake.com/en/user-guide/snowflake-cortex/cortex-llm-rest-api#api-reference + Expected headers: + { + "Content-Type": "application/json", + "Accept": "application/json", + "Authorization": "Bearer " + <JWT>, + "X-Snowflake-Authorization-Token-Type": "KEYPAIR_JWT" + } + """ + + if JWT is None: + raise ValueError("Missing Snowflake JWT key") + + headers.update( + { + "Content-Type": "application/json", + "Accept": "application/json", + "Authorization": "Bearer " + JWT, + "X-Snowflake-Authorization-Token-Type": "KEYPAIR_JWT", + } + ) + return headers diff --git a/litellm/llms/topaz/image_variations/transformation.py b/litellm/llms/topaz/image_variations/transformation.py index 112c3a8f64..8b95deed04 100644 --- a/litellm/llms/topaz/image_variations/transformation.py +++ b/litellm/llms/topaz/image_variations/transformation.py @@ -55,6 +55,7 @@ class TopazImageVariationConfig(BaseImageVariationConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: api_base = api_base or "https://api.topazlabs.com" diff --git a/litellm/llms/triton/completion/transformation.py b/litellm/llms/triton/completion/transformation.py index 4037c32365..56151f89ef 100644 --- a/litellm/llms/triton/completion/transformation.py +++ b/litellm/llms/triton/completion/transformation.py @@ -72,6 +72,7 @@ class TritonConfig(BaseConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: if api_base is None: diff --git a/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py b/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py index 294939a3c5..9ac1b1ffc4 100644 --- a/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py +++ b/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py @@ -846,7 +846,7 @@ async def make_call( message=VertexGeminiConfig().translate_exception_str(exception_string), headers=e.response.headers, ) - if response.status_code != 200: + if response.status_code != 200 and response.status_code != 201: raise VertexAIError( status_code=response.status_code, message=response.text, @@ -884,7 +884,7 @@ def make_sync_call( response = client.post(api_base, headers=headers, data=data, stream=True) - if response.status_code != 200: + if response.status_code != 200 and response.status_code != 201: raise VertexAIError( status_code=response.status_code, message=str(response.read()), diff --git
a/litellm/llms/voyage/embedding/transformation.py b/litellm/llms/voyage/embedding/transformation.py index 623dfe73af..51abc9e43a 100644 --- a/litellm/llms/voyage/embedding/transformation.py +++ b/litellm/llms/voyage/embedding/transformation.py @@ -43,6 +43,7 @@ class VoyageEmbeddingConfig(BaseEmbeddingConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: if api_base: diff --git a/litellm/llms/watsonx/chat/handler.py b/litellm/llms/watsonx/chat/handler.py index fd195214db..8ea19d413e 100644 --- a/litellm/llms/watsonx/chat/handler.py +++ b/litellm/llms/watsonx/chat/handler.py @@ -31,7 +31,7 @@ class WatsonXChatHandler(OpenAILikeChatHandler): logging_obj, optional_params: dict, acompletion=None, - litellm_params=None, + litellm_params: dict = {}, headers: Optional[dict] = None, logger_fn=None, timeout: Optional[Union[float, httpx.Timeout]] = None, @@ -63,6 +63,7 @@ class WatsonXChatHandler(OpenAILikeChatHandler): api_base=api_base, model=model, optional_params=optional_params, + litellm_params=litellm_params, stream=optional_params.get("stream", False), ) diff --git a/litellm/llms/watsonx/chat/transformation.py b/litellm/llms/watsonx/chat/transformation.py index d5e0ed6544..f253da6f5b 100644 --- a/litellm/llms/watsonx/chat/transformation.py +++ b/litellm/llms/watsonx/chat/transformation.py @@ -83,6 +83,7 @@ class IBMWatsonXChatConfig(IBMWatsonXMixin, OpenAIGPTConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: url = self._get_base_url(api_base=api_base) diff --git a/litellm/llms/watsonx/completion/transformation.py b/litellm/llms/watsonx/completion/transformation.py index 7a4df23944..f414354e2a 100644 --- a/litellm/llms/watsonx/completion/transformation.py +++ b/litellm/llms/watsonx/completion/transformation.py @@ -318,6 +318,7 @@ class IBMWatsonXAIConfig(IBMWatsonXMixin, BaseConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: url = self._get_base_url(api_base=api_base) diff --git a/litellm/llms/watsonx/embed/transformation.py b/litellm/llms/watsonx/embed/transformation.py index 69c1f8fffa..359137ee5e 100644 --- a/litellm/llms/watsonx/embed/transformation.py +++ b/litellm/llms/watsonx/embed/transformation.py @@ -54,6 +54,7 @@ class IBMWatsonXEmbeddingConfig(IBMWatsonXMixin, BaseEmbeddingConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: url = self._get_base_url(api_base=api_base) diff --git a/litellm/main.py b/litellm/main.py index b90030a6bb..e75c23f0fc 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -25,6 +25,7 @@ from functools import partial from typing import ( Any, Callable, + Coroutine, Dict, List, Literal, @@ -74,6 +75,7 @@ from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler from litellm.realtime_api.main import _realtime_health_check from litellm.secret_managers.main import get_secret_str from litellm.types.router import GenericLiteLLMParams +from litellm.types.utils import RawRequestTypedDict from litellm.utils import ( CustomStreamWrapper, ProviderConfigManager, @@ -1162,6 +1164,15 @@ def completion( # type: ignore # noqa: PLR0915 merge_reasoning_content_in_choices=kwargs.get( "merge_reasoning_content_in_choices", None ), + api_version=api_version, + azure_ad_token=kwargs.get("azure_ad_token"), + 
tenant_id=kwargs.get("tenant_id"), + client_id=kwargs.get("client_id"), + client_secret=kwargs.get("client_secret"), + azure_username=kwargs.get("azure_username"), + azure_password=kwargs.get("azure_password"), + max_retries=max_retries, + timeout=timeout, ) logging.update_environment_variables( model=model, @@ -2977,6 +2988,39 @@ def completion( # type: ignore # noqa: PLR0915 ) return response response = model_response + elif custom_llm_provider == "snowflake" or model in litellm.snowflake_models: + try: + client = ( + HTTPHandler(timeout=timeout) if stream is False else None + ) # Keep this here, otherwise, the httpx.client closes and streaming is impossible + response = base_llm_http_handler.completion( + model=model, + messages=messages, + headers=headers, + model_response=model_response, + api_key=api_key, + api_base=api_base, + acompletion=acompletion, + logging_obj=logging, + optional_params=optional_params, + litellm_params=litellm_params, + timeout=timeout, # type: ignore + client=client, + custom_llm_provider=custom_llm_provider, + encoding=encoding, + stream=stream, + ) + + except Exception as e: + ## LOGGING - log the original exception returned + logging.post_call( + input=messages, + api_key=api_key, + original_response=str(e), + additional_args={"headers": headers}, + ) + raise e + elif custom_llm_provider == "custom": url = litellm.api_base or api_base or "" if url is None or url == "": @@ -3035,6 +3079,7 @@ def completion( # type: ignore # noqa: PLR0915 model_response.created = int(time.time()) model_response.model = model response = model_response + elif ( custom_llm_provider in litellm._custom_providers ): # Assume custom LLM provider @@ -3244,7 +3289,7 @@ def embedding( # noqa: PLR0915 litellm_call_id=None, logger_fn=None, **kwargs, -) -> EmbeddingResponse: +) -> Union[EmbeddingResponse, Coroutine[Any, Any, EmbeddingResponse]]: """ Embedding function that calls an API to generate embeddings for the given input. 
@@ -3350,6 +3395,7 @@ def embedding( # noqa: PLR0915 } } ) + litellm_params_dict = get_litellm_params(**kwargs) logging: Logging = litellm_logging_obj # type: ignore @@ -3364,7 +3410,9 @@ def embedding( # noqa: PLR0915 if mock_response is not None: return mock_embedding(model=model, mock_response=mock_response) try: - response: Optional[EmbeddingResponse] = None + response: Optional[ + Union[EmbeddingResponse, Coroutine[Any, Any, EmbeddingResponse]] + ] = None if azure is True or custom_llm_provider == "azure": # azure configs @@ -3411,6 +3459,7 @@ def embedding( # noqa: PLR0915 aembedding=aembedding, max_retries=max_retries, headers=headers or extra_headers, + litellm_params=litellm_params_dict, ) elif ( model in litellm.open_ai_embedding_models @@ -3855,7 +3904,11 @@ def embedding( # noqa: PLR0915 raise LiteLLMUnknownProvider( model=model, custom_llm_provider=custom_llm_provider ) - if response is not None and hasattr(response, "_hidden_params"): + if ( + response is not None + and hasattr(response, "_hidden_params") + and isinstance(response, EmbeddingResponse) + ): response._hidden_params["custom_llm_provider"] = custom_llm_provider if response is None: @@ -4514,6 +4567,8 @@ def image_generation( # noqa: PLR0915 **non_default_params, ) + litellm_params_dict = get_litellm_params(**kwargs) + logging: Logging = litellm_logging_obj logging.update_environment_variables( model=model, @@ -4584,6 +4639,7 @@ def image_generation( # noqa: PLR0915 aimg_generation=aimg_generation, client=client, headers=headers, + litellm_params=litellm_params_dict, ) elif ( custom_llm_provider == "openai" @@ -4895,6 +4951,10 @@ async def atranscription(*args, **kwargs) -> TranscriptionResponse: else: # Call the synchronous function using run_in_executor response = await loop.run_in_executor(None, func_with_context) + if not isinstance(response, TranscriptionResponse): + raise ValueError( + f"Invalid response from transcription provider, expected TranscriptionResponse, but got {type(response)}" + ) return response except Exception as e: custom_llm_provider = custom_llm_provider or "openai" @@ -4928,7 +4988,7 @@ def transcription( max_retries: Optional[int] = None, custom_llm_provider=None, **kwargs, -) -> TranscriptionResponse: +) -> Union[TranscriptionResponse, Coroutine[Any, Any, TranscriptionResponse]]: """ Calls openai + azure whisper endpoints. 
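For illustration, a standalone sketch of the type guard that `atranscription` applies above before returning from the executor path; the helper name is hypothetical and the `TranscriptionResponse` import path is an assumption.

```python
# Hypothetical standalone version of the guard added above: the executor path
# must hand back a concrete TranscriptionResponse, never a coroutine or an
# error payload, so the type is checked before returning to the caller.
from litellm.types.utils import TranscriptionResponse  # assumed import path


def ensure_transcription_response(response: object) -> TranscriptionResponse:
    if not isinstance(response, TranscriptionResponse):
        raise ValueError(
            "Invalid response from transcription provider, expected "
            f"TranscriptionResponse, but got {type(response)}"
        )
    return response
```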
@@ -4979,6 +5039,7 @@ def transcription( custom_llm_provider=custom_llm_provider, drop_params=drop_params, ) + litellm_params_dict = get_litellm_params(**kwargs) litellm_logging_obj.update_environment_variables( model=model, @@ -4996,7 +5057,9 @@ def transcription( custom_llm_provider=custom_llm_provider, ) - response: Optional[TranscriptionResponse] = None + response: Optional[ + Union[TranscriptionResponse, Coroutine[Any, Any, TranscriptionResponse]] + ] = None if custom_llm_provider == "azure": # azure configs api_base = api_base or litellm.api_base or get_secret_str("AZURE_API_BASE") @@ -5032,6 +5095,7 @@ def transcription( api_version=api_version, azure_ad_token=azure_ad_token, max_retries=max_retries, + litellm_params=litellm_params_dict, ) elif ( custom_llm_provider == "openai" @@ -5134,7 +5198,7 @@ async def aspeech(*args, **kwargs) -> HttpxBinaryResponseContent: @client -def speech( +def speech( # noqa: PLR0915 model: str, input: str, voice: Optional[Union[str, dict]] = None, @@ -5175,7 +5239,7 @@ def speech( if max_retries is None: max_retries = litellm.num_retries or openai.DEFAULT_MAX_RETRIES - + litellm_params_dict = get_litellm_params(**kwargs) logging_obj = kwargs.get("litellm_logging_obj", None) logging_obj.update_environment_variables( model=model, @@ -5292,6 +5356,7 @@ def speech( timeout=timeout, client=client, # pass AsyncOpenAI, OpenAI client aspeech=aspeech, + litellm_params=litellm_params_dict, ) elif custom_llm_provider == "vertex_ai" or custom_llm_provider == "vertex_ai_beta": @@ -5399,6 +5464,17 @@ async def ahealth_check( "x-ms-region": str, } """ + # Map modes to their corresponding health check calls + litellm_logging_obj = Logging( + model="", + messages=[], + stream=False, + call_type="acompletion", + litellm_call_id="1234", + start_time=datetime.datetime.now(), + function_id="1234", + log_raw_request_response=True, + ) try: model: Optional[str] = model_params.get("model", None) if model is None: @@ -5421,9 +5497,12 @@ async def ahealth_check( custom_llm_provider=custom_llm_provider, model_params=model_params, ) - # Map modes to their corresponding health check calls + model_params["litellm_logging_obj"] = litellm_logging_obj + mode_handlers = { - "chat": lambda: litellm.acompletion(**model_params), + "chat": lambda: litellm.acompletion( + **model_params, + ), "completion": lambda: litellm.atext_completion( **_filter_model_params(model_params), prompt=prompt or "test", @@ -5480,13 +5559,16 @@ async def ahealth_check( "error": f"error:{str(e)}. Missing `mode`. 
Set the `mode` for the model - https://docs.litellm.ai/docs/proxy/health#embedding-models \nstacktrace: {stack_trace}" } - error_to_return = ( - str(e) - + "\nHave you set 'mode' - https://docs.litellm.ai/docs/proxy/health#embedding-models" - + "\nstack trace: " - + stack_trace + error_to_return = str(e) + "\nstack trace: " + stack_trace + + raw_request_typed_dict = litellm_logging_obj.model_call_details.get( + "raw_request_typed_dict" ) - return {"error": error_to_return} + + return { + "error": error_to_return, + "raw_request_typed_dict": raw_request_typed_dict, + } ####### HELPER FUNCTIONS ################ diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 67543b1ed1..f2ca9156ad 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -220,6 +220,48 @@ "supports_system_messages": true, "supports_tool_choice": true }, + "o1-pro": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.00015, + "output_cost_per_token": 0.0006, + "input_cost_per_token_batches": 0.000075, + "output_cost_per_token_batches": 0.0003, + "litellm_provider": "openai", + "mode": "responses", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_native_streaming": false, + "supported_modalities": ["text", "image"], + "supported_endpoints": ["/v1/responses", "/v1/batch"] + }, + "o1-pro-2025-03-19": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.00015, + "output_cost_per_token": 0.0006, + "input_cost_per_token_batches": 0.000075, + "output_cost_per_token_batches": 0.0003, + "litellm_provider": "openai", + "mode": "responses", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_native_streaming": false, + "supported_modalities": ["text", "image"], + "supported_endpoints": ["/v1/responses", "/v1/batch"] + }, "o1": { "max_tokens": 100000, "max_input_tokens": 200000, @@ -1021,6 +1063,120 @@ "input_cost_per_character": 0.000030, "litellm_provider": "openai" }, + "azure/gpt-4o-mini-realtime-preview-2024-12-17": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000006, + "input_cost_per_audio_token": 0.00001, + "cache_read_input_token_cost": 0.0000003, + "cache_creation_input_audio_token_cost": 0.0000003, + "output_cost_per_token": 0.0000024, + "output_cost_per_audio_token": 0.00002, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/eu/gpt-4o-mini-realtime-preview-2024-12-17": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000066, + "input_cost_per_audio_token": 0.000011, + "cache_read_input_token_cost": 0.00000033, + "cache_creation_input_audio_token_cost": 0.00000033, + "output_cost_per_token": 
0.00000264, + "output_cost_per_audio_token": 0.000022, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/us/gpt-4o-mini-realtime-preview-2024-12-17": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000066, + "input_cost_per_audio_token": 0.000011, + "cache_read_input_token_cost": 0.00000033, + "cache_creation_input_audio_token_cost": 0.00000033, + "output_cost_per_token": 0.00000264, + "output_cost_per_audio_token": 0.000022, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/gpt-4o-realtime-preview-2024-10-01": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000005, + "input_cost_per_audio_token": 0.0001, + "cache_read_input_token_cost": 0.0000025, + "cache_creation_input_audio_token_cost": 0.00002, + "output_cost_per_token": 0.00002, + "output_cost_per_audio_token": 0.0002, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/us/gpt-4o-realtime-preview-2024-10-01": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000055, + "input_cost_per_audio_token": 0.00011, + "cache_read_input_token_cost": 0.00000275, + "cache_creation_input_audio_token_cost": 0.000022, + "output_cost_per_token": 0.000022, + "output_cost_per_audio_token": 0.00022, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/eu/gpt-4o-realtime-preview-2024-10-01": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000055, + "input_cost_per_audio_token": 0.00011, + "cache_read_input_token_cost": 0.00000275, + "cache_creation_input_audio_token_cost": 0.000022, + "output_cost_per_token": 0.000022, + "output_cost_per_audio_token": 0.00022, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, "azure/o3-mini-2025-01-31": { "max_tokens": 100000, "max_input_tokens": 200000, @@ -1034,6 +1190,36 @@ "supports_prompt_caching": true, "supports_tool_choice": true }, + "azure/us/o3-mini-2025-01-31": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.00000121, + "input_cost_per_token_batches": 0.000000605, + "output_cost_per_token": 0.00000484, + "output_cost_per_token_batches": 0.00000242, + "cache_read_input_token_cost": 0.000000605, + "litellm_provider": "azure", + "mode": "chat", + "supports_vision": false, + 
"supports_prompt_caching": true, + "supports_tool_choice": true + }, + "azure/eu/o3-mini-2025-01-31": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.00000121, + "input_cost_per_token_batches": 0.000000605, + "output_cost_per_token": 0.00000484, + "output_cost_per_token_batches": 0.00000242, + "cache_read_input_token_cost": 0.000000605, + "litellm_provider": "azure", + "mode": "chat", + "supports_vision": false, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, "azure/tts-1": { "mode": "audio_speech", "input_cost_per_character": 0.000015, @@ -1092,6 +1278,38 @@ "supports_vision": false, "supports_prompt_caching": true }, + "azure/us/o1-mini-2024-09-12": { + "max_tokens": 65536, + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "input_cost_per_token": 0.00000121, + "input_cost_per_token_batches": 0.000000605, + "output_cost_per_token": 0.00000484, + "output_cost_per_token_batches": 0.00000242, + "cache_read_input_token_cost": 0.000000605, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false, + "supports_prompt_caching": true + }, + "azure/eu/o1-mini-2024-09-12": { + "max_tokens": 65536, + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "input_cost_per_token": 0.00000121, + "input_cost_per_token_batches": 0.000000605, + "output_cost_per_token": 0.00000484, + "output_cost_per_token_batches": 0.00000242, + "cache_read_input_token_cost": 0.000000605, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false, + "supports_prompt_caching": true + }, "azure/o1": { "max_tokens": 100000, "max_input_tokens": 200000, @@ -1122,6 +1340,36 @@ "supports_prompt_caching": true, "supports_tool_choice": true }, + "azure/us/o1-2024-12-17": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.0000165, + "output_cost_per_token": 0.000066, + "cache_read_input_token_cost": 0.00000825, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "azure/eu/o1-2024-12-17": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.0000165, + "output_cost_per_token": 0.000066, + "cache_read_input_token_cost": 0.00000825, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, "azure/o1-preview": { "max_tokens": 32768, "max_input_tokens": 128000, @@ -1150,17 +1398,62 @@ "supports_vision": false, "supports_prompt_caching": true }, - "azure/gpt-4o": { - "max_tokens": 4096, + "azure/us/o1-preview-2024-09-12": { + "max_tokens": 32768, "max_input_tokens": 128000, - "max_output_tokens": 4096, - "input_cost_per_token": 0.000005, - "output_cost_per_token": 0.000015, + "max_output_tokens": 32768, + "input_cost_per_token": 0.0000165, + "output_cost_per_token": 0.000066, + "cache_read_input_token_cost": 0.00000825, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + 
"supports_vision": false, + "supports_prompt_caching": true + }, + "azure/eu/o1-preview-2024-09-12": { + "max_tokens": 32768, + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "input_cost_per_token": 0.0000165, + "output_cost_per_token": 0.000066, + "cache_read_input_token_cost": 0.00000825, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false, + "supports_prompt_caching": true + }, + "azure/gpt-4o": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.0000025, + "output_cost_per_token": 0.00001, "cache_read_input_token_cost": 0.00000125, "litellm_provider": "azure", "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "azure/global/gpt-4o-2024-11-20": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.0000025, + "output_cost_per_token": 0.00001, + "cache_read_input_token_cost": 0.00000125, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true, "supports_prompt_caching": true, "supports_tool_choice": true @@ -1169,8 +1462,24 @@ "max_tokens": 16384, "max_input_tokens": 128000, "max_output_tokens": 16384, - "input_cost_per_token": 0.00000275, - "output_cost_per_token": 0.000011, + "input_cost_per_token": 0.0000025, + "output_cost_per_token": 0.00001, + "cache_read_input_token_cost": 0.00000125, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "azure/global/gpt-4o-2024-08-06": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.0000025, + "output_cost_per_token": 0.00001, "cache_read_input_token_cost": 0.00000125, "litellm_provider": "azure", "mode": "chat", @@ -1187,6 +1496,38 @@ "max_output_tokens": 16384, "input_cost_per_token": 0.00000275, "output_cost_per_token": 0.000011, + "cache_read_input_token_cost": 0.00000125, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "azure/us/gpt-4o-2024-11-20": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.00000275, + "cache_creation_input_token_cost": 0.00000138, + "output_cost_per_token": 0.000011, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_tool_choice": true + }, + "azure/eu/gpt-4o-2024-11-20": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.00000275, + "cache_creation_input_token_cost": 0.00000138, + "output_cost_per_token": 0.000011, "litellm_provider": "azure", "mode": "chat", "supports_function_calling": true, @@ -1225,6 
+1566,38 @@
         "supports_prompt_caching": true,
         "supports_tool_choice": true
     },
+    "azure/us/gpt-4o-2024-08-06": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.00000275,
+        "output_cost_per_token": 0.000011,
+        "cache_read_input_token_cost": 0.000001375,
+        "litellm_provider": "azure",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true,
+        "supports_prompt_caching": true,
+        "supports_tool_choice": true
+    },
+    "azure/eu/gpt-4o-2024-08-06": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.00000275,
+        "output_cost_per_token": 0.000011,
+        "cache_read_input_token_cost": 0.000001375,
+        "litellm_provider": "azure",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true,
+        "supports_prompt_caching": true,
+        "supports_tool_choice": true
+    },
     "azure/global-standard/gpt-4o-2024-11-20": {
         "max_tokens": 16384,
         "max_input_tokens": 128000,
@@ -1285,6 +1658,38 @@
         "supports_prompt_caching": true,
         "supports_tool_choice": true
     },
+    "azure/us/gpt-4o-mini-2024-07-18": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.000000165,
+        "output_cost_per_token": 0.00000066,
+        "cache_read_input_token_cost": 0.000000083,
+        "litellm_provider": "azure",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true,
+        "supports_prompt_caching": true,
+        "supports_tool_choice": true
+    },
+    "azure/eu/gpt-4o-mini-2024-07-18": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.000000165,
+        "output_cost_per_token": 0.00000066,
+        "cache_read_input_token_cost": 0.000000083,
+        "litellm_provider": "azure",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true,
+        "supports_prompt_caching": true,
+        "supports_tool_choice": true
+    },
     "azure/gpt-4-turbo-2024-04-09": {
         "max_tokens": 4096,
         "max_input_tokens": 128000,
@@ -1966,6 +2371,7 @@
         "output_cost_per_token": 0.0,
         "litellm_provider": "azure_ai",
         "mode": "embedding",
+        "supports_embedding_image_input": true,
         "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/cohere.cohere-embed-v3-english-offer?tab=PlansAndPrice"
     },
     "azure_ai/Cohere-embed-v3-multilingual": {
@@ -1976,6 +2382,7 @@
         "output_cost_per_token": 0.0,
         "litellm_provider": "azure_ai",
         "mode": "embedding",
+        "supports_embedding_image_input": true,
         "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/cohere.cohere-embed-v3-english-offer?tab=PlansAndPrice"
     },
     "babbage-002": {
@@ -3937,67 +4344,6 @@
         "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing",
         "supports_tool_choice": true
     },
-    "gemini/gemini-2.0-pro-exp-02-05": {
-        "max_tokens": 8192,
-        "max_input_tokens": 2097152,
-        "max_output_tokens": 8192,
-        "max_images_per_prompt": 3000,
-        "max_videos_per_prompt": 10,
-        "max_video_length": 1,
-        "max_audio_length_hours": 8.4,
-        "max_audio_per_prompt": 1,
-        "max_pdf_size_mb": 30,
-        "input_cost_per_image": 0,
-        "input_cost_per_video_per_second": 0,
-        "input_cost_per_audio_per_second": 0,
-        "input_cost_per_token": 0,
-        "input_cost_per_character": 0,
-        "input_cost_per_token_above_128k_tokens": 0,
-        "input_cost_per_character_above_128k_tokens": 0,
-        "input_cost_per_image_above_128k_tokens": 0,
-        "input_cost_per_video_per_second_above_128k_tokens": 0,
-        "input_cost_per_audio_per_second_above_128k_tokens": 0,
-        "output_cost_per_token": 0,
-        "output_cost_per_character": 0,
-        "output_cost_per_token_above_128k_tokens": 0,
-        "output_cost_per_character_above_128k_tokens": 0,
-        "litellm_provider": "gemini",
-        "mode": "chat",
-        "supports_system_messages": true,
-        "supports_function_calling": true,
-        "supports_vision": true,
-        "supports_audio_input": true,
-        "supports_video_input": true,
-        "supports_pdf_input": true,
-        "supports_response_schema": true,
-        "supports_tool_choice": true,
-        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
-    },
-    "gemini/gemini-2.0-flash": {
-        "max_tokens": 8192,
-        "max_input_tokens": 1048576,
-        "max_output_tokens": 8192,
-        "max_images_per_prompt": 3000,
-        "max_videos_per_prompt": 10,
-        "max_video_length": 1,
-        "max_audio_length_hours": 8.4,
-        "max_audio_per_prompt": 1,
-        "max_pdf_size_mb": 30,
-        "input_cost_per_audio_token": 0.0000007,
-        "input_cost_per_token": 0.0000001,
-        "output_cost_per_token": 0.0000004,
-        "litellm_provider": "gemini",
-        "mode": "chat",
-        "rpm": 10000,
-        "tpm": 10000000,
-        "supports_system_messages": true,
-        "supports_function_calling": true,
-        "supports_vision": true,
-        "supports_response_schema": true,
-        "supports_audio_output": true,
-        "supports_tool_choice": true,
-        "source": "https://ai.google.dev/pricing#2_0flash"
-    },
     "gemini-2.0-flash-001": {
         "max_tokens": 8192,
         "max_input_tokens": 1048576,
@@ -4089,6 +4435,69 @@
         "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
         "supports_tool_choice": true
     },
+    "gemini/gemini-2.0-pro-exp-02-05": {
+        "max_tokens": 8192,
+        "max_input_tokens": 2097152,
+        "max_output_tokens": 8192,
+        "max_images_per_prompt": 3000,
+        "max_videos_per_prompt": 10,
+        "max_video_length": 1,
+        "max_audio_length_hours": 8.4,
+        "max_audio_per_prompt": 1,
+        "max_pdf_size_mb": 30,
+        "input_cost_per_image": 0,
+        "input_cost_per_video_per_second": 0,
+        "input_cost_per_audio_per_second": 0,
+        "input_cost_per_token": 0,
+        "input_cost_per_character": 0,
+        "input_cost_per_token_above_128k_tokens": 0,
+        "input_cost_per_character_above_128k_tokens": 0,
+        "input_cost_per_image_above_128k_tokens": 0,
+        "input_cost_per_video_per_second_above_128k_tokens": 0,
+        "input_cost_per_audio_per_second_above_128k_tokens": 0,
+        "output_cost_per_token": 0,
+        "output_cost_per_character": 0,
+        "output_cost_per_token_above_128k_tokens": 0,
+        "output_cost_per_character_above_128k_tokens": 0,
+        "litellm_provider": "gemini",
+        "mode": "chat",
+        "rpm": 2,
+        "tpm": 1000000,
+        "supports_system_messages": true,
+        "supports_function_calling": true,
+        "supports_vision": true,
+        "supports_audio_input": true,
+        "supports_video_input": true,
+        "supports_pdf_input": true,
+        "supports_response_schema": true,
+        "supports_tool_choice": true,
+        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
+    },
+    "gemini/gemini-2.0-flash": {
+        "max_tokens": 8192,
+        "max_input_tokens": 1048576,
+        "max_output_tokens": 8192,
+        "max_images_per_prompt": 3000,
+        "max_videos_per_prompt": 10,
+        "max_video_length": 1,
+        "max_audio_length_hours": 8.4,
+        "max_audio_per_prompt": 1,
+        "max_pdf_size_mb": 30,
+        "input_cost_per_audio_token": 0.0000007,
+        "input_cost_per_token": 0.0000001,
+        "output_cost_per_token": 0.0000004,
+        "litellm_provider": "gemini",
+        "mode": "chat",
+        "rpm": 10000,
+        "tpm": 10000000,
+        "supports_system_messages": true,
+        "supports_function_calling": true,
+        "supports_vision": true,
+        "supports_response_schema": true,
+        "supports_audio_output": true,
+        "supports_tool_choice": true,
+        "source": "https://ai.google.dev/pricing#2_0flash"
+    },
     "gemini/gemini-2.0-flash-001": {
         "max_tokens": 8192,
         "max_input_tokens": 1048576,
@@ -4178,7 +4587,7 @@
     "gemini/gemini-2.0-flash-thinking-exp": {
         "max_tokens": 8192,
         "max_input_tokens": 1048576,
-        "max_output_tokens": 8192,
+        "max_output_tokens": 65536,
         "max_images_per_prompt": 3000,
         "max_videos_per_prompt": 10,
         "max_video_length": 1,
@@ -4211,6 +4620,98 @@
         "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
         "supports_tool_choice": true
     },
+    "gemini/gemini-2.0-flash-thinking-exp-01-21": {
+        "max_tokens": 8192,
+        "max_input_tokens": 1048576,
+        "max_output_tokens": 65536,
+        "max_images_per_prompt": 3000,
+        "max_videos_per_prompt": 10,
+        "max_video_length": 1,
+        "max_audio_length_hours": 8.4,
+        "max_audio_per_prompt": 1,
+        "max_pdf_size_mb": 30,
+        "input_cost_per_image": 0,
+        "input_cost_per_video_per_second": 0,
+        "input_cost_per_audio_per_second": 0,
+        "input_cost_per_token": 0,
+        "input_cost_per_character": 0,
+        "input_cost_per_token_above_128k_tokens": 0,
+        "input_cost_per_character_above_128k_tokens": 0,
+        "input_cost_per_image_above_128k_tokens": 0,
+        "input_cost_per_video_per_second_above_128k_tokens": 0,
+        "input_cost_per_audio_per_second_above_128k_tokens": 0,
+        "output_cost_per_token": 0,
+        "output_cost_per_character": 0,
+        "output_cost_per_token_above_128k_tokens": 0,
+        "output_cost_per_character_above_128k_tokens": 0,
+        "litellm_provider": "gemini",
+        "mode": "chat",
+        "supports_system_messages": true,
+        "supports_function_calling": true,
+        "supports_vision": true,
+        "supports_response_schema": true,
+        "supports_audio_output": true,
+        "tpm": 4000000,
+        "rpm": 10,
+        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
+        "supports_tool_choice": true
+    },
+    "gemini/gemma-3-27b-it": {
+        "max_tokens": 8192,
+        "max_input_tokens": 131072,
+        "max_output_tokens": 8192,
+        "input_cost_per_image": 0,
+        "input_cost_per_video_per_second": 0,
+        "input_cost_per_audio_per_second": 0,
+        "input_cost_per_token": 0,
+        "input_cost_per_character": 0,
+        "input_cost_per_token_above_128k_tokens": 0,
+        "input_cost_per_character_above_128k_tokens": 0,
+        "input_cost_per_image_above_128k_tokens": 0,
+        "input_cost_per_video_per_second_above_128k_tokens": 0,
+        "input_cost_per_audio_per_second_above_128k_tokens": 0,
+        "output_cost_per_token": 0,
+        "output_cost_per_character": 0,
+        "output_cost_per_token_above_128k_tokens": 0,
+        "output_cost_per_character_above_128k_tokens": 0,
+        "litellm_provider": "gemini",
+        "mode": "chat",
+        "supports_system_messages": true,
+        "supports_function_calling": true,
+        "supports_vision": true,
+        "supports_response_schema": true,
+        "supports_audio_output": false,
+        "source": "https://aistudio.google.com",
+        "supports_tool_choice": true
+    },
+    "gemini/learnlm-1.5-pro-experimental": {
+        "max_tokens": 8192,
+        "max_input_tokens": 32767,
+        "max_output_tokens": 8192,
+        "input_cost_per_image": 0,
+        "input_cost_per_video_per_second": 0,
+        "input_cost_per_audio_per_second": 0,
+        "input_cost_per_token": 0,
+        "input_cost_per_character": 0,
+        "input_cost_per_token_above_128k_tokens": 0,
+        "input_cost_per_character_above_128k_tokens": 0,
+        "input_cost_per_image_above_128k_tokens": 0,
+        "input_cost_per_video_per_second_above_128k_tokens": 0,
+        "input_cost_per_audio_per_second_above_128k_tokens": 0,
+        "output_cost_per_token": 0,
+        "output_cost_per_character": 0,
+        "output_cost_per_token_above_128k_tokens": 0,
+        "output_cost_per_character_above_128k_tokens": 0,
+        "litellm_provider": "gemini",
+        "mode": "chat",
+        "supports_system_messages": true,
+        "supports_function_calling": true,
+        "supports_vision": true,
+        "supports_response_schema": true,
+        "supports_audio_output": false,
+        "source": "https://aistudio.google.com",
+        "supports_tool_choice": true
+    },
     "vertex_ai/claude-3-sonnet": {
         "max_tokens": 4096,
         "max_input_tokens": 200000,
@@ -5378,6 +5879,7 @@
         "input_cost_per_token": 0.00000010,
         "output_cost_per_token": 0.00000,
         "litellm_provider": "cohere",
+        "supports_embedding_image_input": true,
         "mode": "embedding"
     },
     "embed-english-v2.0": {
@@ -6541,7 +7043,7 @@
         "supports_response_schema": true
     },
     "us.amazon.nova-micro-v1:0": {
-        "max_tokens": 4096, 
+        "max_tokens": 4096,
         "max_input_tokens": 300000,
         "max_output_tokens": 4096,
         "input_cost_per_token": 0.000000035,
@@ -6579,7 +7081,7 @@
         "supports_response_schema": true
     },
     "us.amazon.nova-lite-v1:0": {
-        "max_tokens": 4096, 
+        "max_tokens": 4096,
        "max_input_tokens": 128000,
         "max_output_tokens": 4096,
         "input_cost_per_token": 0.00000006,
@@ -6621,7 +7123,7 @@
         "supports_response_schema": true
     },
     "us.amazon.nova-pro-v1:0": {
-        "max_tokens": 4096, 
+        "max_tokens": 4096,
         "max_input_tokens": 300000,
         "max_output_tokens": 4096,
         "input_cost_per_token": 0.0000008,
@@ -6634,6 +7136,12 @@
         "supports_prompt_caching": true,
         "supports_response_schema": true
     },
+    "1024-x-1024/50-steps/bedrock/amazon.nova-canvas-v1:0": {
+        "max_input_tokens": 2600,
+        "output_cost_per_image": 0.06,
+        "litellm_provider": "bedrock",
+        "mode": "image_generation"
+    },
     "eu.amazon.nova-pro-v1:0": {
         "max_tokens": 4096,
         "max_input_tokens": 300000,
@@ -7553,8 +8061,9 @@
         "max_input_tokens": 512,
         "input_cost_per_token": 0.0000001,
         "output_cost_per_token": 0.000000,
-        "litellm_provider": "bedrock", 
-        "mode": "embedding"
+        "litellm_provider": "bedrock",
+        "mode": "embedding",
+        "supports_embedding_image_input": true
     },
     "cohere.embed-multilingual-v3": {
         "max_tokens": 512,
@@ -7562,7 +8071,20 @@
         "input_cost_per_token": 0.0000001,
         "output_cost_per_token": 0.000000,
         "litellm_provider": "bedrock",
-        "mode": "embedding"
+        "mode": "embedding",
+        "supports_embedding_image_input": true
+    },
+    "us.deepseek.r1-v1:0": {
+        "max_tokens": 4096,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.00000135,
+        "output_cost_per_token": 0.0000054,
+        "litellm_provider": "bedrock_converse",
+        "mode": "chat",
+        "supports_function_calling": false,
+        "supports_tool_choice": false
+    },
     "meta.llama3-3-70b-instruct-v1:0": {
         "max_tokens": 4096,
@@ -7978,22 +8500,22 @@
         "mode": "image_generation"
     },
     "stability.sd3-5-large-v1:0": {
-        "max_tokens": 77, 
-        "max_input_tokens": 77, 
+        "max_tokens": 77,
+        "max_input_tokens": 77,
         "output_cost_per_image": 0.08,
         "litellm_provider": "bedrock",
         "mode": "image_generation"
     },
     "stability.stable-image-core-v1:0": {
-        "max_tokens": 77, 
-        "max_input_tokens": 77, 
+        "max_tokens": 77,
+        "max_input_tokens": 77,
         "output_cost_per_image": 0.04,
         "litellm_provider": "bedrock",
         "mode": "image_generation"
     },
     "stability.stable-image-core-v1:1": {
-        "max_tokens": 77, 
-        "max_input_tokens": 77, 
+        "max_tokens": 77,
+        "max_input_tokens": 77,
         "output_cost_per_image": 0.04,
         "litellm_provider": "bedrock",
         "mode": "image_generation"
@@ -8006,8 +8528,8 @@
         "mode": "image_generation"
     },
     "stability.stable-image-ultra-v1:1": {
-        "max_tokens": 77, 
-        "max_input_tokens": 77, 
+        "max_tokens": 77,
+        "max_input_tokens": 77,
         "output_cost_per_image": 0.14,
         "litellm_provider": "bedrock",
         "mode": "image_generation"
@@ -9622,5 +10144,173 @@
         "output_cost_per_token": 0.000000018,
         "litellm_provider": "jina_ai",
         "mode": "rerank"
+    },
+    "snowflake/deepseek-r1": {
+        "max_tokens": 32768,
+        "max_input_tokens": 32768,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/snowflake-arctic": {
+        "max_tokens": 4096,
+        "max_input_tokens": 4096,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/claude-3-5-sonnet": {
+        "max_tokens": 18000,
+        "max_input_tokens": 18000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/mistral-large": {
+        "max_tokens": 32000,
+        "max_input_tokens": 32000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/mistral-large2": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/reka-flash": {
+        "max_tokens": 100000,
+        "max_input_tokens": 100000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/reka-core": {
+        "max_tokens": 32000,
+        "max_input_tokens": 32000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/jamba-instruct": {
+        "max_tokens": 256000,
+        "max_input_tokens": 256000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/jamba-1.5-mini": {
+        "max_tokens": 256000,
+        "max_input_tokens": 256000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/jamba-1.5-large": {
+        "max_tokens": 256000,
+        "max_input_tokens": 256000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/mixtral-8x7b": {
+        "max_tokens": 32000,
+        "max_input_tokens": 32000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/llama2-70b-chat": {
+        "max_tokens": 4096,
+        "max_input_tokens": 4096,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/llama3-8b": {
+        "max_tokens": 8000,
+        "max_input_tokens": 8000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/llama3-70b": {
+        "max_tokens": 8000,
+        "max_input_tokens": 8000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/llama3.1-8b": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/llama3.1-70b": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/llama3.3-70b": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/snowflake-llama-3.3-70b": {
+        "max_tokens": 8000,
+        "max_input_tokens": 8000,
+        "max_output_tokens": 8192,
+        "litellm_provider": "snowflake",
+        "mode": "chat"
+    },
+    "snowflake/llama3.1-405b": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
"max_output_tokens": 8192, + "litellm_provider": "snowflake", + "mode": "chat" + }, + "snowflake/snowflake-llama-3.1-405b": { + "max_tokens": 8000, + "max_input_tokens": 8000, + "max_output_tokens": 8192, + "litellm_provider": "snowflake", + "mode": "chat" + }, + "snowflake/llama3.2-1b": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "litellm_provider": "snowflake", + "mode": "chat" + }, + "snowflake/llama3.2-3b": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "litellm_provider": "snowflake", + "mode": "chat" + }, + "snowflake/mistral-7b": { + "max_tokens": 32000, + "max_input_tokens": 32000, + "max_output_tokens": 8192, + "litellm_provider": "snowflake", + "mode": "chat" + }, + "snowflake/gemma-7b": { + "max_tokens": 8000, + "max_input_tokens": 8000, + "max_output_tokens": 8192, + "litellm_provider": "snowflake", + "mode": "chat" } } diff --git a/litellm/proxy/_experimental/out/_next/static/i92Qc9kkJSCtCgV3DDmdu/_buildManifest.js b/litellm/proxy/_experimental/out/_next/static/9yIyUkG6nV2cO0gn7kJ-Q/_buildManifest.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/i92Qc9kkJSCtCgV3DDmdu/_buildManifest.js rename to litellm/proxy/_experimental/out/_next/static/9yIyUkG6nV2cO0gn7kJ-Q/_buildManifest.js diff --git a/litellm/proxy/_experimental/out/_next/static/i92Qc9kkJSCtCgV3DDmdu/_ssgManifest.js b/litellm/proxy/_experimental/out/_next/static/9yIyUkG6nV2cO0gn7kJ-Q/_ssgManifest.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/i92Qc9kkJSCtCgV3DDmdu/_ssgManifest.js rename to litellm/proxy/_experimental/out/_next/static/9yIyUkG6nV2cO0gn7kJ-Q/_ssgManifest.js diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/157-cf7bc8b3ae1b80ba.js b/litellm/proxy/_experimental/out/_next/static/chunks/157-cf7bc8b3ae1b80ba.js deleted file mode 100644 index 6a596c25d8..0000000000 --- a/litellm/proxy/_experimental/out/_next/static/chunks/157-cf7bc8b3ae1b80ba.js +++ /dev/null @@ -1,11 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[157],{12660:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M917.7 148.8l-42.4-42.4c-1.6-1.6-3.6-2.3-5.7-2.3s-4.1.8-5.7 2.3l-76.1 76.1a199.27 199.27 0 00-112.1-34.3c-51.2 0-102.4 19.5-141.5 58.6L432.3 308.7a8.03 8.03 0 000 11.3L704 591.7c1.6 1.6 3.6 2.3 5.7 2.3 2 0 4.1-.8 5.7-2.3l101.9-101.9c68.9-69 77-175.7 24.3-253.5l76.1-76.1c3.1-3.2 3.1-8.3 0-11.4zM769.1 441.7l-59.4 59.4-186.8-186.8 59.4-59.4c24.9-24.9 58.1-38.7 93.4-38.7 35.3 0 68.4 13.7 93.4 38.7 24.9 24.9 38.7 58.1 38.7 93.4 0 35.3-13.8 68.4-38.7 93.4zm-190.2 105a8.03 8.03 0 00-11.3 0L501 613.3 410.7 523l66.7-66.7c3.1-3.1 3.1-8.2 0-11.3L441 408.6a8.03 8.03 0 00-11.3 0L363 475.3l-43-43a7.85 7.85 0 00-5.7-2.3c-2 0-4.1.8-5.7 2.3L206.8 534.2c-68.9 69-77 175.7-24.3 253.5l-76.1 76.1a8.03 8.03 0 000 11.3l42.4 42.4c1.6 1.6 3.6 2.3 5.7 2.3s4.1-.8 5.7-2.3l76.1-76.1c33.7 22.9 72.9 34.3 112.1 34.3 51.2 0 102.4-19.5 141.5-58.6l101.9-101.9c3.1-3.1 3.1-8.2 0-11.3l-43-43 66.7-66.7c3.1-3.1 3.1-8.2 0-11.3l-36.6-36.2zM441.7 769.1a131.32 131.32 0 01-93.4 38.7c-35.3 0-68.4-13.7-93.4-38.7a131.32 131.32 0 01-38.7-93.4c0-35.3 13.7-68.4 38.7-93.4l59.4-59.4 186.8 186.8-59.4 59.4z"}}]},name:"api",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},88009:function(e,t,n){"use 
strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M464 144H160c-8.8 0-16 7.2-16 16v304c0 8.8 7.2 16 16 16h304c8.8 0 16-7.2 16-16V160c0-8.8-7.2-16-16-16zm-52 268H212V212h200v200zm452-268H560c-8.8 0-16 7.2-16 16v304c0 8.8 7.2 16 16 16h304c8.8 0 16-7.2 16-16V160c0-8.8-7.2-16-16-16zm-52 268H612V212h200v200zM464 544H160c-8.8 0-16 7.2-16 16v304c0 8.8 7.2 16 16 16h304c8.8 0 16-7.2 16-16V560c0-8.8-7.2-16-16-16zm-52 268H212V612h200v200zm452-268H560c-8.8 0-16 7.2-16 16v304c0 8.8 7.2 16 16 16h304c8.8 0 16-7.2 16-16V560c0-8.8-7.2-16-16-16zm-52 268H612V612h200v200z"}}]},name:"appstore",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},37527:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M894 462c30.9 0 43.8-39.7 18.7-58L530.8 126.2a31.81 31.81 0 00-37.6 0L111.3 404c-25.1 18.2-12.2 58 18.8 58H192v374h-72c-4.4 0-8 3.6-8 8v52c0 4.4 3.6 8 8 8h784c4.4 0 8-3.6 8-8v-52c0-4.4-3.6-8-8-8h-72V462h62zM512 196.7l271.1 197.2H240.9L512 196.7zM264 462h117v374H264V462zm189 0h117v374H453V462zm307 374H642V462h118v374z"}}]},name:"bank",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},9775:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M888 792H200V168c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v688c0 4.4 3.6 8 8 8h752c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8zm-600-80h56c4.4 0 8-3.6 8-8V560c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v144c0 4.4 3.6 8 8 8zm152 0h56c4.4 0 8-3.6 8-8V384c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v320c0 4.4 3.6 8 8 8zm152 0h56c4.4 0 8-3.6 8-8V462c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v242c0 4.4 3.6 8 8 8zm152 0h56c4.4 0 8-3.6 8-8V304c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v400c0 4.4 3.6 8 8 8z"}}]},name:"bar-chart",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},68208:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M856 376H648V168c0-8.8-7.2-16-16-16H168c-8.8 0-16 7.2-16 16v464c0 8.8 7.2 16 16 16h208v208c0 8.8 7.2 16 16 16h464c8.8 0 16-7.2 16-16V392c0-8.8-7.2-16-16-16zm-480 16v188H220V220h360v156H392c-8.8 0-16 7.2-16 16zm204 52v136H444V444h136zm224 360H444V648h188c8.8 0 16-7.2 16-16V444h156v360z"}}]},name:"block",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},9738:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M912 190h-69.9c-9.8 0-19.1 4.5-25.1 12.2L404.7 724.5 207 474a32 32 0 00-25.1-12.2H112c-6.7 0-10.4 7.7-6.3 12.9l273.9 347c12.8 16.2 37.4 16.2 50.3 0l488.4-618.9c4.1-5.1.4-12.8-6.3-12.8z"}}]},name:"check",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},44625:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 
896",focusable:"false"},children:[{tag:"path",attrs:{d:"M832 64H192c-17.7 0-32 14.3-32 32v832c0 17.7 14.3 32 32 32h640c17.7 0 32-14.3 32-32V96c0-17.7-14.3-32-32-32zm-600 72h560v208H232V136zm560 480H232V408h560v208zm0 272H232V680h560v208zM304 240a40 40 0 1080 0 40 40 0 10-80 0zm0 272a40 40 0 1080 0 40 40 0 10-80 0zm0 272a40 40 0 1080 0 40 40 0 10-80 0z"}}]},name:"database",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},70464:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M884 256h-75c-5.1 0-9.9 2.5-12.9 6.6L512 654.2 227.9 262.6c-3-4.1-7.8-6.6-12.9-6.6h-75c-6.5 0-10.3 7.4-6.5 12.7l352.6 486.1c12.8 17.6 39 17.6 51.7 0l352.6-486.1c3.9-5.3.1-12.7-6.4-12.7z"}}]},name:"down",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},73879:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M505.7 661a8 8 0 0012.6 0l112-141.7c4.1-5.2.4-12.9-6.3-12.9h-74.1V168c0-4.4-3.6-8-8-8h-60c-4.4 0-8 3.6-8 8v338.3H400c-6.7 0-10.4 7.7-6.3 12.9l112 141.8zM878 626h-60c-4.4 0-8 3.6-8 8v154H214V634c0-4.4-3.6-8-8-8h-60c-4.4 0-8 3.6-8 8v198c0 17.7 14.3 32 32 32h684c17.7 0 32-14.3 32-32V634c0-4.4-3.6-8-8-8z"}}]},name:"download",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},39760:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M176 511a56 56 0 10112 0 56 56 0 10-112 0zm280 0a56 56 0 10112 0 56 56 0 10-112 0zm280 0a56 56 0 10112 0 56 56 0 10-112 0z"}}]},name:"ellipsis",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},41169:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 472a40 40 0 1080 0 40 40 0 10-80 0zm367 352.9L696.3 352V178H768v-68H256v68h71.7v174L145 824.9c-2.8 7.4-4.3 15.2-4.3 23.1 0 35.3 28.7 64 64 64h614.6c7.9 0 15.7-1.5 23.1-4.3 33-12.7 49.4-49.8 36.6-82.8zM395.7 364.7V180h232.6v184.7L719.2 600c-20.7-5.3-42.1-8-63.9-8-61.2 0-119.2 21.5-165.3 60a188.78 188.78 0 01-121.3 43.9c-32.7 0-64.1-8.3-91.8-23.7l118.8-307.5zM210.5 844l41.7-107.8c35.7 18.1 75.4 27.8 116.6 27.8 61.2 0 119.2-21.5 165.3-60 33.9-28.2 76.3-43.9 121.3-43.9 35 0 68.4 9.5 97.6 27.1L813.5 844h-603z"}}]},name:"experiment",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},6520:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M942.2 486.2C847.4 286.5 704.1 186 512 186c-192.2 0-335.4 100.5-430.2 300.3a60.3 60.3 0 000 51.5C176.6 737.5 319.9 838 512 838c192.2 0 335.4-100.5 430.2-300.3 7.7-16.2 7.7-35 0-51.5zM512 766c-161.3 0-279.4-81.8-362.7-254C232.6 339.8 350.7 258 512 258c161.3 0 279.4 81.8 362.7 254C791.5 684.2 673.4 766 512 766zm-4-430c-97.2 0-176 78.8-176 176s78.8 176 176 176 176-78.8 176-176-78.8-176-176-176zm0 288c-61.9 
0-112-50.1-112-112s50.1-112 112-112 112 50.1 112 112-50.1 112-112 112z"}}]},name:"eye",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},15424:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm0 820c-205.4 0-372-166.6-372-372s166.6-372 372-372 372 166.6 372 372-166.6 372-372 372z"}},{tag:"path",attrs:{d:"M464 336a48 48 0 1096 0 48 48 0 10-96 0zm72 112h-48c-4.4 0-8 3.6-8 8v272c0 4.4 3.6 8 8 8h48c4.4 0 8-3.6 8-8V456c0-4.4-3.6-8-8-8z"}}]},name:"info-circle",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},92403:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M608 112c-167.9 0-304 136.1-304 304 0 70.3 23.9 135 63.9 186.5l-41.1 41.1-62.3-62.3a8.15 8.15 0 00-11.4 0l-39.8 39.8a8.15 8.15 0 000 11.4l62.3 62.3-44.9 44.9-62.3-62.3a8.15 8.15 0 00-11.4 0l-39.8 39.8a8.15 8.15 0 000 11.4l62.3 62.3-65.3 65.3a8.03 8.03 0 000 11.3l42.3 42.3c3.1 3.1 8.2 3.1 11.3 0l253.6-253.6A304.06 304.06 0 00608 720c167.9 0 304-136.1 304-304S775.9 112 608 112zm161.2 465.2C726.2 620.3 668.9 644 608 644c-60.9 0-118.2-23.7-161.2-66.8-43.1-43-66.8-100.3-66.8-161.2 0-60.9 23.7-118.2 66.8-161.2 43-43.1 100.3-66.8 161.2-66.8 60.9 0 118.2 23.7 161.2 66.8 43.1 43 66.8 100.3 66.8 161.2 0 60.9-23.7 118.2-66.8 161.2z"}}]},name:"key",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},15327:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M724 218.3V141c0-6.7-7.7-10.4-12.9-6.3L260.3 486.8a31.86 31.86 0 000 50.3l450.8 352.1c5.3 4.1 12.9.4 12.9-6.3v-77.3c0-4.9-2.3-9.6-6.1-12.6l-360-281 360-281.1c3.8-3 6.1-7.7 6.1-12.6z"}}]},name:"left",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},48231:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M888 792H200V168c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v688c0 4.4 3.6 8 8 8h752c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8zM305.8 637.7c3.1 3.1 8.1 3.1 11.3 0l138.3-137.6L583 628.5c3.1 3.1 8.2 3.1 11.3 0l275.4-275.3c3.1-3.1 3.1-8.2 0-11.3l-39.6-39.6a8.03 8.03 0 00-11.3 0l-230 229.9L461.4 404a8.03 8.03 0 00-11.3 0L266.3 586.7a8.03 8.03 0 000 11.3l39.5 39.7z"}}]},name:"line-chart",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},40428:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M868 732h-70.3c-4.8 0-9.3 2.1-12.3 5.8-7 8.5-14.5 16.7-22.4 24.5a353.84 353.84 0 01-112.7 75.9A352.8 352.8 0 01512.4 866c-47.9 0-94.3-9.4-137.9-27.8a353.84 353.84 0 01-112.7-75.9 353.28 353.28 0 01-76-112.5C167.3 606.2 158 559.9 158 512s9.4-94.2 27.8-137.8c17.8-42.1 43.4-80 76-112.5s70.5-58.1 112.7-75.9c43.6-18.4 90-27.8 137.9-27.8 47.9 0 94.3 
9.3 137.9 27.8 42.2 17.8 80.1 43.4 112.7 75.9 7.9 7.9 15.3 16.1 22.4 24.5 3 3.7 7.6 5.8 12.3 5.8H868c6.3 0 10.2-7 6.7-12.3C798 160.5 663.8 81.6 511.3 82 271.7 82.6 79.6 277.1 82 516.4 84.4 751.9 276.2 942 512.4 942c152.1 0 285.7-78.8 362.3-197.7 3.4-5.3-.4-12.3-6.7-12.3zm88.9-226.3L815 393.7c-5.3-4.2-13-.4-13 6.3v76H488c-4.4 0-8 3.6-8 8v56c0 4.4 3.6 8 8 8h314v76c0 6.7 7.8 10.5 13 6.3l141.9-112a8 8 0 000-12.6z"}}]},name:"logout",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},45246:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M696 480H328c-4.4 0-8 3.6-8 8v48c0 4.4 3.6 8 8 8h368c4.4 0 8-3.6 8-8v-48c0-4.4-3.6-8-8-8z"}},{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm0 820c-205.4 0-372-166.6-372-372s166.6-372 372-372 372 166.6 372 372-166.6 372-372 372z"}}]},name:"minus-circle",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},28595:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm0 820c-205.4 0-372-166.6-372-372s166.6-372 372-372 372 166.6 372 372-166.6 372-372 372z"}},{tag:"path",attrs:{d:"M719.4 499.1l-296.1-215A15.9 15.9 0 00398 297v430c0 13.1 14.8 20.5 25.3 12.9l296.1-215a15.9 15.9 0 000-25.8zm-257.6 134V390.9L628.5 512 461.8 633.1z"}}]},name:"play-circle",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},96473:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M482 152h60q8 0 8 8v704q0 8-8 8h-60q-8 0-8-8V160q0-8 8-8z"}},{tag:"path",attrs:{d:"M192 474h672q8 0 8 8v60q0 8-8 8H160q-8 0-8-8v-60q0-8 8-8z"}}]},name:"plus",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},57400:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"0 0 1024 1024",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64L128 192v384c0 212.1 171.9 384 384 384s384-171.9 384-384V192L512 64zm312 512c0 172.3-139.7 312-312 312S200 748.3 200 576V246l312-110 312 110v330z"}},{tag:"path",attrs:{d:"M378.4 475.1a35.91 35.91 0 00-50.9 0 35.91 35.91 0 000 50.9l129.4 129.4 2.1 2.1a33.98 33.98 0 0048.1 0L730.6 434a33.98 33.98 0 000-48.1l-2.8-2.8a33.98 33.98 0 00-48.1 0L483 579.7 378.4 475.1z"}}]},name:"safety",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},29436:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M909.6 854.5L649.9 594.8C690.2 542.7 712 479 712 412c0-80.2-31.3-155.4-87.9-212.1-56.6-56.7-132-87.9-212.1-87.9s-155.5 31.3-212.1 87.9C143.2 256.5 112 331.8 112 412c0 80.1 31.3 155.5 87.9 212.1C256.5 680.8 331.8 712 412 712c67 0 130.6-21.8 182.7-62l259.7 259.6a8.2 8.2 0 0011.6 0l43.6-43.5a8.2 8.2 0 000-11.6zM570.4 570.4C528 612.7 471.8 
636 412 636s-116-23.3-158.4-65.6C211.3 528 188 471.8 188 412s23.3-116.1 65.6-158.4C296 211.3 352.2 188 412 188s116.1 23.2 158.4 65.6S636 352.2 636 412s-23.3 116.1-65.6 158.4z"}}]},name:"search",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},55322:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M924.8 625.7l-65.5-56c3.1-19 4.7-38.4 4.7-57.8s-1.6-38.8-4.7-57.8l65.5-56a32.03 32.03 0 009.3-35.2l-.9-2.6a443.74 443.74 0 00-79.7-137.9l-1.8-2.1a32.12 32.12 0 00-35.1-9.5l-81.3 28.9c-30-24.6-63.5-44-99.7-57.6l-15.7-85a32.05 32.05 0 00-25.8-25.7l-2.7-.5c-52.1-9.4-106.9-9.4-159 0l-2.7.5a32.05 32.05 0 00-25.8 25.7l-15.8 85.4a351.86 351.86 0 00-99 57.4l-81.9-29.1a32 32 0 00-35.1 9.5l-1.8 2.1a446.02 446.02 0 00-79.7 137.9l-.9 2.6c-4.5 12.5-.8 26.5 9.3 35.2l66.3 56.6c-3.1 18.8-4.6 38-4.6 57.1 0 19.2 1.5 38.4 4.6 57.1L99 625.5a32.03 32.03 0 00-9.3 35.2l.9 2.6c18.1 50.4 44.9 96.9 79.7 137.9l1.8 2.1a32.12 32.12 0 0035.1 9.5l81.9-29.1c29.8 24.5 63.1 43.9 99 57.4l15.8 85.4a32.05 32.05 0 0025.8 25.7l2.7.5a449.4 449.4 0 00159 0l2.7-.5a32.05 32.05 0 0025.8-25.7l15.7-85a350 350 0 0099.7-57.6l81.3 28.9a32 32 0 0035.1-9.5l1.8-2.1c34.8-41.1 61.6-87.5 79.7-137.9l.9-2.6c4.5-12.3.8-26.3-9.3-35zM788.3 465.9c2.5 15.1 3.8 30.6 3.8 46.1s-1.3 31-3.8 46.1l-6.6 40.1 74.7 63.9a370.03 370.03 0 01-42.6 73.6L721 702.8l-31.4 25.8c-23.9 19.6-50.5 35-79.3 45.8l-38.1 14.3-17.9 97a377.5 377.5 0 01-85 0l-17.9-97.2-37.8-14.5c-28.5-10.8-55-26.2-78.7-45.7l-31.4-25.9-93.4 33.2c-17-22.9-31.2-47.6-42.6-73.6l75.5-64.5-6.5-40c-2.4-14.9-3.7-30.3-3.7-45.5 0-15.3 1.2-30.6 3.7-45.5l6.5-40-75.5-64.5c11.3-26.1 25.6-50.7 42.6-73.6l93.4 33.2 31.4-25.9c23.7-19.5 50.2-34.9 78.7-45.7l37.9-14.3 17.9-97.2c28.1-3.2 56.8-3.2 85 0l17.9 97 38.1 14.3c28.7 10.8 55.4 26.2 79.3 45.8l31.4 25.8 92.8-32.9c17 22.9 31.2 47.6 42.6 73.6L781.8 426l6.5 39.9zM512 326c-97.2 0-176 78.8-176 176s78.8 176 176 176 176-78.8 176-176-78.8-176-176-176zm79.2 255.2A111.6 111.6 0 01512 614c-29.9 0-58-11.7-79.2-32.8A111.6 111.6 0 01400 502c0-29.9 11.7-58 32.8-79.2C454 401.6 482.1 390 512 390c29.9 0 58 11.6 79.2 32.8A111.6 111.6 0 01624 502c0 29.9-11.7 58-32.8 79.2z"}}]},name:"setting",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},41361:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M824.2 699.9a301.55 301.55 0 00-86.4-60.4C783.1 602.8 812 546.8 812 484c0-110.8-92.4-201.7-203.2-200-109.1 1.7-197 90.6-197 200 0 62.8 29 118.8 74.2 155.5a300.95 300.95 0 00-86.4 60.4C345 754.6 314 826.8 312 903.8a8 8 0 008 8.2h56c4.3 0 7.9-3.4 8-7.7 1.9-58 25.4-112.3 66.7-153.5A226.62 226.62 0 01612 684c60.9 0 118.2 23.7 161.3 66.8C814.5 792 838 846.3 840 904.3c.1 4.3 3.7 7.7 8 7.7h56a8 8 0 008-8.2c-2-77-33-149.2-87.8-203.9zM612 612c-34.2 0-66.4-13.3-90.5-37.5a126.86 126.86 0 01-37.5-91.8c.3-32.8 13.4-64.5 36.3-88 24-24.6 56.1-38.3 90.4-38.7 33.9-.3 66.8 12.9 91 36.6 24.8 24.3 38.4 56.8 38.4 91.4 0 34.2-13.3 66.3-37.5 90.5A127.3 127.3 0 01612 612zM361.5 510.4c-.9-8.7-1.4-17.5-1.4-26.4 0-15.9 1.5-31.4 4.3-46.5.7-3.6-1.2-7.3-4.5-8.8-13.6-6.1-26.1-14.5-36.9-25.1a127.54 127.54 0 01-38.7-95.4c.9-32.1 13.8-62.6 36.3-85.6 24.7-25.3 57.9-39.1 93.2-38.7 31.9.3 62.7 12.6 86 34.4 7.9 7.4 14.7 15.6 20.4 24.4 2 
3.1 5.9 4.4 9.3 3.2 17.6-6.1 36.2-10.4 55.3-12.4 5.6-.6 8.8-6.6 6.3-11.6-32.5-64.3-98.9-108.7-175.7-109.9-110.9-1.7-203.3 89.2-203.3 199.9 0 62.8 28.9 118.8 74.2 155.5-31.8 14.7-61.1 35-86.5 60.4-54.8 54.7-85.8 126.9-87.8 204a8 8 0 008 8.2h56.1c4.3 0 7.9-3.4 8-7.7 1.9-58 25.4-112.3 66.7-153.5 29.4-29.4 65.4-49.8 104.7-59.7 3.9-1 6.5-4.7 6-8.7z"}}]},name:"team",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},19574:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M848 359.3H627.7L825.8 109c4.1-5.3.4-13-6.3-13H436c-2.8 0-5.5 1.5-6.9 4L170 547.5c-3.1 5.3.7 12 6.9 12h174.4l-89.4 357.6c-1.9 7.8 7.5 13.3 13.3 7.7L853.5 373c5.2-4.9 1.7-13.7-5.5-13.7zM378.2 732.5l60.3-241H281.1l189.6-327.4h224.6L487 427.4h211L378.2 732.5z"}}]},name:"thunderbolt",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},3632:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M400 317.7h73.9V656c0 4.4 3.6 8 8 8h60c4.4 0 8-3.6 8-8V317.7H624c6.7 0 10.4-7.7 6.3-12.9L518.3 163a8 8 0 00-12.6 0l-112 141.7c-4.1 5.3-.4 13 6.3 13zM878 626h-60c-4.4 0-8 3.6-8 8v154H214V634c0-4.4-3.6-8-8-8h-60c-4.4 0-8 3.6-8 8v198c0 17.7 14.3 32 32 32h684c17.7 0 32-14.3 32-32V634c0-4.4-3.6-8-8-8z"}}]},name:"upload",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},15883:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M858.5 763.6a374 374 0 00-80.6-119.5 375.63 375.63 0 00-119.5-80.6c-.4-.2-.8-.3-1.2-.5C719.5 518 760 444.7 760 362c0-137-111-248-248-248S264 225 264 362c0 82.7 40.5 156 102.8 201.1-.4.2-.8.3-1.2.5-44.8 18.9-85 46-119.5 80.6a375.63 375.63 0 00-80.6 119.5A371.7 371.7 0 00136 901.8a8 8 0 008 8.2h60c4.4 0 7.9-3.5 8-7.8 2-77.2 33-149.5 87.8-204.3 56.7-56.7 132-87.9 212.2-87.9s155.5 31.2 212.2 87.9C779 752.7 810 825 812 902.2c.1 4.4 3.6 7.8 8 7.8h60a8 8 0 008-8.2c-1-47.8-10.9-94.3-29.5-138.2zM512 534c-45.9 0-89.1-17.9-121.6-50.4S340 407.9 340 362c0-45.9 17.9-89.1 50.4-121.6S466.1 190 512 190s89.1 17.9 121.6 50.4S684 316.1 684 362c0 45.9-17.9 89.1-50.4 121.6S557.9 534 512 534z"}}]},name:"user",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},58747:function(e,t,n){"use strict";n.d(t,{Z:function(){return i}});var r=n(5853),o=n(2265);let i=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M11.9999 13.1714L16.9497 8.22168L18.3639 9.63589L11.9999 15.9999L5.63599 9.63589L7.0502 8.22168L11.9999 13.1714Z"}))}},4537:function(e,t,n){"use strict";n.d(t,{Z:function(){return i}});var r=n(5853),o=n(2265);let i=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M12 22C6.47715 22 2 17.5228 2 12C2 6.47715 6.47715 2 12 2C17.5228 2 22 6.47715 22 12C22 17.5228 17.5228 22 12 22ZM12 10.5858L9.17157 7.75736L7.75736 9.17157L10.5858 12L7.75736 
14.8284L9.17157 16.2426L12 13.4142L14.8284 16.2426L16.2426 14.8284L13.4142 12L16.2426 9.17157L14.8284 7.75736L12 10.5858Z"}))}},69907:function(e,t,n){"use strict";n.d(t,{Z:function(){return em}});var r=n(5853),o=n(2265),i=n(47625),a=n(93765),l=n(61994),c=n(59221),s=n(86757),u=n.n(s),d=n(95645),f=n.n(d),p=n(77571),h=n.n(p),m=n(82559),g=n.n(m),v=n(21652),y=n.n(v),b=n(57165),x=n(81889),w=n(9841),S=n(58772),k=n(34067),E=n(16630),C=n(85355),O=n(82944),j=["layout","type","stroke","connectNulls","isRange","ref"];function P(e){return(P="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function M(){return(M=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(i,j));return o.createElement(w.m,{clipPath:n?"url(#clipPath-".concat(r,")"):null},o.createElement(b.H,M({},(0,O.L6)(d,!0),{points:e,connectNulls:s,type:l,baseLine:t,layout:a,stroke:"none",className:"recharts-area-area"})),"none"!==c&&o.createElement(b.H,M({},(0,O.L6)(this.props,!1),{className:"recharts-area-curve",layout:a,type:l,connectNulls:s,fill:"none",points:e})),"none"!==c&&u&&o.createElement(b.H,M({},(0,O.L6)(this.props,!1),{className:"recharts-area-curve",layout:a,type:l,connectNulls:s,fill:"none",points:t})))}},{key:"renderAreaWithAnimation",value:function(e,t){var n=this,r=this.props,i=r.points,a=r.baseLine,l=r.isAnimationActive,s=r.animationBegin,u=r.animationDuration,d=r.animationEasing,f=r.animationId,p=this.state,m=p.prevPoints,v=p.prevBaseLine;return o.createElement(c.ZP,{begin:s,duration:u,isActive:l,easing:d,from:{t:0},to:{t:1},key:"area-".concat(f),onAnimationEnd:this.handleAnimationEnd,onAnimationStart:this.handleAnimationStart},function(r){var l=r.t;if(m){var c,s=m.length/i.length,u=i.map(function(e,t){var n=Math.floor(t*s);if(m[n]){var r=m[n],o=(0,E.k4)(r.x,e.x),i=(0,E.k4)(r.y,e.y);return I(I({},e),{},{x:o(l),y:i(l)})}return e});return c=(0,E.hj)(a)&&"number"==typeof a?(0,E.k4)(v,a)(l):h()(a)||g()(a)?(0,E.k4)(v,0)(l):a.map(function(e,t){var n=Math.floor(t*s);if(v[n]){var r=v[n],o=(0,E.k4)(r.x,e.x),i=(0,E.k4)(r.y,e.y);return I(I({},e),{},{x:o(l),y:i(l)})}return e}),n.renderAreaStatically(u,c,e,t)}return o.createElement(w.m,null,o.createElement("defs",null,o.createElement("clipPath",{id:"animationClipPath-".concat(t)},n.renderClipRect(l))),o.createElement(w.m,{clipPath:"url(#animationClipPath-".concat(t,")")},n.renderAreaStatically(i,a,e,t)))})}},{key:"renderArea",value:function(e,t){var n=this.props,r=n.points,o=n.baseLine,i=n.isAnimationActive,a=this.state,l=a.prevPoints,c=a.prevBaseLine,s=a.totalLength;return i&&r&&r.length&&(!l&&s>0||!y()(l,r)||!y()(c,o))?this.renderAreaWithAnimation(e,t):this.renderAreaStatically(r,o,e,t)}},{key:"render",value:function(){var e,t=this.props,n=t.hide,r=t.dot,i=t.points,a=t.className,c=t.top,s=t.left,u=t.xAxis,d=t.yAxis,f=t.width,p=t.height,m=t.isAnimationActive,g=t.id;if(n||!i||!i.length)return null;var v=this.state.isAnimationFinished,y=1===i.length,b=(0,l.Z)("recharts-area",a),x=u&&u.allowDataOverflow,k=d&&d.allowDataOverflow,E=x||k,C=h()(g)?this.id:g,j=null!==(e=(0,O.L6)(r,!1))&&void 0!==e?e:{r:3,strokeWidth:2},P=j.r,M=j.strokeWidth,N=((0,O.$k)(r)?r:{}).clipDot,I=void 0===N||N,R=2*(void 0===P?3:P)+(void 0===M?2:M);return 
o.createElement(w.m,{className:b},x||k?o.createElement("defs",null,o.createElement("clipPath",{id:"clipPath-".concat(C)},o.createElement("rect",{x:x?s:s-f/2,y:k?c:c-p/2,width:x?f:2*f,height:k?p:2*p})),!I&&o.createElement("clipPath",{id:"clipPath-dots-".concat(C)},o.createElement("rect",{x:s-R/2,y:c-R/2,width:f+R,height:p+R}))):null,y?null:this.renderArea(E,C),(r||y)&&this.renderDots(E,I,C),(!m||v)&&S.e.renderCallByParent(this.props,i))}}],r=[{key:"getDerivedStateFromProps",value:function(e,t){return e.animationId!==t.prevAnimationId?{prevAnimationId:e.animationId,curPoints:e.points,curBaseLine:e.baseLine,prevPoints:t.curPoints,prevBaseLine:t.curBaseLine}:e.points!==t.curPoints||e.baseLine!==t.curBaseLine?{curPoints:e.points,curBaseLine:e.baseLine}:null}}],n&&R(a.prototype,n),r&&R(a,r),Object.defineProperty(a,"prototype",{writable:!1}),a}(o.PureComponent);D(L,"displayName","Area"),D(L,"defaultProps",{stroke:"#3182bd",fill:"#3182bd",fillOpacity:.6,xAxisId:0,yAxisId:0,legendType:"line",connectNulls:!1,points:[],dot:!1,activeDot:!0,hide:!1,isAnimationActive:!k.x.isSsr,animationBegin:0,animationDuration:1500,animationEasing:"ease"}),D(L,"getBaseValue",function(e,t,n,r){var o=e.layout,i=e.baseValue,a=t.props.baseValue,l=null!=a?a:i;if((0,E.hj)(l)&&"number"==typeof l)return l;var c="horizontal"===o?r:n,s=c.scale.domain();if("number"===c.type){var u=Math.max(s[0],s[1]),d=Math.min(s[0],s[1]);return"dataMin"===l?d:"dataMax"===l?u:u<0?u:Math.max(Math.min(s[0],s[1]),0)}return"dataMin"===l?s[0]:"dataMax"===l?s[1]:s[0]}),D(L,"getComposedData",function(e){var t,n=e.props,r=e.item,o=e.xAxis,i=e.yAxis,a=e.xAxisTicks,l=e.yAxisTicks,c=e.bandSize,s=e.dataKey,u=e.stackedData,d=e.dataStartIndex,f=e.displayedData,p=e.offset,h=n.layout,m=u&&u.length,g=L.getBaseValue(n,r,o,i),v="horizontal"===h,y=!1,b=f.map(function(e,t){m?n=u[d+t]:Array.isArray(n=(0,C.F$)(e,s))?y=!0:n=[g,n];var n,r=null==n[1]||m&&null==(0,C.F$)(e,s);return v?{x:(0,C.Hv)({axis:o,ticks:a,bandSize:c,entry:e,index:t}),y:r?null:i.scale(n[1]),value:n,payload:e}:{x:r?null:o.scale(n[1]),y:(0,C.Hv)({axis:i,ticks:l,bandSize:c,entry:e,index:t}),value:n,payload:e}});return t=m||y?b.map(function(e){var t=Array.isArray(e.value)?e.value[0]:null;return v?{x:e.x,y:null!=t&&null!=e.y?i.scale(t):null}:{x:null!=t?o.scale(t):null,y:e.y}}):v?i.scale(g):o.scale(g),I({points:b,baseLine:t,layout:h,isRange:y},p)}),D(L,"renderDotItem",function(e,t){return o.isValidElement(e)?o.cloneElement(e,t):u()(e)?e(t):o.createElement(x.o,M({},t,{className:"recharts-area-dot"}))});var z=n(97059),B=n(62994),F=n(25311),H=(0,a.z)({chartName:"AreaChart",GraphicalChild:L,axisComponents:[{axisType:"xAxis",AxisComp:z.K},{axisType:"yAxis",AxisComp:B.B}],formatAxisMap:F.t9}),q=n(56940),W=n(8147),K=n(22190),U=n(13137),V=["type","layout","connectNulls","ref"];function G(e){return(G="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function X(){return(X=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=Array(t);ni){c=[].concat(Q(r.slice(0,s)),[i-u]);break}var d=c.length%2==0?[0,l]:[l];return[].concat(Q(a.repeat(r,Math.floor(t/o))),Q(c),d).map(function(e){return"".concat(e,"px")}).join(", 
")}),eo(en(e),"id",(0,E.EL)("recharts-line-")),eo(en(e),"pathRef",function(t){e.mainCurve=t}),eo(en(e),"handleAnimationEnd",function(){e.setState({isAnimationFinished:!0}),e.props.onAnimationEnd&&e.props.onAnimationEnd()}),eo(en(e),"handleAnimationStart",function(){e.setState({isAnimationFinished:!1}),e.props.onAnimationStart&&e.props.onAnimationStart()}),e}return n=[{key:"componentDidMount",value:function(){if(this.props.isAnimationActive){var e=this.getTotalLength();this.setState({totalLength:e})}}},{key:"componentDidUpdate",value:function(){if(this.props.isAnimationActive){var e=this.getTotalLength();e!==this.state.totalLength&&this.setState({totalLength:e})}}},{key:"getTotalLength",value:function(){var e=this.mainCurve;try{return e&&e.getTotalLength&&e.getTotalLength()||0}catch(e){return 0}}},{key:"renderErrorBar",value:function(e,t){if(this.props.isAnimationActive&&!this.state.isAnimationFinished)return null;var n=this.props,r=n.points,i=n.xAxis,a=n.yAxis,l=n.layout,c=n.children,s=(0,O.NN)(c,U.W);if(!s)return null;var u=function(e,t){return{x:e.x,y:e.y,value:e.value,errorVal:(0,C.F$)(e.payload,t)}};return o.createElement(w.m,{clipPath:e?"url(#clipPath-".concat(t,")"):null},s.map(function(e){return o.cloneElement(e,{key:"bar-".concat(e.props.dataKey),data:r,xAxis:i,yAxis:a,layout:l,dataPointFormatter:u})}))}},{key:"renderDots",value:function(e,t,n){if(this.props.isAnimationActive&&!this.state.isAnimationFinished)return null;var r=this.props,i=r.dot,l=r.points,c=r.dataKey,s=(0,O.L6)(this.props,!1),u=(0,O.L6)(i,!0),d=l.map(function(e,t){var n=Y(Y(Y({key:"dot-".concat(t),r:3},s),u),{},{value:e.value,dataKey:c,cx:e.x,cy:e.y,index:t,payload:e.payload});return a.renderDotItem(i,n)}),f={clipPath:e?"url(#clipPath-".concat(t?"":"dots-").concat(n,")"):null};return o.createElement(w.m,X({className:"recharts-line-dots",key:"dots"},f),d)}},{key:"renderCurveStatically",value:function(e,t,n,r){var i=this.props,a=i.type,l=i.layout,c=i.connectNulls,s=(i.ref,function(e,t){if(null==e)return{};var n,r,o=function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(i,V)),u=Y(Y(Y({},(0,O.L6)(s,!0)),{},{fill:"none",className:"recharts-line-curve",clipPath:t?"url(#clipPath-".concat(n,")"):null,points:e},r),{},{type:a,layout:l,connectNulls:c});return o.createElement(b.H,X({},u,{pathRef:this.pathRef}))}},{key:"renderCurveWithAnimation",value:function(e,t){var n=this,r=this.props,i=r.points,a=r.strokeDasharray,l=r.isAnimationActive,s=r.animationBegin,u=r.animationDuration,d=r.animationEasing,f=r.animationId,p=r.animateNewValues,h=r.width,m=r.height,g=this.state,v=g.prevPoints,y=g.totalLength;return o.createElement(c.ZP,{begin:s,duration:u,isActive:l,easing:d,from:{t:0},to:{t:1},key:"line-".concat(f),onAnimationEnd:this.handleAnimationEnd,onAnimationStart:this.handleAnimationStart},function(r){var o,l=r.t;if(v){var c=v.length/i.length,s=i.map(function(e,t){var n=Math.floor(t*c);if(v[n]){var r=v[n],o=(0,E.k4)(r.x,e.x),i=(0,E.k4)(r.y,e.y);return Y(Y({},e),{},{x:o(l),y:i(l)})}if(p){var a=(0,E.k4)(2*h,e.x),s=(0,E.k4)(m/2,e.y);return Y(Y({},e),{},{x:a(l),y:s(l)})}return Y(Y({},e),{},{x:e.x,y:e.y})});return n.renderCurveStatically(s,e,t)}var u=(0,E.k4)(0,y)(l);if(a){var d="".concat(a).split(/[,\s]+/gim).map(function(e){return parseFloat(e)});o=n.getStrokeDasharray(u,y,d)}else o=n.generateSimpleStrokeDasharray(y,u);return 
n.renderCurveStatically(i,e,t,{strokeDasharray:o})})}},{key:"renderCurve",value:function(e,t){var n=this.props,r=n.points,o=n.isAnimationActive,i=this.state,a=i.prevPoints,l=i.totalLength;return o&&r&&r.length&&(!a&&l>0||!y()(a,r))?this.renderCurveWithAnimation(e,t):this.renderCurveStatically(r,e,t)}},{key:"render",value:function(){var e,t=this.props,n=t.hide,r=t.dot,i=t.points,a=t.className,c=t.xAxis,s=t.yAxis,u=t.top,d=t.left,f=t.width,p=t.height,m=t.isAnimationActive,g=t.id;if(n||!i||!i.length)return null;var v=this.state.isAnimationFinished,y=1===i.length,b=(0,l.Z)("recharts-line",a),x=c&&c.allowDataOverflow,k=s&&s.allowDataOverflow,E=x||k,C=h()(g)?this.id:g,j=null!==(e=(0,O.L6)(r,!1))&&void 0!==e?e:{r:3,strokeWidth:2},P=j.r,M=j.strokeWidth,N=((0,O.$k)(r)?r:{}).clipDot,I=void 0===N||N,R=2*(void 0===P?3:P)+(void 0===M?2:M);return o.createElement(w.m,{className:b},x||k?o.createElement("defs",null,o.createElement("clipPath",{id:"clipPath-".concat(C)},o.createElement("rect",{x:x?d:d-f/2,y:k?u:u-p/2,width:x?f:2*f,height:k?p:2*p})),!I&&o.createElement("clipPath",{id:"clipPath-dots-".concat(C)},o.createElement("rect",{x:d-R/2,y:u-R/2,width:f+R,height:p+R}))):null,!y&&this.renderCurve(E,C),this.renderErrorBar(E,C),(y||r)&&this.renderDots(E,I,C),(!m||v)&&S.e.renderCallByParent(this.props,i))}}],r=[{key:"getDerivedStateFromProps",value:function(e,t){return e.animationId!==t.prevAnimationId?{prevAnimationId:e.animationId,curPoints:e.points,prevPoints:t.curPoints}:e.points!==t.curPoints?{curPoints:e.points}:null}},{key:"repeat",value:function(e,t){for(var n=e.length%2!=0?[].concat(Q(e),[0]):e,r=[],o=0;o{let{data:n=[],categories:a=[],index:l,stack:c=!1,colors:s=ef.s,valueFormatter:u=eh.Cj,startEndOnly:d=!1,showXAxis:f=!0,showYAxis:p=!0,yAxisWidth:h=56,intervalType:m="equidistantPreserveStart",showAnimation:g=!1,animationDuration:v=900,showTooltip:y=!0,showLegend:b=!0,showGridLines:w=!0,showGradient:S=!0,autoMinValue:k=!1,curveType:E="linear",minValue:C,maxValue:O,connectNulls:j=!1,allowDecimals:P=!0,noDataText:M,className:N,onValueChange:I,enableLegendSlider:R=!1,customTooltip:T,rotateLabelX:A,tickGap:_=5}=e,D=(0,r._T)(e,["data","categories","index","stack","colors","valueFormatter","startEndOnly","showXAxis","showYAxis","yAxisWidth","intervalType","showAnimation","animationDuration","showTooltip","showLegend","showGridLines","showGradient","autoMinValue","curveType","minValue","maxValue","connectNulls","allowDecimals","noDataText","className","onValueChange","enableLegendSlider","customTooltip","rotateLabelX","tickGap"]),Z=(f||p)&&(!d||p)?20:0,[F,U]=(0,o.useState)(60),[V,G]=(0,o.useState)(void 0),[X,$]=(0,o.useState)(void 0),Y=(0,eu.me)(a,s),Q=(0,eu.i4)(k,C,O),J=!!I;function ee(e){J&&(e===X&&!V||(0,eu.FB)(n,e)&&V&&V.dataKey===e?($(void 0),null==I||I(null)):($(e),null==I||I({eventType:"category",categoryClicked:e})),G(void 0))}return o.createElement("div",Object.assign({ref:t,className:(0,ep.q)("w-full h-80",N)},D),o.createElement(i.h,{className:"h-full w-full"},(null==n?void 0:n.length)?o.createElement(H,{data:n,onClick:J&&(X||V)?()=>{G(void 0),$(void 0),null==I||I(null)}:void 0},w?o.createElement(q.q,{className:(0,ep.q)("stroke-1","stroke-tremor-border","dark:stroke-dark-tremor-border"),horizontal:!0,vertical:!1}):null,o.createElement(z.K,{padding:{left:Z,right:Z},hide:!f,dataKey:l,tick:{transform:"translate(0, 6)"},ticks:d?[n[0][l],n[n.length-1][l]]:void 
0,fill:"",stroke:"",className:(0,ep.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),interval:d?"preserveStartEnd":m,tickLine:!1,axisLine:!1,minTickGap:_,angle:null==A?void 0:A.angle,dy:null==A?void 0:A.verticalShift,height:null==A?void 0:A.xAxisHeight}),o.createElement(B.B,{width:h,hide:!p,axisLine:!1,tickLine:!1,type:"number",domain:Q,tick:{transform:"translate(-3, 0)"},fill:"",stroke:"",className:(0,ep.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickFormatter:u,allowDecimals:P}),o.createElement(W.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,cursor:{stroke:"#d1d5db",strokeWidth:1},content:y?e=>{let{active:t,payload:n,label:r}=e;return T?o.createElement(T,{payload:null==n?void 0:n.map(e=>{var t;return Object.assign(Object.assign({},e),{color:null!==(t=Y.get(e.dataKey))&&void 0!==t?t:ed.fr.Gray})}),active:t,label:r}):o.createElement(ec.ZP,{active:t,payload:n,label:r,valueFormatter:u,categoryColors:Y})}:o.createElement(o.Fragment,null),position:{y:0}}),b?o.createElement(K.D,{verticalAlign:"top",height:F,content:e=>{let{payload:t}=e;return(0,el.Z)({payload:t},Y,U,X,J?e=>ee(e):void 0,R)}}):null,a.map(e=>{var t,n;return o.createElement("defs",{key:e},S?o.createElement("linearGradient",{className:(0,eh.bM)(null!==(t=Y.get(e))&&void 0!==t?t:ed.fr.Gray,ef.K.text).textColor,id:Y.get(e),x1:"0",y1:"0",x2:"0",y2:"1"},o.createElement("stop",{offset:"5%",stopColor:"currentColor",stopOpacity:V||X&&X!==e?.15:.4}),o.createElement("stop",{offset:"95%",stopColor:"currentColor",stopOpacity:0})):o.createElement("linearGradient",{className:(0,eh.bM)(null!==(n=Y.get(e))&&void 0!==n?n:ed.fr.Gray,ef.K.text).textColor,id:Y.get(e),x1:"0",y1:"0",x2:"0",y2:"1"},o.createElement("stop",{stopColor:"currentColor",stopOpacity:V||X&&X!==e?.1:.3})))}),a.map(e=>{var t;return o.createElement(L,{className:(0,eh.bM)(null!==(t=Y.get(e))&&void 0!==t?t:ed.fr.Gray,ef.K.text).strokeColor,strokeOpacity:V||X&&X!==e?.3:1,activeDot:e=>{var t;let{cx:r,cy:i,stroke:a,strokeLinecap:l,strokeLinejoin:c,strokeWidth:s,dataKey:u}=e;return o.createElement(x.o,{className:(0,ep.q)("stroke-tremor-background dark:stroke-dark-tremor-background",I?"cursor-pointer":"",(0,eh.bM)(null!==(t=Y.get(u))&&void 0!==t?t:ed.fr.Gray,ef.K.text).fillColor),cx:r,cy:i,r:5,fill:"",stroke:a,strokeLinecap:l,strokeLinejoin:c,strokeWidth:s,onClick:(t,r)=>{r.stopPropagation(),J&&(e.index===(null==V?void 0:V.index)&&e.dataKey===(null==V?void 0:V.dataKey)||(0,eu.FB)(n,e.dataKey)&&X&&X===e.dataKey?($(void 0),G(void 0),null==I||I(null)):($(e.dataKey),G({index:e.index,dataKey:e.dataKey}),null==I||I(Object.assign({eventType:"dot",categoryClicked:e.dataKey},e.payload))))}})},dot:t=>{var r;let{stroke:i,strokeLinecap:a,strokeLinejoin:l,strokeWidth:c,cx:s,cy:u,dataKey:d,index:f}=t;return(0,eu.FB)(n,e)&&!(V||X&&X!==e)||(null==V?void 0:V.index)===f&&(null==V?void 0:V.dataKey)===e?o.createElement(x.o,{key:f,cx:s,cy:u,r:5,stroke:i,fill:"",strokeLinecap:a,strokeLinejoin:l,strokeWidth:c,className:(0,ep.q)("stroke-tremor-background dark:stroke-dark-tremor-background",I?"cursor-pointer":"",(0,eh.bM)(null!==(r=Y.get(d))&&void 0!==r?r:ed.fr.Gray,ef.K.text).fillColor)}):o.createElement(o.Fragment,{key:f})},key:e,name:e,type:E,dataKey:e,stroke:"",fill:"url(#".concat(Y.get(e),")"),strokeWidth:2,strokeLinejoin:"round",strokeLinecap:"round",isAnimationActive:g,animationDuration:v,stackId:c?"a":void 
0,connectNulls:j})}),I?a.map(e=>o.createElement(ea,{className:(0,ep.q)("cursor-pointer"),strokeOpacity:0,key:e,name:e,type:E,dataKey:e,stroke:"transparent",fill:"transparent",legendType:"none",tooltipType:"none",strokeWidth:12,connectNulls:j,onClick:(e,t)=>{t.stopPropagation();let{name:n}=e;ee(n)}})):null):o.createElement(es.Z,{noDataText:M})))});em.displayName="AreaChart"},40278:function(e,t,n){"use strict";n.d(t,{Z:function(){return k}});var r=n(5853),o=n(7084),i=n(26898),a=n(65954),l=n(1153),c=n(2265),s=n(47625),u=n(93765),d=n(31699),f=n(97059),p=n(62994),h=n(25311),m=(0,u.z)({chartName:"BarChart",GraphicalChild:d.$,defaultTooltipEventType:"axis",validateTooltipEventTypes:["axis","item"],axisComponents:[{axisType:"xAxis",AxisComp:f.K},{axisType:"yAxis",AxisComp:p.B}],formatAxisMap:h.t9}),g=n(56940),v=n(8147),y=n(22190),b=n(65278),x=n(98593),w=n(69448),S=n(32644);let k=c.forwardRef((e,t)=>{let{data:n=[],categories:u=[],index:h,colors:k=i.s,valueFormatter:E=l.Cj,layout:C="horizontal",stack:O=!1,relative:j=!1,startEndOnly:P=!1,animationDuration:M=900,showAnimation:N=!1,showXAxis:I=!0,showYAxis:R=!0,yAxisWidth:T=56,intervalType:A="equidistantPreserveStart",showTooltip:_=!0,showLegend:D=!0,showGridLines:Z=!0,autoMinValue:L=!1,minValue:z,maxValue:B,allowDecimals:F=!0,noDataText:H,onValueChange:q,enableLegendSlider:W=!1,customTooltip:K,rotateLabelX:U,tickGap:V=5,className:G}=e,X=(0,r._T)(e,["data","categories","index","colors","valueFormatter","layout","stack","relative","startEndOnly","animationDuration","showAnimation","showXAxis","showYAxis","yAxisWidth","intervalType","showTooltip","showLegend","showGridLines","autoMinValue","minValue","maxValue","allowDecimals","noDataText","onValueChange","enableLegendSlider","customTooltip","rotateLabelX","tickGap","className"]),$=I||R?20:0,[Y,Q]=(0,c.useState)(60),J=(0,S.me)(u,k),[ee,et]=c.useState(void 0),[en,er]=(0,c.useState)(void 0),eo=!!q;function ei(e,t,n){var r,o,i,a;n.stopPropagation(),q&&((0,S.vZ)(ee,Object.assign(Object.assign({},e.payload),{value:e.value}))?(er(void 0),et(void 0),null==q||q(null)):(er(null===(o=null===(r=e.tooltipPayload)||void 0===r?void 0:r[0])||void 0===o?void 0:o.dataKey),et(Object.assign(Object.assign({},e.payload),{value:e.value})),null==q||q(Object.assign({eventType:"bar",categoryClicked:null===(a=null===(i=e.tooltipPayload)||void 0===i?void 0:i[0])||void 0===a?void 0:a.dataKey},e.payload))))}let ea=(0,S.i4)(L,z,B);return c.createElement("div",Object.assign({ref:t,className:(0,a.q)("w-full h-80",G)},X),c.createElement(s.h,{className:"h-full w-full"},(null==n?void 0:n.length)?c.createElement(m,{data:n,stackOffset:O?"sign":j?"expand":"none",layout:"vertical"===C?"vertical":"horizontal",onClick:eo&&(en||ee)?()=>{et(void 0),er(void 0),null==q||q(null)}:void 0},Z?c.createElement(g.q,{className:(0,a.q)("stroke-1","stroke-tremor-border","dark:stroke-dark-tremor-border"),horizontal:"vertical"!==C,vertical:"vertical"===C}):null,"vertical"!==C?c.createElement(f.K,{padding:{left:$,right:$},hide:!I,dataKey:h,interval:P?"preserveStartEnd":A,tick:{transform:"translate(0, 6)"},ticks:P?[n[0][h],n[n.length-1][h]]:void 0,fill:"",stroke:"",className:(0,a.q)("mt-4 text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickLine:!1,axisLine:!1,angle:null==U?void 0:U.angle,dy:null==U?void 0:U.verticalShift,height:null==U?void 0:U.xAxisHeight,minTickGap:V}):c.createElement(f.K,{hide:!I,type:"number",tick:{transform:"translate(-3, 
0)"},domain:ea,fill:"",stroke:"",className:(0,a.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickLine:!1,axisLine:!1,tickFormatter:E,minTickGap:V,allowDecimals:F,angle:null==U?void 0:U.angle,dy:null==U?void 0:U.verticalShift,height:null==U?void 0:U.xAxisHeight}),"vertical"!==C?c.createElement(p.B,{width:T,hide:!R,axisLine:!1,tickLine:!1,type:"number",domain:ea,tick:{transform:"translate(-3, 0)"},fill:"",stroke:"",className:(0,a.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickFormatter:j?e=>"".concat((100*e).toString()," %"):E,allowDecimals:F}):c.createElement(p.B,{width:T,hide:!R,dataKey:h,axisLine:!1,tickLine:!1,ticks:P?[n[0][h],n[n.length-1][h]]:void 0,type:"category",interval:"preserveStartEnd",tick:{transform:"translate(0, 6)"},fill:"",stroke:"",className:(0,a.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content")}),c.createElement(v.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,cursor:{fill:"#d1d5db",opacity:"0.15"},content:_?e=>{let{active:t,payload:n,label:r}=e;return K?c.createElement(K,{payload:null==n?void 0:n.map(e=>{var t;return Object.assign(Object.assign({},e),{color:null!==(t=J.get(e.dataKey))&&void 0!==t?t:o.fr.Gray})}),active:t,label:r}):c.createElement(x.ZP,{active:t,payload:n,label:r,valueFormatter:E,categoryColors:J})}:c.createElement(c.Fragment,null),position:{y:0}}),D?c.createElement(y.D,{verticalAlign:"top",height:Y,content:e=>{let{payload:t}=e;return(0,b.Z)({payload:t},J,Q,en,eo?e=>{eo&&(e!==en||ee?(er(e),null==q||q({eventType:"category",categoryClicked:e})):(er(void 0),null==q||q(null)),et(void 0))}:void 0,W)}}):null,u.map(e=>{var t;return c.createElement(d.$,{className:(0,a.q)((0,l.bM)(null!==(t=J.get(e))&&void 0!==t?t:o.fr.Gray,i.K.background).fillColor,q?"cursor-pointer":""),key:e,name:e,type:"linear",stackId:O||j?"a":void 0,dataKey:e,fill:"",isAnimationActive:N,animationDuration:M,shape:e=>((e,t,n,r)=>{let{fillOpacity:o,name:i,payload:a,value:l}=e,{x:s,width:u,y:d,height:f}=e;return"horizontal"===r&&f<0?(d+=f,f=Math.abs(f)):"vertical"===r&&u<0&&(s+=u,u=Math.abs(u)),c.createElement("rect",{x:s,y:d,width:u,height:f,opacity:t||n&&n!==i?(0,S.vZ)(t,Object.assign(Object.assign({},a),{value:l}))?o:.3:o})})(e,ee,en,C),onClick:ei})})):c.createElement(w.Z,{noDataText:H})))});k.displayName="BarChart"},14042:function(e,t,n){"use strict";n.d(t,{Z:function(){return ez}});var r=n(5853),o=n(7084),i=n(26898),a=n(65954),l=n(1153),c=n(2265),s=n(60474),u=n(47625),d=n(93765),f=n(86757),p=n.n(f),h=n(9841),m=n(81889),g=n(61994),v=n(82944),y=["points","className","baseLinePoints","connectNulls"];function b(){return(b=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&void 0!==arguments[0]?arguments[0]:[],t=[[]];return e.forEach(function(e){S(e)?t[t.length-1].push(e):t[t.length-1].length>0&&t.push([])}),S(e[0])&&t[t.length-1].push(e[0]),t[t.length-1].length<=0&&(t=t.slice(0,-1)),t},E=function(e,t){var n=k(e);t&&(n=[n.reduce(function(e,t){return[].concat(x(e),x(t))},[])]);var r=n.map(function(e){return e.reduce(function(e,t,n){return"".concat(e).concat(0===n?"M":"L").concat(t.x,",").concat(t.y)},"")}).join("");return 1===n.length?"".concat(r,"Z"):r},C=function(e,t,n){var r=E(e,n);return"".concat("Z"===r.slice(-1)?r.slice(0,-1):r,"L").concat(E(t.reverse(),n).slice(1))},O=function(e){var t=e.points,n=e.className,r=e.baseLinePoints,o=e.connectNulls,i=function(e,t){if(null==e)return{};var n,r,o=function(e,t){if(null==e)return{};var 
n,r,o={},i=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(e,y);if(!t||!t.length)return null;var a=(0,g.Z)("recharts-polygon",n);if(r&&r.length){var l=i.stroke&&"none"!==i.stroke,s=C(t,r,o);return c.createElement("g",{className:a},c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"Z"===s.slice(-1)?i.fill:"none",stroke:"none",d:s})),l?c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"none",d:E(t,o)})):null,l?c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"none",d:E(r,o)})):null)}var u=E(t,o);return c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"Z"===u.slice(-1)?i.fill:"none",className:a,d:u}))},j=n(58811),P=n(41637),M=n(39206);function N(e){return(N="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function I(){return(I=Object.assign?Object.assign.bind():function(e){for(var t=1;t1e-5?"outer"===t?"start":"end":n<-.00001?"outer"===t?"end":"start":"middle"}},{key:"renderAxisLine",value:function(){var e=this.props,t=e.cx,n=e.cy,r=e.radius,o=e.axisLine,i=e.axisLineType,a=T(T({},(0,v.L6)(this.props,!1)),{},{fill:"none"},(0,v.L6)(o,!1));if("circle"===i)return c.createElement(m.o,I({className:"recharts-polar-angle-axis-line"},a,{cx:t,cy:n,r:r}));var l=this.props.ticks.map(function(e){return(0,M.op)(t,n,r,e.coordinate)});return c.createElement(O,I({className:"recharts-polar-angle-axis-line"},a,{points:l}))}},{key:"renderTicks",value:function(){var e=this,t=this.props,n=t.ticks,r=t.tick,o=t.tickLine,a=t.tickFormatter,l=t.stroke,s=(0,v.L6)(this.props,!1),u=(0,v.L6)(r,!1),d=T(T({},s),{},{fill:"none"},(0,v.L6)(o,!1)),f=n.map(function(t,n){var f=e.getTickLineCoord(t),p=T(T(T({textAnchor:e.getTickTextAnchor(t)},s),{},{stroke:"none",fill:l},u),{},{index:n,payload:t,x:f.x2,y:f.y2});return c.createElement(h.m,I({className:"recharts-polar-angle-axis-tick",key:"tick-".concat(t.coordinate)},(0,P.bw)(e.props,t,n)),o&&c.createElement("line",I({className:"recharts-polar-angle-axis-tick-line"},d,f)),r&&i.renderTickItem(r,p,a?a(t.value,n):t.value))});return c.createElement(h.m,{className:"recharts-polar-angle-axis-ticks"},f)}},{key:"render",value:function(){var e=this.props,t=e.ticks,n=e.radius,r=e.axisLine;return!(n<=0)&&t&&t.length?c.createElement(h.m,{className:"recharts-polar-angle-axis"},r&&this.renderAxisLine(),this.renderTicks()):null}}],r=[{key:"renderTickItem",value:function(e,t,n){return c.isValidElement(e)?c.cloneElement(e,t):p()(e)?e(t):c.createElement(j.x,I({},t,{className:"recharts-polar-angle-axis-tick-value"}),n)}}],n&&A(i.prototype,n),r&&A(i,r),Object.defineProperty(i,"prototype",{writable:!1}),i}(c.PureComponent);Z(B,"displayName","PolarAngleAxis"),Z(B,"axisType","angleAxis"),Z(B,"defaultProps",{type:"category",angleAxisId:0,scale:"auto",cx:0,cy:0,orientation:"outer",axisLine:!0,tickLine:!0,tickSize:8,tick:!0,hide:!1,allowDuplicatedCategory:!0});var F=n(35802),H=n.n(F),q=n(37891),W=n.n(q),K=n(26680),U=["cx","cy","angle","ticks","axisLine"],V=["ticks","tick","angle","tickFormatter","stroke"];function G(e){return(G="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function 
X(){return(X=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function J(e,t){for(var n=0;n0?el()(e,"paddingAngle",0):0;if(n){var l=(0,eg.k4)(n.endAngle-n.startAngle,e.endAngle-e.startAngle),c=ek(ek({},e),{},{startAngle:i+a,endAngle:i+l(r)+a});o.push(c),i=c.endAngle}else{var s=e.endAngle,d=e.startAngle,f=(0,eg.k4)(0,s-d)(r),p=ek(ek({},e),{},{startAngle:i+a,endAngle:i+f+a});o.push(p),i=p.endAngle}}),c.createElement(h.m,null,e.renderSectorsStatically(o))})}},{key:"attachKeyboardHandlers",value:function(e){var t=this;e.onkeydown=function(e){if(!e.altKey)switch(e.key){case"ArrowLeft":var n=++t.state.sectorToFocus%t.sectorRefs.length;t.sectorRefs[n].focus(),t.setState({sectorToFocus:n});break;case"ArrowRight":var r=--t.state.sectorToFocus<0?t.sectorRefs.length-1:t.state.sectorToFocus%t.sectorRefs.length;t.sectorRefs[r].focus(),t.setState({sectorToFocus:r});break;case"Escape":t.sectorRefs[t.state.sectorToFocus].blur(),t.setState({sectorToFocus:0})}}}},{key:"renderSectors",value:function(){var e=this.props,t=e.sectors,n=e.isAnimationActive,r=this.state.prevSectors;return n&&t&&t.length&&(!r||!es()(r,t))?this.renderSectorsWithAnimation():this.renderSectorsStatically(t)}},{key:"componentDidMount",value:function(){this.pieRef&&this.attachKeyboardHandlers(this.pieRef)}},{key:"render",value:function(){var e=this,t=this.props,n=t.hide,r=t.sectors,o=t.className,i=t.label,a=t.cx,l=t.cy,s=t.innerRadius,u=t.outerRadius,d=t.isAnimationActive,f=this.state.isAnimationFinished;if(n||!r||!r.length||!(0,eg.hj)(a)||!(0,eg.hj)(l)||!(0,eg.hj)(s)||!(0,eg.hj)(u))return null;var p=(0,g.Z)("recharts-pie",o);return c.createElement(h.m,{tabIndex:this.props.rootTabIndex,className:p,ref:function(t){e.pieRef=t}},this.renderSectors(),i&&this.renderLabels(r),K._.renderCallByParent(this.props,null,!1),(!d||f)&&ep.e.renderCallByParent(this.props,r,!1))}}],r=[{key:"getDerivedStateFromProps",value:function(e,t){return t.prevIsAnimationActive!==e.isAnimationActive?{prevIsAnimationActive:e.isAnimationActive,prevAnimationId:e.animationId,curSectors:e.sectors,prevSectors:[],isAnimationFinished:!0}:e.isAnimationActive&&e.animationId!==t.prevAnimationId?{prevAnimationId:e.animationId,curSectors:e.sectors,prevSectors:t.curSectors,isAnimationFinished:!0}:e.sectors!==t.curSectors?{curSectors:e.sectors,isAnimationFinished:!0}:null}},{key:"getTextAnchor",value:function(e,t){return e>t?"start":e=360?x:x-1)*u,S=i.reduce(function(e,t){var n=(0,ev.F$)(t,b,0);return e+((0,eg.hj)(n)?n:0)},0);return S>0&&(t=i.map(function(e,t){var r,o=(0,ev.F$)(e,b,0),i=(0,ev.F$)(e,f,t),a=((0,eg.hj)(o)?o:0)/S,s=(r=t?n.endAngle+(0,eg.uY)(v)*u*(0!==o?1:0):c)+(0,eg.uY)(v)*((0!==o?m:0)+a*w),d=(r+s)/2,p=(g.innerRadius+g.outerRadius)/2,y=[{name:i,value:o,payload:e,dataKey:b,type:h}],x=(0,M.op)(g.cx,g.cy,p,d);return n=ek(ek(ek({percent:a,cornerRadius:l,name:i,tooltipPayload:y,midAngle:d,middleRadius:p,tooltipPosition:x},e),g),{},{value:(0,ev.F$)(e,b),startAngle:r,endAngle:s,payload:e,paddingAngle:(0,eg.uY)(v)*u})})),ek(ek({},g),{},{sectors:t,data:i})});var 
eI=(0,d.z)({chartName:"PieChart",GraphicalChild:eN,validateTooltipEventTypes:["item"],defaultTooltipEventType:"item",legendContent:"children",axisComponents:[{axisType:"angleAxis",AxisComp:B},{axisType:"radiusAxis",AxisComp:eo}],formatAxisMap:M.t9,defaultProps:{layout:"centric",startAngle:0,endAngle:360,cx:"50%",cy:"50%",innerRadius:0,outerRadius:"80%"}}),eR=n(8147),eT=n(69448),eA=n(98593);let e_=e=>{let{active:t,payload:n,valueFormatter:r}=e;if(t&&(null==n?void 0:n[0])){let e=null==n?void 0:n[0];return c.createElement(eA.$B,null,c.createElement("div",{className:(0,a.q)("px-4 py-2")},c.createElement(eA.zX,{value:r(e.value),name:e.name,color:e.payload.color})))}return null},eD=(e,t)=>e.map((e,n)=>{let r=ne||t((0,l.vP)(n.map(e=>e[r]))),eL=e=>{let{cx:t,cy:n,innerRadius:r,outerRadius:o,startAngle:i,endAngle:a,className:l}=e;return c.createElement("g",null,c.createElement(s.L,{cx:t,cy:n,innerRadius:r,outerRadius:o,startAngle:i,endAngle:a,className:l,fill:"",opacity:.3,style:{outline:"none"}}))},ez=c.forwardRef((e,t)=>{let{data:n=[],category:s="value",index:d="name",colors:f=i.s,variant:p="donut",valueFormatter:h=l.Cj,label:m,showLabel:g=!0,animationDuration:v=900,showAnimation:y=!1,showTooltip:b=!0,noDataText:x,onValueChange:w,customTooltip:S,className:k}=e,E=(0,r._T)(e,["data","category","index","colors","variant","valueFormatter","label","showLabel","animationDuration","showAnimation","showTooltip","noDataText","onValueChange","customTooltip","className"]),C="donut"==p,O=eZ(m,h,n,s),[j,P]=c.useState(void 0),M=!!w;return(0,c.useEffect)(()=>{let e=document.querySelectorAll(".recharts-pie-sector");e&&e.forEach(e=>{e.setAttribute("style","outline: none")})},[j]),c.createElement("div",Object.assign({ref:t,className:(0,a.q)("w-full h-40",k)},E),c.createElement(u.h,{className:"h-full w-full"},(null==n?void 0:n.length)?c.createElement(eI,{onClick:M&&j?()=>{P(void 0),null==w||w(null)}:void 0,margin:{top:0,left:0,right:0,bottom:0}},g&&C?c.createElement("text",{className:(0,a.q)("fill-tremor-content-emphasis","dark:fill-dark-tremor-content-emphasis"),x:"50%",y:"50%",textAnchor:"middle",dominantBaseline:"middle"},O):null,c.createElement(eN,{className:(0,a.q)("stroke-tremor-background dark:stroke-dark-tremor-background",w?"cursor-pointer":"cursor-default"),data:eD(n,f),cx:"50%",cy:"50%",startAngle:90,endAngle:-270,innerRadius:C?"75%":"0%",outerRadius:"100%",stroke:"",strokeLinejoin:"round",dataKey:s,nameKey:d,isAnimationActive:y,animationDuration:v,onClick:function(e,t,n){n.stopPropagation(),M&&(j===t?(P(void 0),null==w||w(null)):(P(t),null==w||w(Object.assign({eventType:"slice"},e.payload.payload))))},activeIndex:j,inactiveShape:eL,style:{outline:"none"}}),c.createElement(eR.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,content:b?e=>{var t;let{active:n,payload:r}=e;return S?c.createElement(S,{payload:null==r?void 0:r.map(e=>{var t,n,i;return Object.assign(Object.assign({},e),{color:null!==(i=null===(n=null===(t=null==r?void 0:r[0])||void 0===t?void 0:t.payload)||void 0===n?void 0:n.color)&&void 0!==i?i:o.fr.Gray})}),active:n,label:null===(t=null==r?void 0:r[0])||void 0===t?void 0:t.name}):c.createElement(e_,{active:n,payload:r,valueFormatter:h})}:c.createElement(c.Fragment,null)})):c.createElement(eT.Z,{noDataText:x})))});ez.displayName="DonutChart"},65278:function(e,t,n){"use strict";n.d(t,{Z:function(){return m}});var r=n(2265);let o=(e,t)=>{let[n,o]=(0,r.useState)(t);(0,r.useEffect)(()=>{let t=()=>{o(window.innerWidth),e()};return 
t(),window.addEventListener("resize",t),()=>window.removeEventListener("resize",t)},[e,n])};var i=n(5853),a=n(26898),l=n(65954),c=n(1153);let s=e=>{var t=(0,i._T)(e,[]);return r.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"}),r.createElement("path",{d:"M8 12L14 6V18L8 12Z"}))},u=e=>{var t=(0,i._T)(e,[]);return r.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"}),r.createElement("path",{d:"M16 12L10 18V6L16 12Z"}))},d=(0,c.fn)("Legend"),f=e=>{let{name:t,color:n,onClick:o,activeLegend:i}=e,s=!!o;return r.createElement("li",{className:(0,l.q)(d("legendItem"),"group inline-flex items-center px-2 py-0.5 rounded-tremor-small transition whitespace-nowrap",s?"cursor-pointer":"cursor-default","text-tremor-content",s?"hover:bg-tremor-background-subtle":"","dark:text-dark-tremor-content",s?"dark:hover:bg-dark-tremor-background-subtle":""),onClick:e=>{e.stopPropagation(),null==o||o(t,n)}},r.createElement("svg",{className:(0,l.q)("flex-none h-2 w-2 mr-1.5",(0,c.bM)(n,a.K.text).textColor,i&&i!==t?"opacity-40":"opacity-100"),fill:"currentColor",viewBox:"0 0 8 8"},r.createElement("circle",{cx:4,cy:4,r:4})),r.createElement("p",{className:(0,l.q)("whitespace-nowrap truncate text-tremor-default","text-tremor-content",s?"group-hover:text-tremor-content-emphasis":"","dark:text-dark-tremor-content",i&&i!==t?"opacity-40":"opacity-100",s?"dark:group-hover:text-dark-tremor-content-emphasis":"")},t))},p=e=>{let{icon:t,onClick:n,disabled:o}=e,[i,a]=r.useState(!1),c=r.useRef(null);return r.useEffect(()=>(i?c.current=setInterval(()=>{null==n||n()},300):clearInterval(c.current),()=>clearInterval(c.current)),[i,n]),(0,r.useEffect)(()=>{o&&(clearInterval(c.current),a(!1))},[o]),r.createElement("button",{type:"button",className:(0,l.q)(d("legendSliderButton"),"w-5 group inline-flex items-center truncate rounded-tremor-small transition",o?"cursor-not-allowed":"cursor-pointer",o?"text-tremor-content-subtle":"text-tremor-content hover:text-tremor-content-emphasis hover:bg-tremor-background-subtle",o?"dark:text-dark-tremor-subtle":"dark:text-dark-tremor dark:hover:text-tremor-content-emphasis dark:hover:bg-dark-tremor-background-subtle"),disabled:o,onClick:e=>{e.stopPropagation(),null==n||n()},onMouseDown:e=>{e.stopPropagation(),a(!0)},onMouseUp:e=>{e.stopPropagation(),a(!1)}},r.createElement(t,{className:"w-full"}))},h=r.forwardRef((e,t)=>{var n,o;let{categories:c,colors:h=a.s,className:m,onClickLegendItem:g,activeLegend:v,enableLegendSlider:y=!1}=e,b=(0,i._T)(e,["categories","colors","className","onClickLegendItem","activeLegend","enableLegendSlider"]),x=r.useRef(null),[w,S]=r.useState(null),[k,E]=r.useState(null),C=r.useRef(null),O=(0,r.useCallback)(()=>{let e=null==x?void 0:x.current;e&&S({left:e.scrollLeft>0,right:e.scrollWidth-e.clientWidth>e.scrollLeft})},[S]),j=(0,r.useCallback)(e=>{var t;let n=null==x?void 0:x.current,r=null!==(t=null==n?void 0:n.clientWidth)&&void 0!==t?t:0;n&&y&&(n.scrollTo({left:"left"===e?n.scrollLeft-r:n.scrollLeft+r,behavior:"smooth"}),setTimeout(()=>{O()},400))},[y,O]);r.useEffect(()=>{let e=e=>{"ArrowLeft"===e?j("left"):"ArrowRight"===e&&j("right")};return k?(e(k),C.current=setInterval(()=>{e(k)},300)):clearInterval(C.current),()=>clearInterval(C.current)},[k,j]);let P=e=>{e.stopPropagation(),"ArrowLeft"!==e.key&&"ArrowRight"!==e.key||(e.preventDefault(),E(e.key))},M=e=>{e.stopPropagation(),E(null)};return r.useEffect(()=>{let e=null==x?void 0:x.current;return 
y&&(O(),null==e||e.addEventListener("keydown",P),null==e||e.addEventListener("keyup",M)),()=>{null==e||e.removeEventListener("keydown",P),null==e||e.removeEventListener("keyup",M)}},[O,y]),r.createElement("ol",Object.assign({ref:t,className:(0,l.q)(d("root"),"relative overflow-hidden",m)},b),r.createElement("div",{ref:x,tabIndex:0,className:(0,l.q)("h-full flex",y?(null==w?void 0:w.right)||(null==w?void 0:w.left)?"pl-4 pr-12 items-center overflow-auto snap-mandatory [&::-webkit-scrollbar]:hidden [scrollbar-width:none]":"":"flex-wrap")},c.map((e,t)=>r.createElement(f,{key:"item-".concat(t),name:e,color:h[t],onClick:g,activeLegend:v}))),y&&((null==w?void 0:w.right)||(null==w?void 0:w.left))?r.createElement(r.Fragment,null,r.createElement("div",{className:(0,l.q)("from-tremor-background","dark:from-dark-tremor-background","absolute top-0 bottom-0 left-0 w-4 bg-gradient-to-r to-transparent pointer-events-none")}),r.createElement("div",{className:(0,l.q)("to-tremor-background","dark:to-dark-tremor-background","absolute top-0 bottom-0 right-10 w-4 bg-gradient-to-r from-transparent pointer-events-none")}),r.createElement("div",{className:(0,l.q)("bg-tremor-background","dark:bg-dark-tremor-background","absolute flex top-0 pr-1 bottom-0 right-0 items-center justify-center h-full")},r.createElement(p,{icon:s,onClick:()=>{E(null),j("left")},disabled:!(null==w?void 0:w.left)}),r.createElement(p,{icon:u,onClick:()=>{E(null),j("right")},disabled:!(null==w?void 0:w.right)}))):null)});h.displayName="Legend";let m=(e,t,n,i,a,l)=>{let{payload:c}=e,s=(0,r.useRef)(null);o(()=>{var e,t;n((t=null===(e=s.current)||void 0===e?void 0:e.clientHeight)?Number(t)+20:60)});let u=c.filter(e=>"none"!==e.type);return r.createElement("div",{ref:s,className:"flex items-center justify-end"},r.createElement(h,{categories:u.map(e=>e.value),colors:u.map(e=>t.get(e.value)),onClickLegendItem:a,activeLegend:i,enableLegendSlider:l}))}},98593:function(e,t,n){"use strict";n.d(t,{$B:function(){return c},ZP:function(){return u},zX:function(){return s}});var r=n(2265),o=n(7084),i=n(26898),a=n(65954),l=n(1153);let c=e=>{let{children:t}=e;return r.createElement("div",{className:(0,a.q)("rounded-tremor-default text-tremor-default border","bg-tremor-background shadow-tremor-dropdown border-tremor-border","dark:bg-dark-tremor-background dark:shadow-dark-tremor-dropdown dark:border-dark-tremor-border")},t)},s=e=>{let{value:t,name:n,color:o}=e;return r.createElement("div",{className:"flex items-center justify-between space-x-8"},r.createElement("div",{className:"flex items-center space-x-2"},r.createElement("span",{className:(0,a.q)("shrink-0 rounded-tremor-full border-2 h-3 w-3","border-tremor-background shadow-tremor-card","dark:border-dark-tremor-background dark:shadow-dark-tremor-card",(0,l.bM)(o,i.K.background).bgColor)}),r.createElement("p",{className:(0,a.q)("text-right whitespace-nowrap","text-tremor-content","dark:text-dark-tremor-content")},n)),r.createElement("p",{className:(0,a.q)("font-medium tabular-nums text-right whitespace-nowrap","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis")},t))},u=e=>{let{active:t,payload:n,label:i,categoryColors:l,valueFormatter:u}=e;if(t&&n){let e=n.filter(e=>"none"!==e.type);return r.createElement(c,null,r.createElement("div",{className:(0,a.q)("border-tremor-border border-b px-4 
py-2","dark:border-dark-tremor-border")},r.createElement("p",{className:(0,a.q)("font-medium","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis")},i)),r.createElement("div",{className:(0,a.q)("px-4 py-2 space-y-1")},e.map((e,t)=>{var n;let{value:i,name:a}=e;return r.createElement(s,{key:"id-".concat(t),value:u(i),name:a,color:null!==(n=l.get(a))&&void 0!==n?n:o.fr.Blue})})))}return null}},69448:function(e,t,n){"use strict";n.d(t,{Z:function(){return f}});var r=n(65954),o=n(2265),i=n(5853);let a=(0,n(1153).fn)("Flex"),l={start:"justify-start",end:"justify-end",center:"justify-center",between:"justify-between",around:"justify-around",evenly:"justify-evenly"},c={start:"items-start",end:"items-end",center:"items-center",baseline:"items-baseline",stretch:"items-stretch"},s={row:"flex-row",col:"flex-col","row-reverse":"flex-row-reverse","col-reverse":"flex-col-reverse"},u=o.forwardRef((e,t)=>{let{flexDirection:n="row",justifyContent:u="between",alignItems:d="center",children:f,className:p}=e,h=(0,i._T)(e,["flexDirection","justifyContent","alignItems","children","className"]);return o.createElement("div",Object.assign({ref:t,className:(0,r.q)(a("root"),"flex w-full",s[n],l[u],c[d],p)},h),f)});u.displayName="Flex";var d=n(84264);let f=e=>{let{noDataText:t="No data"}=e;return o.createElement(u,{alignItems:"center",justifyContent:"center",className:(0,r.q)("w-full h-full border border-dashed rounded-tremor-default","border-tremor-border","dark:border-dark-tremor-border")},o.createElement(d.Z,{className:(0,r.q)("text-tremor-content","dark:text-dark-tremor-content")},t))}},32644:function(e,t,n){"use strict";n.d(t,{FB:function(){return i},i4:function(){return o},me:function(){return r},vZ:function(){return function e(t,n){if(t===n)return!0;if("object"!=typeof t||"object"!=typeof n||null===t||null===n)return!1;let r=Object.keys(t),o=Object.keys(n);if(r.length!==o.length)return!1;for(let i of r)if(!o.includes(i)||!e(t[i],n[i]))return!1;return!0}}});let r=(e,t)=>{let n=new Map;return e.forEach((e,r)=>{n.set(e,t[r])}),n},o=(e,t,n)=>[e?"auto":null!=t?t:0,null!=n?n:"auto"];function i(e,t){let n=[];for(let r of e)if(Object.prototype.hasOwnProperty.call(r,t)&&(n.push(r[t]),n.length>1))return!1;return!0}},41649:function(e,t,n){"use strict";n.d(t,{Z:function(){return p}});var r=n(5853),o=n(2265),i=n(1526),a=n(7084),l=n(26898),c=n(65954),s=n(1153);let u={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-0.5",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-0.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-0.5",fontSize:"text-lg"},xl:{paddingX:"px-4",paddingY:"py-1",fontSize:"text-xl"}},d={xs:{height:"h-4",width:"w-4"},sm:{height:"h-4",width:"w-4"},md:{height:"h-4",width:"w-4"},lg:{height:"h-5",width:"w-5"},xl:{height:"h-6",width:"w-6"}},f=(0,s.fn)("Badge"),p=o.forwardRef((e,t)=>{let{color:n,icon:p,size:h=a.u8.SM,tooltip:m,className:g,children:v}=e,y=(0,r._T)(e,["color","icon","size","tooltip","className","children"]),b=p||null,{tooltipProps:x,getReferenceProps:w}=(0,i.l)();return o.createElement("span",Object.assign({ref:(0,s.lq)([t,x.refs.setReference]),className:(0,c.q)(f("root"),"w-max flex-shrink-0 inline-flex justify-center items-center cursor-default rounded-tremor-full",n?(0,c.q)((0,s.bM)(n,l.K.background).bgColor,(0,s.bM)(n,l.K.text).textColor,"bg-opacity-20 dark:bg-opacity-25"):(0,c.q)("bg-tremor-brand-muted text-tremor-brand-emphasis","dark:bg-dark-tremor-brand-muted 
dark:text-dark-tremor-brand-emphasis"),u[h].paddingX,u[h].paddingY,u[h].fontSize,g)},w,y),o.createElement(i.Z,Object.assign({text:m},x)),b?o.createElement(b,{className:(0,c.q)(f("icon"),"shrink-0 -ml-1 mr-1.5",d[h].height,d[h].width)}):null,o.createElement("p",{className:(0,c.q)(f("text"),"text-sm whitespace-nowrap")},v))});p.displayName="Badge"},47323:function(e,t,n){"use strict";n.d(t,{Z:function(){return m}});var r=n(5853),o=n(2265),i=n(1526),a=n(7084),l=n(65954),c=n(1153),s=n(26898);let u={xs:{paddingX:"px-1.5",paddingY:"py-1.5"},sm:{paddingX:"px-1.5",paddingY:"py-1.5"},md:{paddingX:"px-2",paddingY:"py-2"},lg:{paddingX:"px-2",paddingY:"py-2"},xl:{paddingX:"px-2.5",paddingY:"py-2.5"}},d={xs:{height:"h-3",width:"w-3"},sm:{height:"h-5",width:"w-5"},md:{height:"h-5",width:"w-5"},lg:{height:"h-7",width:"w-7"},xl:{height:"h-9",width:"w-9"}},f={simple:{rounded:"",border:"",ring:"",shadow:""},light:{rounded:"rounded-tremor-default",border:"",ring:"",shadow:""},shadow:{rounded:"rounded-tremor-default",border:"border",ring:"",shadow:"shadow-tremor-card dark:shadow-dark-tremor-card"},solid:{rounded:"rounded-tremor-default",border:"border-2",ring:"ring-1",shadow:""},outlined:{rounded:"rounded-tremor-default",border:"border",ring:"ring-2",shadow:""}},p=(e,t)=>{switch(e){case"simple":return{textColor:t?(0,c.bM)(t,s.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:"",borderColor:"",ringColor:""};case"light":return{textColor:t?(0,c.bM)(t,s.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,c.bM)(t,s.K.background).bgColor,"bg-opacity-20"):"bg-tremor-brand-muted dark:bg-dark-tremor-brand-muted",borderColor:"",ringColor:""};case"shadow":return{textColor:t?(0,c.bM)(t,s.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,c.bM)(t,s.K.background).bgColor,"bg-opacity-20"):"bg-tremor-background dark:bg-dark-tremor-background",borderColor:"border-tremor-border dark:border-dark-tremor-border",ringColor:""};case"solid":return{textColor:t?(0,c.bM)(t,s.K.text).textColor:"text-tremor-brand-inverted dark:text-dark-tremor-brand-inverted",bgColor:t?(0,l.q)((0,c.bM)(t,s.K.background).bgColor,"bg-opacity-20"):"bg-tremor-brand dark:bg-dark-tremor-brand",borderColor:"border-tremor-brand-inverted dark:border-dark-tremor-brand-inverted",ringColor:"ring-tremor-ring dark:ring-dark-tremor-ring"};case"outlined":return{textColor:t?(0,c.bM)(t,s.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,c.bM)(t,s.K.background).bgColor,"bg-opacity-20"):"bg-tremor-background dark:bg-dark-tremor-background",borderColor:t?(0,c.bM)(t,s.K.ring).borderColor:"border-tremor-brand-subtle dark:border-dark-tremor-brand-subtle",ringColor:t?(0,l.q)((0,c.bM)(t,s.K.ring).ringColor,"ring-opacity-40"):"ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted"}}},h=(0,c.fn)("Icon"),m=o.forwardRef((e,t)=>{let{icon:n,variant:s="simple",tooltip:m,size:g=a.u8.SM,color:v,className:y}=e,b=(0,r._T)(e,["icon","variant","tooltip","size","color","className"]),x=p(s,v),{tooltipProps:w,getReferenceProps:S}=(0,i.l)();return o.createElement("span",Object.assign({ref:(0,c.lq)([t,w.refs.setReference]),className:(0,l.q)(h("root"),"inline-flex flex-shrink-0 
items-center",x.bgColor,x.textColor,x.borderColor,x.ringColor,f[s].rounded,f[s].border,f[s].shadow,f[s].ring,u[g].paddingX,u[g].paddingY,y)},S,b),o.createElement(i.Z,Object.assign({text:m},w)),o.createElement(n,{className:(0,l.q)(h("icon"),"shrink-0",d[g].height,d[g].width)}))});m.displayName="Icon"},53003:function(e,t,n){"use strict";let r,o,i;n.d(t,{Z:function(){return nF}});var a,l,c,s,u=n(5853),d=n(2265),f=n(54887),p=n(13323),h=n(64518),m=n(96822),g=n(40293);function v(){for(var e=arguments.length,t=Array(e),n=0;n(0,g.r)(...t),[...t])}var y=n(72238),b=n(93689);let x=(0,d.createContext)(!1);var w=n(61424),S=n(27847);let k=d.Fragment,E=d.Fragment,C=(0,d.createContext)(null),O=(0,d.createContext)(null);Object.assign((0,S.yV)(function(e,t){var n;let r,o,i=(0,d.useRef)(null),a=(0,b.T)((0,b.h)(e=>{i.current=e}),t),l=v(i),c=function(e){let t=(0,d.useContext)(x),n=(0,d.useContext)(C),r=v(e),[o,i]=(0,d.useState)(()=>{if(!t&&null!==n||w.O.isServer)return null;let e=null==r?void 0:r.getElementById("headlessui-portal-root");if(e)return e;if(null===r)return null;let o=r.createElement("div");return o.setAttribute("id","headlessui-portal-root"),r.body.appendChild(o)});return(0,d.useEffect)(()=>{null!==o&&(null!=r&&r.body.contains(o)||null==r||r.body.appendChild(o))},[o,r]),(0,d.useEffect)(()=>{t||null!==n&&i(n.current)},[n,i,t]),o}(i),[s]=(0,d.useState)(()=>{var e;return w.O.isServer?null:null!=(e=null==l?void 0:l.createElement("div"))?e:null}),u=(0,d.useContext)(O),g=(0,y.H)();return(0,h.e)(()=>{!c||!s||c.contains(s)||(s.setAttribute("data-headlessui-portal",""),c.appendChild(s))},[c,s]),(0,h.e)(()=>{if(s&&u)return u.register(s)},[u,s]),n=()=>{var e;c&&s&&(s instanceof Node&&c.contains(s)&&c.removeChild(s),c.childNodes.length<=0&&(null==(e=c.parentElement)||e.removeChild(c)))},r=(0,p.z)(n),o=(0,d.useRef)(!1),(0,d.useEffect)(()=>(o.current=!1,()=>{o.current=!0,(0,m.Y)(()=>{o.current&&r()})}),[r]),g&&c&&s?(0,f.createPortal)((0,S.sY)({ourProps:{ref:a},theirProps:e,defaultTag:k,name:"Portal"}),s):null}),{Group:(0,S.yV)(function(e,t){let{target:n,...r}=e,o={ref:(0,b.T)(t)};return d.createElement(C.Provider,{value:n},(0,S.sY)({ourProps:o,theirProps:r,defaultTag:E,name:"Popover.Group"}))})});var j=n(31948),P=n(17684),M=n(98505),N=n(80004),I=n(38198),R=n(3141),T=((r=T||{})[r.Forwards=0]="Forwards",r[r.Backwards=1]="Backwards",r);function A(){let e=(0,d.useRef)(0);return(0,R.s)("keydown",t=>{"Tab"===t.key&&(e.current=t.shiftKey?1:0)},!0),e}var _=n(37863),D=n(47634),Z=n(37105),L=n(24536),z=n(37388),B=((o=B||{})[o.Open=0]="Open",o[o.Closed=1]="Closed",o),F=((i=F||{})[i.TogglePopover=0]="TogglePopover",i[i.ClosePopover=1]="ClosePopover",i[i.SetButton=2]="SetButton",i[i.SetButtonId=3]="SetButtonId",i[i.SetPanel=4]="SetPanel",i[i.SetPanelId=5]="SetPanelId",i);let H={0:e=>{let t={...e,popoverState:(0,L.E)(e.popoverState,{0:1,1:0})};return 0===t.popoverState&&(t.__demoMode=!1),t},1:e=>1===e.popoverState?e:{...e,popoverState:1},2:(e,t)=>e.button===t.button?e:{...e,button:t.button},3:(e,t)=>e.buttonId===t.buttonId?e:{...e,buttonId:t.buttonId},4:(e,t)=>e.panel===t.panel?e:{...e,panel:t.panel},5:(e,t)=>e.panelId===t.panelId?e:{...e,panelId:t.panelId}},q=(0,d.createContext)(null);function W(e){let t=(0,d.useContext)(q);if(null===t){let t=Error("<".concat(e," /> is missing a parent component."));throw Error.captureStackTrace&&Error.captureStackTrace(t,W),t}return t}q.displayName="PopoverContext";let K=(0,d.createContext)(null);function U(e){let t=(0,d.useContext)(K);if(null===t){let t=Error("<".concat(e," /> is missing a 
parent component."));throw Error.captureStackTrace&&Error.captureStackTrace(t,U),t}return t}K.displayName="PopoverAPIContext";let V=(0,d.createContext)(null);function G(){return(0,d.useContext)(V)}V.displayName="PopoverGroupContext";let X=(0,d.createContext)(null);function $(e,t){return(0,L.E)(t.type,H,e,t)}X.displayName="PopoverPanelContext";let Y=S.AN.RenderStrategy|S.AN.Static,Q=S.AN.RenderStrategy|S.AN.Static,J=Object.assign((0,S.yV)(function(e,t){var n,r,o,i;let a,l,c,s,u,f;let{__demoMode:h=!1,...m}=e,g=(0,d.useRef)(null),y=(0,b.T)(t,(0,b.h)(e=>{g.current=e})),x=(0,d.useRef)([]),w=(0,d.useReducer)($,{__demoMode:h,popoverState:h?0:1,buttons:x,button:null,buttonId:null,panel:null,panelId:null,beforePanelSentinel:(0,d.createRef)(),afterPanelSentinel:(0,d.createRef)()}),[{popoverState:k,button:E,buttonId:C,panel:P,panelId:N,beforePanelSentinel:R,afterPanelSentinel:T},A]=w,D=v(null!=(n=g.current)?n:E),z=(0,d.useMemo)(()=>{if(!E||!P)return!1;for(let e of document.querySelectorAll("body > *"))if(Number(null==e?void 0:e.contains(E))^Number(null==e?void 0:e.contains(P)))return!0;let e=(0,Z.GO)(),t=e.indexOf(E),n=(t+e.length-1)%e.length,r=(t+1)%e.length,o=e[n],i=e[r];return!P.contains(o)&&!P.contains(i)},[E,P]),B=(0,j.E)(C),F=(0,j.E)(N),H=(0,d.useMemo)(()=>({buttonId:B,panelId:F,close:()=>A({type:1})}),[B,F,A]),W=G(),U=null==W?void 0:W.registerPopover,V=(0,p.z)(()=>{var e;return null!=(e=null==W?void 0:W.isFocusWithinPopoverGroup())?e:(null==D?void 0:D.activeElement)&&((null==E?void 0:E.contains(D.activeElement))||(null==P?void 0:P.contains(D.activeElement)))});(0,d.useEffect)(()=>null==U?void 0:U(H),[U,H]);let[Y,Q]=(a=(0,d.useContext)(O),l=(0,d.useRef)([]),c=(0,p.z)(e=>(l.current.push(e),a&&a.register(e),()=>s(e))),s=(0,p.z)(e=>{let t=l.current.indexOf(e);-1!==t&&l.current.splice(t,1),a&&a.unregister(e)}),u=(0,d.useMemo)(()=>({register:c,unregister:s,portals:l}),[c,s,l]),[l,(0,d.useMemo)(()=>function(e){let{children:t}=e;return d.createElement(O.Provider,{value:u},t)},[u])]),J=function(){var e;let{defaultContainers:t=[],portals:n,mainTreeNodeRef:r}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},o=(0,d.useRef)(null!=(e=null==r?void 0:r.current)?e:null),i=v(o),a=(0,p.z)(()=>{var e,r,a;let l=[];for(let e of t)null!==e&&(e instanceof HTMLElement?l.push(e):"current"in e&&e.current instanceof HTMLElement&&l.push(e.current));if(null!=n&&n.current)for(let e of n.current)l.push(e);for(let t of null!=(e=null==i?void 0:i.querySelectorAll("html > *, body > *"))?e:[])t!==document.body&&t!==document.head&&t instanceof HTMLElement&&"headlessui-portal-root"!==t.id&&(t.contains(o.current)||t.contains(null==(a=null==(r=o.current)?void 0:r.getRootNode())?void 0:a.host)||l.some(e=>t.contains(e))||l.push(t));return l});return{resolveContainers:a,contains:(0,p.z)(e=>a().some(t=>t.contains(e))),mainTreeNodeRef:o,MainTreeNode:(0,d.useMemo)(()=>function(){return null!=r?null:d.createElement(I._,{features:I.A.Hidden,ref:o})},[o,r])}}({mainTreeNodeRef:null==W?void 0:W.mainTreeNodeRef,portals:Y,defaultContainers:[E,P]});r=null==D?void 0:D.defaultView,o="focus",i=e=>{var t,n,r,o;e.target!==window&&e.target instanceof HTMLElement&&0===k&&(V()||E&&P&&(J.contains(e.target)||null!=(n=null==(t=R.current)?void 0:t.contains)&&n.call(t,e.target)||null!=(o=null==(r=T.current)?void 0:r.contains)&&o.call(r,e.target)||A({type:1})))},f=(0,j.E)(i),(0,d.useEffect)(()=>{function 
e(e){f.current(e)}return(r=null!=r?r:window).addEventListener(o,e,!0),()=>r.removeEventListener(o,e,!0)},[r,o,!0]),(0,M.O)(J.resolveContainers,(e,t)=>{A({type:1}),(0,Z.sP)(t,Z.tJ.Loose)||(e.preventDefault(),null==E||E.focus())},0===k);let ee=(0,p.z)(e=>{A({type:1});let t=e?e instanceof HTMLElement?e:"current"in e&&e.current instanceof HTMLElement?e.current:E:E;null==t||t.focus()}),et=(0,d.useMemo)(()=>({close:ee,isPortalled:z}),[ee,z]),en=(0,d.useMemo)(()=>({open:0===k,close:ee}),[k,ee]);return d.createElement(X.Provider,{value:null},d.createElement(q.Provider,{value:w},d.createElement(K.Provider,{value:et},d.createElement(_.up,{value:(0,L.E)(k,{0:_.ZM.Open,1:_.ZM.Closed})},d.createElement(Q,null,(0,S.sY)({ourProps:{ref:y},theirProps:m,slot:en,defaultTag:"div",name:"Popover"}),d.createElement(J.MainTreeNode,null))))))}),{Button:(0,S.yV)(function(e,t){let n=(0,P.M)(),{id:r="headlessui-popover-button-".concat(n),...o}=e,[i,a]=W("Popover.Button"),{isPortalled:l}=U("Popover.Button"),c=(0,d.useRef)(null),s="headlessui-focus-sentinel-".concat((0,P.M)()),u=G(),f=null==u?void 0:u.closeOthers,h=null!==(0,d.useContext)(X);(0,d.useEffect)(()=>{if(!h)return a({type:3,buttonId:r}),()=>{a({type:3,buttonId:null})}},[h,r,a]);let[m]=(0,d.useState)(()=>Symbol()),g=(0,b.T)(c,t,h?null:e=>{if(e)i.buttons.current.push(m);else{let e=i.buttons.current.indexOf(m);-1!==e&&i.buttons.current.splice(e,1)}i.buttons.current.length>1&&console.warn("You are already using a but only 1 is supported."),e&&a({type:2,button:e})}),y=(0,b.T)(c,t),x=v(c),w=(0,p.z)(e=>{var t,n,r;if(h){if(1===i.popoverState)return;switch(e.key){case z.R.Space:case z.R.Enter:e.preventDefault(),null==(n=(t=e.target).click)||n.call(t),a({type:1}),null==(r=i.button)||r.focus()}}else switch(e.key){case z.R.Space:case z.R.Enter:e.preventDefault(),e.stopPropagation(),1===i.popoverState&&(null==f||f(i.buttonId)),a({type:0});break;case z.R.Escape:if(0!==i.popoverState)return null==f?void 0:f(i.buttonId);if(!c.current||null!=x&&x.activeElement&&!c.current.contains(x.activeElement))return;e.preventDefault(),e.stopPropagation(),a({type:1})}}),k=(0,p.z)(e=>{h||e.key===z.R.Space&&e.preventDefault()}),E=(0,p.z)(t=>{var n,r;(0,D.P)(t.currentTarget)||e.disabled||(h?(a({type:1}),null==(n=i.button)||n.focus()):(t.preventDefault(),t.stopPropagation(),1===i.popoverState&&(null==f||f(i.buttonId)),a({type:0}),null==(r=i.button)||r.focus()))}),C=(0,p.z)(e=>{e.preventDefault(),e.stopPropagation()}),O=0===i.popoverState,j=(0,d.useMemo)(()=>({open:O}),[O]),M=(0,N.f)(e,c),R=h?{ref:y,type:M,onKeyDown:w,onClick:E}:{ref:g,id:i.buttonId,type:M,"aria-expanded":0===i.popoverState,"aria-controls":i.panel?i.panelId:void 0,onKeyDown:w,onKeyUp:k,onClick:E,onMouseDown:C},_=A(),B=(0,p.z)(()=>{let e=i.panel;e&&(0,L.E)(_.current,{[T.Forwards]:()=>(0,Z.jA)(e,Z.TO.First),[T.Backwards]:()=>(0,Z.jA)(e,Z.TO.Last)})===Z.fE.Error&&(0,Z.jA)((0,Z.GO)().filter(e=>"true"!==e.dataset.headlessuiFocusGuard),(0,L.E)(_.current,{[T.Forwards]:Z.TO.Next,[T.Backwards]:Z.TO.Previous}),{relativeTo:i.button})});return d.createElement(d.Fragment,null,(0,S.sY)({ourProps:R,theirProps:o,slot:j,defaultTag:"button",name:"Popover.Button"}),O&&!h&&l&&d.createElement(I._,{id:s,features:I.A.Focusable,"data-headlessui-focus-guard":!0,as:"button",type:"button",onFocus:B}))}),Overlay:(0,S.yV)(function(e,t){let 
n=(0,P.M)(),{id:r="headlessui-popover-overlay-".concat(n),...o}=e,[{popoverState:i},a]=W("Popover.Overlay"),l=(0,b.T)(t),c=(0,_.oJ)(),s=null!==c?(c&_.ZM.Open)===_.ZM.Open:0===i,u=(0,p.z)(e=>{if((0,D.P)(e.currentTarget))return e.preventDefault();a({type:1})}),f=(0,d.useMemo)(()=>({open:0===i}),[i]);return(0,S.sY)({ourProps:{ref:l,id:r,"aria-hidden":!0,onClick:u},theirProps:o,slot:f,defaultTag:"div",features:Y,visible:s,name:"Popover.Overlay"})}),Panel:(0,S.yV)(function(e,t){let n=(0,P.M)(),{id:r="headlessui-popover-panel-".concat(n),focus:o=!1,...i}=e,[a,l]=W("Popover.Panel"),{close:c,isPortalled:s}=U("Popover.Panel"),u="headlessui-focus-sentinel-before-".concat((0,P.M)()),f="headlessui-focus-sentinel-after-".concat((0,P.M)()),m=(0,d.useRef)(null),g=(0,b.T)(m,t,e=>{l({type:4,panel:e})}),y=v(m),x=(0,S.Y2)();(0,h.e)(()=>(l({type:5,panelId:r}),()=>{l({type:5,panelId:null})}),[r,l]);let w=(0,_.oJ)(),k=null!==w?(w&_.ZM.Open)===_.ZM.Open:0===a.popoverState,E=(0,p.z)(e=>{var t;if(e.key===z.R.Escape){if(0!==a.popoverState||!m.current||null!=y&&y.activeElement&&!m.current.contains(y.activeElement))return;e.preventDefault(),e.stopPropagation(),l({type:1}),null==(t=a.button)||t.focus()}});(0,d.useEffect)(()=>{var t;e.static||1===a.popoverState&&(null==(t=e.unmount)||t)&&l({type:4,panel:null})},[a.popoverState,e.unmount,e.static,l]),(0,d.useEffect)(()=>{if(a.__demoMode||!o||0!==a.popoverState||!m.current)return;let e=null==y?void 0:y.activeElement;m.current.contains(e)||(0,Z.jA)(m.current,Z.TO.First)},[a.__demoMode,o,m,a.popoverState]);let C=(0,d.useMemo)(()=>({open:0===a.popoverState,close:c}),[a,c]),O={ref:g,id:r,onKeyDown:E,onBlur:o&&0===a.popoverState?e=>{var t,n,r,o,i;let c=e.relatedTarget;c&&m.current&&(null!=(t=m.current)&&t.contains(c)||(l({type:1}),(null!=(r=null==(n=a.beforePanelSentinel.current)?void 0:n.contains)&&r.call(n,c)||null!=(i=null==(o=a.afterPanelSentinel.current)?void 0:o.contains)&&i.call(o,c))&&c.focus({preventScroll:!0})))}:void 0,tabIndex:-1},j=A(),M=(0,p.z)(()=>{let e=m.current;e&&(0,L.E)(j.current,{[T.Forwards]:()=>{var t;(0,Z.jA)(e,Z.TO.First)===Z.fE.Error&&(null==(t=a.afterPanelSentinel.current)||t.focus())},[T.Backwards]:()=>{var e;null==(e=a.button)||e.focus({preventScroll:!0})}})}),N=(0,p.z)(()=>{let e=m.current;e&&(0,L.E)(j.current,{[T.Forwards]:()=>{var e;if(!a.button)return;let t=(0,Z.GO)(),n=t.indexOf(a.button),r=t.slice(0,n+1),o=[...t.slice(n+1),...r];for(let t of o.slice())if("true"===t.dataset.headlessuiFocusGuard||null!=(e=a.panel)&&e.contains(t)){let e=o.indexOf(t);-1!==e&&o.splice(e,1)}(0,Z.jA)(o,Z.TO.First,{sorted:!1})},[T.Backwards]:()=>{var t;(0,Z.jA)(e,Z.TO.Previous)===Z.fE.Error&&(null==(t=a.button)||t.focus())}})});return d.createElement(X.Provider,{value:r},k&&s&&d.createElement(I._,{id:u,ref:a.beforePanelSentinel,features:I.A.Focusable,"data-headlessui-focus-guard":!0,as:"button",type:"button",onFocus:M}),(0,S.sY)({mergeRefs:x,ourProps:O,theirProps:i,slot:C,defaultTag:"div",features:Q,visible:k,name:"Popover.Panel"}),k&&s&&d.createElement(I._,{id:f,ref:a.afterPanelSentinel,features:I.A.Focusable,"data-headlessui-focus-guard":!0,as:"button",type:"button",onFocus:N}))}),Group:(0,S.yV)(function(e,t){let n;let r=(0,d.useRef)(null),o=(0,b.T)(r,t),[i,a]=(0,d.useState)([]),l={mainTreeNodeRef:n=(0,d.useRef)(null),MainTreeNode:(0,d.useMemo)(()=>function(){return d.createElement(I._,{features:I.A.Hidden,ref:n})},[n])},c=(0,p.z)(e=>{a(t=>{let n=t.indexOf(e);if(-1!==n){let e=t.slice();return e.splice(n,1),e}return 
t})}),s=(0,p.z)(e=>(a(t=>[...t,e]),()=>c(e))),u=(0,p.z)(()=>{var e;let t=(0,g.r)(r);if(!t)return!1;let n=t.activeElement;return!!(null!=(e=r.current)&&e.contains(n))||i.some(e=>{var r,o;return(null==(r=t.getElementById(e.buttonId.current))?void 0:r.contains(n))||(null==(o=t.getElementById(e.panelId.current))?void 0:o.contains(n))})}),f=(0,p.z)(e=>{for(let t of i)t.buttonId.current!==e&&t.close()}),h=(0,d.useMemo)(()=>({registerPopover:s,unregisterPopover:c,isFocusWithinPopoverGroup:u,closeOthers:f,mainTreeNodeRef:l.mainTreeNodeRef}),[s,c,u,f,l.mainTreeNodeRef]),m=(0,d.useMemo)(()=>({}),[]);return d.createElement(V.Provider,{value:h},(0,S.sY)({ourProps:{ref:o},theirProps:e,slot:m,defaultTag:"div",name:"Popover.Group"}),d.createElement(l.MainTreeNode,null))})});var ee=n(33044),et=n(28517);let en=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 20 20",fill:"currentColor"}),d.createElement("path",{fillRule:"evenodd",d:"M6 2a1 1 0 00-1 1v1H4a2 2 0 00-2 2v10a2 2 0 002 2h12a2 2 0 002-2V6a2 2 0 00-2-2h-1V3a1 1 0 10-2 0v1H7V3a1 1 0 00-1-1zm0 5a1 1 0 000 2h8a1 1 0 100-2H6z",clipRule:"evenodd"}))};var er=n(4537),eo=n(99735),ei=n(7656);function ea(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e);return t.setHours(0,0,0,0),t}function el(){return ea(Date.now())}function ec(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e);return t.setDate(1),t.setHours(0,0,0,0),t}var es=n(65954),eu=n(96398),ed=n(41154);function ef(e){var t,n;if((0,ei.Z)(1,arguments),e&&"function"==typeof e.forEach)t=e;else{if("object"!==(0,ed.Z)(e)||null===e)return new Date(NaN);t=Array.prototype.slice.call(e)}return t.forEach(function(e){var t=(0,eo.Z)(e);(void 0===n||nt||isNaN(t.getDate()))&&(n=t)}),n||new Date(NaN)}var eh=n(25721),em=n(47869);function eg(e,t){(0,ei.Z)(2,arguments);var n=(0,em.Z)(t);return(0,eh.Z)(e,-n)}var ev=n(55463);function ey(e,t){if((0,ei.Z)(2,arguments),!t||"object"!==(0,ed.Z)(t))return new Date(NaN);var n=t.years?(0,em.Z)(t.years):0,r=t.months?(0,em.Z)(t.months):0,o=t.weeks?(0,em.Z)(t.weeks):0,i=t.days?(0,em.Z)(t.days):0,a=t.hours?(0,em.Z)(t.hours):0,l=t.minutes?(0,em.Z)(t.minutes):0,c=t.seconds?(0,em.Z)(t.seconds):0;return new Date(eg(function(e,t){(0,ei.Z)(2,arguments);var n=(0,em.Z)(t);return(0,ev.Z)(e,-n)}(e,r+12*n),i+7*o).getTime()-1e3*(c+60*(l+60*a)))}function eb(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=new Date(0);return n.setFullYear(t.getFullYear(),0,1),n.setHours(0,0,0,0),n}function ex(e){return(0,ei.Z)(1,arguments),e instanceof Date||"object"===(0,ed.Z)(e)&&"[object Date]"===Object.prototype.toString.call(e)}function ew(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getUTCDay();return t.setUTCDate(t.getUTCDate()-((n<1?7:0)+n-1)),t.setUTCHours(0,0,0,0),t}function eS(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getUTCFullYear(),r=new Date(0);r.setUTCFullYear(n+1,0,4),r.setUTCHours(0,0,0,0);var o=ew(r),i=new Date(0);i.setUTCFullYear(n,0,4),i.setUTCHours(0,0,0,0);var a=ew(i);return t.getTime()>=o.getTime()?n+1:t.getTime()>=a.getTime()?n:n-1}var ek={};function eE(e,t){(0,ei.Z)(1,arguments);var n,r,o,i,a,l,c,s,u=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.weekStartsOn)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.weekStartsOn)&&void 0!==o?o:ek.weekStartsOn)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 0:null===(s=c.options)||void 0===s?void 0:s.weekStartsOn)&&void 0!==n?n:0);if(!(u>=0&&u<=6))throw RangeError("weekStartsOn must be between 0 and 6 
inclusively");var d=(0,eo.Z)(e),f=d.getUTCDay();return d.setUTCDate(d.getUTCDate()-((f=1&&f<=7))throw RangeError("firstWeekContainsDate must be between 1 and 7 inclusively");var p=new Date(0);p.setUTCFullYear(d+1,0,f),p.setUTCHours(0,0,0,0);var h=eE(p,t),m=new Date(0);m.setUTCFullYear(d,0,f),m.setUTCHours(0,0,0,0);var g=eE(m,t);return u.getTime()>=h.getTime()?d+1:u.getTime()>=g.getTime()?d:d-1}function eO(e,t){for(var n=Math.abs(e).toString();n.length0?n:1-n;return eO("yy"===t?r%100:r,t.length)},M:function(e,t){var n=e.getUTCMonth();return"M"===t?String(n+1):eO(n+1,2)},d:function(e,t){return eO(e.getUTCDate(),t.length)},h:function(e,t){return eO(e.getUTCHours()%12||12,t.length)},H:function(e,t){return eO(e.getUTCHours(),t.length)},m:function(e,t){return eO(e.getUTCMinutes(),t.length)},s:function(e,t){return eO(e.getUTCSeconds(),t.length)},S:function(e,t){var n=t.length;return eO(Math.floor(e.getUTCMilliseconds()*Math.pow(10,n-3)),t.length)}},eP={midnight:"midnight",noon:"noon",morning:"morning",afternoon:"afternoon",evening:"evening",night:"night"};function eM(e,t){var n=e>0?"-":"+",r=Math.abs(e),o=Math.floor(r/60),i=r%60;return 0===i?n+String(o):n+String(o)+(t||"")+eO(i,2)}function eN(e,t){return e%60==0?(e>0?"-":"+")+eO(Math.abs(e)/60,2):eI(e,t)}function eI(e,t){var n=Math.abs(e);return(e>0?"-":"+")+eO(Math.floor(n/60),2)+(t||"")+eO(n%60,2)}var eR={G:function(e,t,n){var r=e.getUTCFullYear()>0?1:0;switch(t){case"G":case"GG":case"GGG":return n.era(r,{width:"abbreviated"});case"GGGGG":return n.era(r,{width:"narrow"});default:return n.era(r,{width:"wide"})}},y:function(e,t,n){if("yo"===t){var r=e.getUTCFullYear();return n.ordinalNumber(r>0?r:1-r,{unit:"year"})}return ej.y(e,t)},Y:function(e,t,n,r){var o=eC(e,r),i=o>0?o:1-o;return"YY"===t?eO(i%100,2):"Yo"===t?n.ordinalNumber(i,{unit:"year"}):eO(i,t.length)},R:function(e,t){return eO(eS(e),t.length)},u:function(e,t){return eO(e.getUTCFullYear(),t.length)},Q:function(e,t,n){var r=Math.ceil((e.getUTCMonth()+1)/3);switch(t){case"Q":return String(r);case"QQ":return eO(r,2);case"Qo":return n.ordinalNumber(r,{unit:"quarter"});case"QQQ":return n.quarter(r,{width:"abbreviated",context:"formatting"});case"QQQQQ":return n.quarter(r,{width:"narrow",context:"formatting"});default:return n.quarter(r,{width:"wide",context:"formatting"})}},q:function(e,t,n){var r=Math.ceil((e.getUTCMonth()+1)/3);switch(t){case"q":return String(r);case"qq":return eO(r,2);case"qo":return n.ordinalNumber(r,{unit:"quarter"});case"qqq":return n.quarter(r,{width:"abbreviated",context:"standalone"});case"qqqqq":return n.quarter(r,{width:"narrow",context:"standalone"});default:return n.quarter(r,{width:"wide",context:"standalone"})}},M:function(e,t,n){var r=e.getUTCMonth();switch(t){case"M":case"MM":return ej.M(e,t);case"Mo":return n.ordinalNumber(r+1,{unit:"month"});case"MMM":return n.month(r,{width:"abbreviated",context:"formatting"});case"MMMMM":return n.month(r,{width:"narrow",context:"formatting"});default:return n.month(r,{width:"wide",context:"formatting"})}},L:function(e,t,n){var r=e.getUTCMonth();switch(t){case"L":return String(r+1);case"LL":return eO(r+1,2);case"Lo":return n.ordinalNumber(r+1,{unit:"month"});case"LLL":return n.month(r,{width:"abbreviated",context:"standalone"});case"LLLLL":return n.month(r,{width:"narrow",context:"standalone"});default:return n.month(r,{width:"wide",context:"standalone"})}},w:function(e,t,n,r){var o=function(e,t){(0,ei.Z)(1,arguments);var n=(0,eo.Z)(e);return Math.round((eE(n,t).getTime()-(function(e,t){(0,ei.Z)(1,arguments);var 
n,r,o,i,a,l,c,s,u=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.firstWeekContainsDate)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.firstWeekContainsDate)&&void 0!==o?o:ek.firstWeekContainsDate)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 0:null===(s=c.options)||void 0===s?void 0:s.firstWeekContainsDate)&&void 0!==n?n:1),d=eC(e,t),f=new Date(0);return f.setUTCFullYear(d,0,u),f.setUTCHours(0,0,0,0),eE(f,t)})(n,t).getTime())/6048e5)+1}(e,r);return"wo"===t?n.ordinalNumber(o,{unit:"week"}):eO(o,t.length)},I:function(e,t,n){var r=function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e);return Math.round((ew(t).getTime()-(function(e){(0,ei.Z)(1,arguments);var t=eS(e),n=new Date(0);return n.setUTCFullYear(t,0,4),n.setUTCHours(0,0,0,0),ew(n)})(t).getTime())/6048e5)+1}(e);return"Io"===t?n.ordinalNumber(r,{unit:"week"}):eO(r,t.length)},d:function(e,t,n){return"do"===t?n.ordinalNumber(e.getUTCDate(),{unit:"date"}):ej.d(e,t)},D:function(e,t,n){var r=function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getTime();return t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0),Math.floor((n-t.getTime())/864e5)+1}(e);return"Do"===t?n.ordinalNumber(r,{unit:"dayOfYear"}):eO(r,t.length)},E:function(e,t,n){var r=e.getUTCDay();switch(t){case"E":case"EE":case"EEE":return n.day(r,{width:"abbreviated",context:"formatting"});case"EEEEE":return n.day(r,{width:"narrow",context:"formatting"});case"EEEEEE":return n.day(r,{width:"short",context:"formatting"});default:return n.day(r,{width:"wide",context:"formatting"})}},e:function(e,t,n,r){var o=e.getUTCDay(),i=(o-r.weekStartsOn+8)%7||7;switch(t){case"e":return String(i);case"ee":return eO(i,2);case"eo":return n.ordinalNumber(i,{unit:"day"});case"eee":return n.day(o,{width:"abbreviated",context:"formatting"});case"eeeee":return n.day(o,{width:"narrow",context:"formatting"});case"eeeeee":return n.day(o,{width:"short",context:"formatting"});default:return n.day(o,{width:"wide",context:"formatting"})}},c:function(e,t,n,r){var o=e.getUTCDay(),i=(o-r.weekStartsOn+8)%7||7;switch(t){case"c":return String(i);case"cc":return eO(i,t.length);case"co":return n.ordinalNumber(i,{unit:"day"});case"ccc":return n.day(o,{width:"abbreviated",context:"standalone"});case"ccccc":return n.day(o,{width:"narrow",context:"standalone"});case"cccccc":return n.day(o,{width:"short",context:"standalone"});default:return n.day(o,{width:"wide",context:"standalone"})}},i:function(e,t,n){var r=e.getUTCDay(),o=0===r?7:r;switch(t){case"i":return String(o);case"ii":return eO(o,t.length);case"io":return n.ordinalNumber(o,{unit:"day"});case"iii":return n.day(r,{width:"abbreviated",context:"formatting"});case"iiiii":return n.day(r,{width:"narrow",context:"formatting"});case"iiiiii":return n.day(r,{width:"short",context:"formatting"});default:return n.day(r,{width:"wide",context:"formatting"})}},a:function(e,t,n){var r=e.getUTCHours()/12>=1?"pm":"am";switch(t){case"a":case"aa":return n.dayPeriod(r,{width:"abbreviated",context:"formatting"});case"aaa":return n.dayPeriod(r,{width:"abbreviated",context:"formatting"}).toLowerCase();case"aaaaa":return n.dayPeriod(r,{width:"narrow",context:"formatting"});default:return n.dayPeriod(r,{width:"wide",context:"formatting"})}},b:function(e,t,n){var r,o=e.getUTCHours();switch(r=12===o?eP.noon:0===o?eP.midnight:o/12>=1?"pm":"am",t){case"b":case"bb":return n.dayPeriod(r,{width:"abbreviated",context:"formatting"});case"bbb":return 
n.dayPeriod(r,{width:"abbreviated",context:"formatting"}).toLowerCase();case"bbbbb":return n.dayPeriod(r,{width:"narrow",context:"formatting"});default:return n.dayPeriod(r,{width:"wide",context:"formatting"})}},B:function(e,t,n){var r,o=e.getUTCHours();switch(r=o>=17?eP.evening:o>=12?eP.afternoon:o>=4?eP.morning:eP.night,t){case"B":case"BB":case"BBB":return n.dayPeriod(r,{width:"abbreviated",context:"formatting"});case"BBBBB":return n.dayPeriod(r,{width:"narrow",context:"formatting"});default:return n.dayPeriod(r,{width:"wide",context:"formatting"})}},h:function(e,t,n){if("ho"===t){var r=e.getUTCHours()%12;return 0===r&&(r=12),n.ordinalNumber(r,{unit:"hour"})}return ej.h(e,t)},H:function(e,t,n){return"Ho"===t?n.ordinalNumber(e.getUTCHours(),{unit:"hour"}):ej.H(e,t)},K:function(e,t,n){var r=e.getUTCHours()%12;return"Ko"===t?n.ordinalNumber(r,{unit:"hour"}):eO(r,t.length)},k:function(e,t,n){var r=e.getUTCHours();return(0===r&&(r=24),"ko"===t)?n.ordinalNumber(r,{unit:"hour"}):eO(r,t.length)},m:function(e,t,n){return"mo"===t?n.ordinalNumber(e.getUTCMinutes(),{unit:"minute"}):ej.m(e,t)},s:function(e,t,n){return"so"===t?n.ordinalNumber(e.getUTCSeconds(),{unit:"second"}):ej.s(e,t)},S:function(e,t){return ej.S(e,t)},X:function(e,t,n,r){var o=(r._originalDate||e).getTimezoneOffset();if(0===o)return"Z";switch(t){case"X":return eN(o);case"XXXX":case"XX":return eI(o);default:return eI(o,":")}},x:function(e,t,n,r){var o=(r._originalDate||e).getTimezoneOffset();switch(t){case"x":return eN(o);case"xxxx":case"xx":return eI(o);default:return eI(o,":")}},O:function(e,t,n,r){var o=(r._originalDate||e).getTimezoneOffset();switch(t){case"O":case"OO":case"OOO":return"GMT"+eM(o,":");default:return"GMT"+eI(o,":")}},z:function(e,t,n,r){var o=(r._originalDate||e).getTimezoneOffset();switch(t){case"z":case"zz":case"zzz":return"GMT"+eM(o,":");default:return"GMT"+eI(o,":")}},t:function(e,t,n,r){return eO(Math.floor((r._originalDate||e).getTime()/1e3),t.length)},T:function(e,t,n,r){return eO((r._originalDate||e).getTime(),t.length)}},eT=function(e,t){switch(e){case"P":return t.date({width:"short"});case"PP":return t.date({width:"medium"});case"PPP":return t.date({width:"long"});default:return t.date({width:"full"})}},eA=function(e,t){switch(e){case"p":return t.time({width:"short"});case"pp":return t.time({width:"medium"});case"ppp":return t.time({width:"long"});default:return t.time({width:"full"})}},e_={p:eA,P:function(e,t){var n,r=e.match(/(P+)(p+)?/)||[],o=r[1],i=r[2];if(!i)return eT(e,t);switch(o){case"P":n=t.dateTime({width:"short"});break;case"PP":n=t.dateTime({width:"medium"});break;case"PPP":n=t.dateTime({width:"long"});break;default:n=t.dateTime({width:"full"})}return n.replace("{{date}}",eT(o,t)).replace("{{time}}",eA(i,t))}};function eD(e){var t=new Date(Date.UTC(e.getFullYear(),e.getMonth(),e.getDate(),e.getHours(),e.getMinutes(),e.getSeconds(),e.getMilliseconds()));return t.setUTCFullYear(e.getFullYear()),e.getTime()-t.getTime()}var eZ=["D","DD"],eL=["YY","YYYY"];function ez(e,t,n){if("YYYY"===e)throw RangeError("Use `yyyy` instead of `YYYY` (in `".concat(t,"`) for formatting years to the input `").concat(n,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"));if("YY"===e)throw RangeError("Use `yy` instead of `YY` (in `".concat(t,"`) for formatting years to the input `").concat(n,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"));if("D"===e)throw RangeError("Use `d` instead of `D` (in `".concat(t,"`) for formatting days of the month to the input 
`").concat(n,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"));if("DD"===e)throw RangeError("Use `dd` instead of `DD` (in `".concat(t,"`) for formatting days of the month to the input `").concat(n,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"))}var eB={lessThanXSeconds:{one:"less than a second",other:"less than {{count}} seconds"},xSeconds:{one:"1 second",other:"{{count}} seconds"},halfAMinute:"half a minute",lessThanXMinutes:{one:"less than a minute",other:"less than {{count}} minutes"},xMinutes:{one:"1 minute",other:"{{count}} minutes"},aboutXHours:{one:"about 1 hour",other:"about {{count}} hours"},xHours:{one:"1 hour",other:"{{count}} hours"},xDays:{one:"1 day",other:"{{count}} days"},aboutXWeeks:{one:"about 1 week",other:"about {{count}} weeks"},xWeeks:{one:"1 week",other:"{{count}} weeks"},aboutXMonths:{one:"about 1 month",other:"about {{count}} months"},xMonths:{one:"1 month",other:"{{count}} months"},aboutXYears:{one:"about 1 year",other:"about {{count}} years"},xYears:{one:"1 year",other:"{{count}} years"},overXYears:{one:"over 1 year",other:"over {{count}} years"},almostXYears:{one:"almost 1 year",other:"almost {{count}} years"}};function eF(e){return function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},n=t.width?String(t.width):e.defaultWidth;return e.formats[n]||e.formats[e.defaultWidth]}}var eH={date:eF({formats:{full:"EEEE, MMMM do, y",long:"MMMM do, y",medium:"MMM d, y",short:"MM/dd/yyyy"},defaultWidth:"full"}),time:eF({formats:{full:"h:mm:ss a zzzz",long:"h:mm:ss a z",medium:"h:mm:ss a",short:"h:mm a"},defaultWidth:"full"}),dateTime:eF({formats:{full:"{{date}} 'at' {{time}}",long:"{{date}} 'at' {{time}}",medium:"{{date}}, {{time}}",short:"{{date}}, {{time}}"},defaultWidth:"full"})},eq={lastWeek:"'last' eeee 'at' p",yesterday:"'yesterday at' p",today:"'today at' p",tomorrow:"'tomorrow at' p",nextWeek:"eeee 'at' p",other:"P"};function eW(e){return function(t,n){var r;if("formatting"===(null!=n&&n.context?String(n.context):"standalone")&&e.formattingValues){var o=e.defaultFormattingWidth||e.defaultWidth,i=null!=n&&n.width?String(n.width):o;r=e.formattingValues[i]||e.formattingValues[o]}else{var a=e.defaultWidth,l=null!=n&&n.width?String(n.width):e.defaultWidth;r=e.values[l]||e.values[a]}return r[e.argumentCallback?e.argumentCallback(t):t]}}function eK(e){return function(t){var n,r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},o=r.width,i=o&&e.matchPatterns[o]||e.matchPatterns[e.defaultMatchWidth],a=t.match(i);if(!a)return null;var l=a[0],c=o&&e.parsePatterns[o]||e.parsePatterns[e.defaultParseWidth],s=Array.isArray(c)?function(e,t){for(var n=0;n0?"in "+r:r+" ago":r},formatLong:eH,formatRelative:function(e,t,n,r){return eq[e]},localize:{ordinalNumber:function(e,t){var n=Number(e),r=n%100;if(r>20||r<10)switch(r%10){case 1:return n+"st";case 2:return n+"nd";case 3:return n+"rd"}return n+"th"},era:eW({values:{narrow:["B","A"],abbreviated:["BC","AD"],wide:["Before Christ","Anno Domini"]},defaultWidth:"wide"}),quarter:eW({values:{narrow:["1","2","3","4"],abbreviated:["Q1","Q2","Q3","Q4"],wide:["1st quarter","2nd quarter","3rd quarter","4th quarter"]},defaultWidth:"wide",argumentCallback:function(e){return 
e-1}}),month:eW({values:{narrow:["J","F","M","A","M","J","J","A","S","O","N","D"],abbreviated:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],wide:["January","February","March","April","May","June","July","August","September","October","November","December"]},defaultWidth:"wide"}),day:eW({values:{narrow:["S","M","T","W","T","F","S"],short:["Su","Mo","Tu","We","Th","Fr","Sa"],abbreviated:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],wide:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},defaultWidth:"wide"}),dayPeriod:eW({values:{narrow:{am:"a",pm:"p",midnight:"mi",noon:"n",morning:"morning",afternoon:"afternoon",evening:"evening",night:"night"},abbreviated:{am:"AM",pm:"PM",midnight:"midnight",noon:"noon",morning:"morning",afternoon:"afternoon",evening:"evening",night:"night"},wide:{am:"a.m.",pm:"p.m.",midnight:"midnight",noon:"noon",morning:"morning",afternoon:"afternoon",evening:"evening",night:"night"}},defaultWidth:"wide",formattingValues:{narrow:{am:"a",pm:"p",midnight:"mi",noon:"n",morning:"in the morning",afternoon:"in the afternoon",evening:"in the evening",night:"at night"},abbreviated:{am:"AM",pm:"PM",midnight:"midnight",noon:"noon",morning:"in the morning",afternoon:"in the afternoon",evening:"in the evening",night:"at night"},wide:{am:"a.m.",pm:"p.m.",midnight:"midnight",noon:"noon",morning:"in the morning",afternoon:"in the afternoon",evening:"in the evening",night:"at night"}},defaultFormattingWidth:"wide"})},match:{ordinalNumber:(a={matchPattern:/^(\d+)(th|st|nd|rd)?/i,parsePattern:/\d+/i,valueCallback:function(e){return parseInt(e,10)}},function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=e.match(a.matchPattern);if(!n)return null;var r=n[0],o=e.match(a.parsePattern);if(!o)return null;var i=a.valueCallback?a.valueCallback(o[0]):o[0];return{value:i=t.valueCallback?t.valueCallback(i):i,rest:e.slice(r.length)}}),era:eK({matchPatterns:{narrow:/^(b|a)/i,abbreviated:/^(b\.?\s?c\.?|b\.?\s?c\.?\s?e\.?|a\.?\s?d\.?|c\.?\s?e\.?)/i,wide:/^(before christ|before common era|anno domini|common era)/i},defaultMatchWidth:"wide",parsePatterns:{any:[/^b/i,/^(a|c)/i]},defaultParseWidth:"any"}),quarter:eK({matchPatterns:{narrow:/^[1234]/i,abbreviated:/^q[1234]/i,wide:/^[1234](th|st|nd|rd)? 
quarter/i},defaultMatchWidth:"wide",parsePatterns:{any:[/1/i,/2/i,/3/i,/4/i]},defaultParseWidth:"any",valueCallback:function(e){return e+1}}),month:eK({matchPatterns:{narrow:/^[jfmasond]/i,abbreviated:/^(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)/i,wide:/^(january|february|march|april|may|june|july|august|september|october|november|december)/i},defaultMatchWidth:"wide",parsePatterns:{narrow:[/^j/i,/^f/i,/^m/i,/^a/i,/^m/i,/^j/i,/^j/i,/^a/i,/^s/i,/^o/i,/^n/i,/^d/i],any:[/^ja/i,/^f/i,/^mar/i,/^ap/i,/^may/i,/^jun/i,/^jul/i,/^au/i,/^s/i,/^o/i,/^n/i,/^d/i]},defaultParseWidth:"any"}),day:eK({matchPatterns:{narrow:/^[smtwf]/i,short:/^(su|mo|tu|we|th|fr|sa)/i,abbreviated:/^(sun|mon|tue|wed|thu|fri|sat)/i,wide:/^(sunday|monday|tuesday|wednesday|thursday|friday|saturday)/i},defaultMatchWidth:"wide",parsePatterns:{narrow:[/^s/i,/^m/i,/^t/i,/^w/i,/^t/i,/^f/i,/^s/i],any:[/^su/i,/^m/i,/^tu/i,/^w/i,/^th/i,/^f/i,/^sa/i]},defaultParseWidth:"any"}),dayPeriod:eK({matchPatterns:{narrow:/^(a|p|mi|n|(in the|at) (morning|afternoon|evening|night))/i,any:/^([ap]\.?\s?m\.?|midnight|noon|(in the|at) (morning|afternoon|evening|night))/i},defaultMatchWidth:"any",parsePatterns:{any:{am:/^a/i,pm:/^p/i,midnight:/^mi/i,noon:/^no/i,morning:/morning/i,afternoon:/afternoon/i,evening:/evening/i,night:/night/i}},defaultParseWidth:"any"})},options:{weekStartsOn:0,firstWeekContainsDate:1}},eV=/[yYQqMLwIdDecihHKkms]o|(\w)\1*|''|'(''|[^'])+('|$)|./g,eG=/P+p+|P+|p+|''|'(''|[^'])+('|$)|./g,eX=/^'([^]*?)'?$/,e$=/''/g,eY=/[a-zA-Z]/;function eQ(e,t,n){(0,ei.Z)(2,arguments);var r,o,i,a,l,c,s,u,d,f,p,h,m,g,v,y,b,x,w=String(t),S=null!==(r=null!==(o=null==n?void 0:n.locale)&&void 0!==o?o:ek.locale)&&void 0!==r?r:eU,k=(0,em.Z)(null!==(i=null!==(a=null!==(l=null!==(c=null==n?void 0:n.firstWeekContainsDate)&&void 0!==c?c:null==n?void 0:null===(s=n.locale)||void 0===s?void 0:null===(u=s.options)||void 0===u?void 0:u.firstWeekContainsDate)&&void 0!==l?l:ek.firstWeekContainsDate)&&void 0!==a?a:null===(d=ek.locale)||void 0===d?void 0:null===(f=d.options)||void 0===f?void 0:f.firstWeekContainsDate)&&void 0!==i?i:1);if(!(k>=1&&k<=7))throw RangeError("firstWeekContainsDate must be between 1 and 7 inclusively");var E=(0,em.Z)(null!==(p=null!==(h=null!==(m=null!==(g=null==n?void 0:n.weekStartsOn)&&void 0!==g?g:null==n?void 0:null===(v=n.locale)||void 0===v?void 0:null===(y=v.options)||void 0===y?void 0:y.weekStartsOn)&&void 0!==m?m:ek.weekStartsOn)&&void 0!==h?h:null===(b=ek.locale)||void 0===b?void 0:null===(x=b.options)||void 0===x?void 0:x.weekStartsOn)&&void 0!==p?p:0);if(!(E>=0&&E<=6))throw RangeError("weekStartsOn must be between 0 and 6 inclusively");if(!S.localize)throw RangeError("locale must contain localize property");if(!S.formatLong)throw RangeError("locale must contain formatLong property");var C=(0,eo.Z)(e);if(!function(e){return(0,ei.Z)(1,arguments),(!!ex(e)||"number"==typeof e)&&!isNaN(Number((0,eo.Z)(e)))}(C))throw RangeError("Invalid time value");var O=eD(C),j=function(e,t){return(0,ei.Z)(2,arguments),function(e,t){return(0,ei.Z)(2,arguments),new Date((0,eo.Z)(e).getTime()+(0,em.Z)(t))}(e,-(0,em.Z)(t))}(C,O),P={firstWeekContainsDate:k,weekStartsOn:E,locale:S,_originalDate:C};return w.match(eG).map(function(e){var t=e[0];return"p"===t||"P"===t?(0,e_[t])(e,S.formatLong):e}).join("").match(eV).map(function(r){if("''"===r)return"'";var o,i=r[0];if("'"===i)return(o=r.match(eX))?o[1].replace(e$,"'"):r;var a=eR[i];if(a)return 
null!=n&&n.useAdditionalWeekYearTokens||-1===eL.indexOf(r)||ez(r,t,String(e)),null!=n&&n.useAdditionalDayOfYearTokens||-1===eZ.indexOf(r)||ez(r,t,String(e)),a(j,r,S.localize,P);if(i.match(eY))throw RangeError("Format string contains an unescaped latin alphabet character `"+i+"`");return r}).join("")}var eJ=n(1153);let e0=(0,eJ.fn)("DateRangePicker"),e1=(e,t,n,r)=>{var o;if(n&&(e=null===(o=r.get(n))||void 0===o?void 0:o.from),e)return ea(e&&!t?e:ef([e,t]))},e2=(e,t,n,r)=>{var o,i;if(n&&(e=ea(null!==(i=null===(o=r.get(n))||void 0===o?void 0:o.to)&&void 0!==i?i:el())),e)return ea(e&&!t?e:ep([e,t]))},e6=[{value:"tdy",text:"Today",from:el()},{value:"w",text:"Last 7 days",from:ey(el(),{days:7})},{value:"t",text:"Last 30 days",from:ey(el(),{days:30})},{value:"m",text:"Month to Date",from:ec(el())},{value:"y",text:"Year to Date",from:eb(el())}],e3=(e,t,n,r)=>{let o=(null==n?void 0:n.code)||"en-US";if(!e&&!t)return"";if(e&&!t)return r?eQ(e,r):e.toLocaleDateString(o,{year:"numeric",month:"short",day:"numeric"});if(e&&t){if(function(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,eo.Z)(t);return n.getTime()===r.getTime()}(e,t))return r?eQ(e,r):e.toLocaleDateString(o,{year:"numeric",month:"short",day:"numeric"});if(e.getMonth()===t.getMonth()&&e.getFullYear()===t.getFullYear())return r?"".concat(eQ(e,r)," - ").concat(eQ(t,r)):"".concat(e.toLocaleDateString(o,{month:"short",day:"numeric"})," - \n ").concat(t.getDate(),", ").concat(t.getFullYear());{if(r)return"".concat(eQ(e,r)," - ").concat(eQ(t,r));let n={year:"numeric",month:"short",day:"numeric"};return"".concat(e.toLocaleDateString(o,n)," - \n ").concat(t.toLocaleDateString(o,n))}}return""};function e4(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getMonth();return t.setFullYear(t.getFullYear(),n+1,0),t.setHours(23,59,59,999),t}function e5(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,em.Z)(t),o=n.getFullYear(),i=n.getDate(),a=new Date(0);a.setFullYear(o,r,15),a.setHours(0,0,0,0);var l=function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getFullYear(),r=t.getMonth(),o=new Date(0);return o.setFullYear(n,r+1,0),o.setHours(0,0,0,0),o.getDate()}(a);return n.setMonth(r,Math.min(i,l)),n}function e8(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,em.Z)(t);return isNaN(n.getTime())?new Date(NaN):(n.setFullYear(r),n)}function e7(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,eo.Z)(t);return 12*(n.getFullYear()-r.getFullYear())+(n.getMonth()-r.getMonth())}function e9(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,eo.Z)(t);return n.getFullYear()===r.getFullYear()&&n.getMonth()===r.getMonth()}function te(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,eo.Z)(t);return n.getTime()=0&&u<=6))throw RangeError("weekStartsOn must be between 0 and 6 inclusively");var d=(0,eo.Z)(e),f=d.getDay();return d.setDate(d.getDate()-((fr.getTime()}function ti(e,t){(0,ei.Z)(2,arguments);var n=ea(e),r=ea(t);return Math.round((n.getTime()-eD(n)-(r.getTime()-eD(r)))/864e5)}function ta(e,t){(0,ei.Z)(2,arguments);var n=(0,em.Z)(t);return(0,eh.Z)(e,7*n)}function tl(e,t){(0,ei.Z)(2,arguments);var n=(0,em.Z)(t);return(0,ev.Z)(e,12*n)}function tc(e,t){(0,ei.Z)(1,arguments);var n,r,o,i,a,l,c,s,u=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.weekStartsOn)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.weekStartsOn)&&void 0!==o?o:ek.weekStartsOn)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 0:null===(s=c.options)||void 0===s?void 0:s.weekStartsOn)&&void 
0!==n?n:0);if(!(u>=0&&u<=6))throw RangeError("weekStartsOn must be between 0 and 6 inclusively");var d=(0,eo.Z)(e),f=d.getDay();return d.setDate(d.getDate()+((fe7(l,a)&&(a=(0,ev.Z)(l,-1*((void 0===s?1:s)-1))),c&&0>e7(a,c)&&(a=c),u=ec(a),f=t.month,h=(p=(0,d.useState)(u))[0],m=[void 0===f?h:f,p[1]])[0],v=m[1],[g,function(e){if(!t.disableNavigation){var n,r=ec(e);v(r),null===(n=t.onMonthChange)||void 0===n||n.call(t,r)}}]),x=b[0],w=b[1],S=function(e,t){for(var n=t.reverseMonths,r=t.numberOfMonths,o=ec(e),i=e7(ec((0,ev.Z)(o,r)),o),a=[],l=0;l=e7(i,n)))return(0,ev.Z)(i,-(r?void 0===o?1:o:1))}}(x,y),C=function(e){return S.some(function(t){return e9(e,t)})};return th.jsx(tM.Provider,{value:{currentMonth:x,displayMonths:S,goToMonth:w,goToDate:function(e,t){C(e)||(t&&te(e,t)?w((0,ev.Z)(e,1+-1*y.numberOfMonths)):w(e))},previousMonth:E,nextMonth:k,isDateDisplayed:C},children:e.children})}function tI(){var e=(0,d.useContext)(tM);if(!e)throw Error("useNavigation must be used within a NavigationProvider");return e}function tR(e){var t,n=tk(),r=n.classNames,o=n.styles,i=n.components,a=tI().goToMonth,l=function(t){a((0,ev.Z)(t,e.displayIndex?-e.displayIndex:0))},c=null!==(t=null==i?void 0:i.CaptionLabel)&&void 0!==t?t:tE,s=th.jsx(c,{id:e.id,displayMonth:e.displayMonth});return th.jsxs("div",{className:r.caption_dropdowns,style:o.caption_dropdowns,children:[th.jsx("div",{className:r.vhidden,children:s}),th.jsx(tj,{onChange:l,displayMonth:e.displayMonth}),th.jsx(tP,{onChange:l,displayMonth:e.displayMonth})]})}function tT(e){return th.jsx("svg",tu({width:"16px",height:"16px",viewBox:"0 0 120 120"},e,{children:th.jsx("path",{d:"M69.490332,3.34314575 C72.6145263,0.218951416 77.6798462,0.218951416 80.8040405,3.34314575 C83.8617626,6.40086786 83.9268205,11.3179931 80.9992143,14.4548388 L80.8040405,14.6568542 L35.461,60 L80.8040405,105.343146 C83.8617626,108.400868 83.9268205,113.317993 80.9992143,116.454839 L80.8040405,116.656854 C77.7463184,119.714576 72.8291931,119.779634 69.6923475,116.852028 L69.490332,116.656854 L18.490332,65.6568542 C15.4326099,62.5991321 15.367552,57.6820069 18.2951583,54.5451612 L18.490332,54.3431458 L69.490332,3.34314575 Z",fill:"currentColor",fillRule:"nonzero"})}))}function tA(e){return th.jsx("svg",tu({width:"16px",height:"16px",viewBox:"0 0 120 120"},e,{children:th.jsx("path",{d:"M49.8040405,3.34314575 C46.6798462,0.218951416 41.6145263,0.218951416 38.490332,3.34314575 C35.4326099,6.40086786 35.367552,11.3179931 38.2951583,14.4548388 L38.490332,14.6568542 L83.8333725,60 L38.490332,105.343146 C35.4326099,108.400868 35.367552,113.317993 38.2951583,116.454839 L38.490332,116.656854 C41.5480541,119.714576 46.4651794,119.779634 49.602025,116.852028 L49.8040405,116.656854 L100.804041,65.6568542 C103.861763,62.5991321 103.926821,57.6820069 100.999214,54.5451612 L100.804041,54.3431458 L49.8040405,3.34314575 Z",fill:"currentColor"})}))}var t_=(0,d.forwardRef)(function(e,t){var n=tk(),r=n.classNames,o=n.styles,i=[r.button_reset,r.button];e.className&&i.push(e.className);var a=i.join(" "),l=tu(tu({},o.button_reset),o.button);return e.style&&Object.assign(l,e.style),th.jsx("button",tu({},e,{ref:t,type:"button",className:a,style:l}))});function tD(e){var t,n,r=tk(),o=r.dir,i=r.locale,a=r.classNames,l=r.styles,c=r.labels,s=c.labelPrevious,u=c.labelNext,d=r.components;if(!e.nextMonth&&!e.previousMonth)return th.jsx(th.Fragment,{});var f=s(e.previousMonth,{locale:i}),p=[a.nav_button,a.nav_button_previous].join(" "),h=u(e.nextMonth,{locale:i}),m=[a.nav_button,a.nav_button_next].join(" 
"),g=null!==(t=null==d?void 0:d.IconRight)&&void 0!==t?t:tA,v=null!==(n=null==d?void 0:d.IconLeft)&&void 0!==n?n:tT;return th.jsxs("div",{className:a.nav,style:l.nav,children:[!e.hidePrevious&&th.jsx(t_,{name:"previous-month","aria-label":f,className:p,style:l.nav_button_previous,disabled:!e.previousMonth,onClick:e.onPreviousClick,children:"rtl"===o?th.jsx(g,{className:a.nav_icon,style:l.nav_icon}):th.jsx(v,{className:a.nav_icon,style:l.nav_icon})}),!e.hideNext&&th.jsx(t_,{name:"next-month","aria-label":h,className:m,style:l.nav_button_next,disabled:!e.nextMonth,onClick:e.onNextClick,children:"rtl"===o?th.jsx(v,{className:a.nav_icon,style:l.nav_icon}):th.jsx(g,{className:a.nav_icon,style:l.nav_icon})})]})}function tZ(e){var t=tk().numberOfMonths,n=tI(),r=n.previousMonth,o=n.nextMonth,i=n.goToMonth,a=n.displayMonths,l=a.findIndex(function(t){return e9(e.displayMonth,t)}),c=0===l,s=l===a.length-1;return th.jsx(tD,{displayMonth:e.displayMonth,hideNext:t>1&&(c||!s),hidePrevious:t>1&&(s||!c),nextMonth:o,previousMonth:r,onPreviousClick:function(){r&&i(r)},onNextClick:function(){o&&i(o)}})}function tL(e){var t,n,r=tk(),o=r.classNames,i=r.disableNavigation,a=r.styles,l=r.captionLayout,c=r.components,s=null!==(t=null==c?void 0:c.CaptionLabel)&&void 0!==t?t:tE;return n=i?th.jsx(s,{id:e.id,displayMonth:e.displayMonth}):"dropdown"===l?th.jsx(tR,{displayMonth:e.displayMonth,id:e.id}):"dropdown-buttons"===l?th.jsxs(th.Fragment,{children:[th.jsx(tR,{displayMonth:e.displayMonth,displayIndex:e.displayIndex,id:e.id}),th.jsx(tZ,{displayMonth:e.displayMonth,displayIndex:e.displayIndex,id:e.id})]}):th.jsxs(th.Fragment,{children:[th.jsx(s,{id:e.id,displayMonth:e.displayMonth,displayIndex:e.displayIndex}),th.jsx(tZ,{displayMonth:e.displayMonth,id:e.id})]}),th.jsx("div",{className:o.caption,style:a.caption,children:n})}function tz(e){var t=tk(),n=t.footer,r=t.styles,o=t.classNames.tfoot;return n?th.jsx("tfoot",{className:o,style:r.tfoot,children:th.jsx("tr",{children:th.jsx("td",{colSpan:8,children:n})})}):th.jsx(th.Fragment,{})}function tB(){var e=tk(),t=e.classNames,n=e.styles,r=e.showWeekNumber,o=e.locale,i=e.weekStartsOn,a=e.ISOWeek,l=e.formatters.formatWeekdayName,c=e.labels.labelWeekday,s=function(e,t,n){for(var r=n?tn(new Date):tt(new Date,{locale:e,weekStartsOn:t}),o=[],i=0;i<7;i++){var a=(0,eh.Z)(r,i);o.push(a)}return o}(o,i,a);return th.jsxs("tr",{style:n.head_row,className:t.head_row,children:[r&&th.jsx("td",{style:n.head_cell,className:t.head_cell}),s.map(function(e,r){return th.jsx("th",{scope:"col",className:t.head_cell,style:n.head_cell,"aria-label":c(e,{locale:o}),children:l(e,{locale:o})},r)})]})}function tF(){var e,t=tk(),n=t.classNames,r=t.styles,o=t.components,i=null!==(e=null==o?void 0:o.HeadRow)&&void 0!==e?e:tB;return th.jsx("thead",{style:r.head,className:n.head,children:th.jsx(i,{})})}function tH(e){var t=tk(),n=t.locale,r=t.formatters.formatDay;return th.jsx(th.Fragment,{children:r(e.date,{locale:n})})}var tq=(0,d.createContext)(void 0);function tW(e){return tm(e.initialProps)?th.jsx(tK,{initialProps:e.initialProps,children:e.children}):th.jsx(tq.Provider,{value:{selected:void 0,modifiers:{disabled:[]}},children:e.children})}function tK(e){var t=e.initialProps,n=e.children,r=t.selected,o=t.min,i=t.max,a={disabled:[]};return r&&a.disabled.push(function(e){var t=i&&r.length>i-1,n=r.some(function(t){return tr(t,e)});return!!(t&&!n)}),th.jsx(tq.Provider,{value:{selected:r,onDayClick:function(e,n,a){if(null===(l=t.onDayClick)||void 0===l||l.call(t,e,n,a),(!n.selected||!o||(null==r?void 
0:r.length)!==o)&&(n.selected||!i||(null==r?void 0:r.length)!==i)){var l,c,s=r?td([],r,!0):[];if(n.selected){var u=s.findIndex(function(t){return tr(e,t)});s.splice(u,1)}else s.push(e);null===(c=t.onSelect)||void 0===c||c.call(t,s,e,n,a)}},modifiers:a},children:n})}function tU(){var e=(0,d.useContext)(tq);if(!e)throw Error("useSelectMultiple must be used within a SelectMultipleProvider");return e}var tV=(0,d.createContext)(void 0);function tG(e){return tg(e.initialProps)?th.jsx(tX,{initialProps:e.initialProps,children:e.children}):th.jsx(tV.Provider,{value:{selected:void 0,modifiers:{range_start:[],range_end:[],range_middle:[],disabled:[]}},children:e.children})}function tX(e){var t=e.initialProps,n=e.children,r=t.selected,o=r||{},i=o.from,a=o.to,l=t.min,c=t.max,s={range_start:[],range_end:[],range_middle:[],disabled:[]};if(i?(s.range_start=[i],a?(s.range_end=[a],tr(i,a)||(s.range_middle=[{after:i,before:a}])):s.range_end=[i]):a&&(s.range_start=[a],s.range_end=[a]),l&&(i&&!a&&s.disabled.push({after:eg(i,l-1),before:(0,eh.Z)(i,l-1)}),i&&a&&s.disabled.push({after:i,before:(0,eh.Z)(i,l-1)}),!i&&a&&s.disabled.push({after:eg(a,l-1),before:(0,eh.Z)(a,l-1)})),c){if(i&&!a&&(s.disabled.push({before:(0,eh.Z)(i,-c+1)}),s.disabled.push({after:(0,eh.Z)(i,c-1)})),i&&a){var u=c-(ti(a,i)+1);s.disabled.push({before:eg(i,u)}),s.disabled.push({after:(0,eh.Z)(a,u)})}!i&&a&&(s.disabled.push({before:(0,eh.Z)(a,-c+1)}),s.disabled.push({after:(0,eh.Z)(a,c-1)}))}return th.jsx(tV.Provider,{value:{selected:r,onDayClick:function(e,n,o){null===(c=t.onDayClick)||void 0===c||c.call(t,e,n,o);var i,a,l,c,s,u=(a=(i=r||{}).from,l=i.to,a&&l?tr(l,e)&&tr(a,e)?void 0:tr(l,e)?{from:l,to:void 0}:tr(a,e)?void 0:to(a,e)?{from:e,to:l}:{from:a,to:e}:l?to(e,l)?{from:l,to:e}:{from:e,to:l}:a?te(e,a)?{from:e,to:a}:{from:a,to:e}:{from:e,to:void 0});null===(s=t.onSelect)||void 0===s||s.call(t,u,e,n,o)},modifiers:s},children:n})}function t$(){var e=(0,d.useContext)(tV);if(!e)throw Error("useSelectRange must be used within a SelectRangeProvider");return e}function tY(e){return Array.isArray(e)?td([],e,!0):void 0!==e?[e]:[]}(l=s||(s={})).Outside="outside",l.Disabled="disabled",l.Selected="selected",l.Hidden="hidden",l.Today="today",l.RangeStart="range_start",l.RangeEnd="range_end",l.RangeMiddle="range_middle";var tQ=s.Selected,tJ=s.Disabled,t0=s.Hidden,t1=s.Today,t2=s.RangeEnd,t6=s.RangeMiddle,t3=s.RangeStart,t4=s.Outside,t5=(0,d.createContext)(void 0);function t8(e){var t,n,r,o=tk(),i=tU(),a=t$(),l=((t={})[tQ]=tY(o.selected),t[tJ]=tY(o.disabled),t[t0]=tY(o.hidden),t[t1]=[o.today],t[t2]=[],t[t6]=[],t[t3]=[],t[t4]=[],o.fromDate&&t[tJ].push({before:o.fromDate}),o.toDate&&t[tJ].push({after:o.toDate}),tm(o)?t[tJ]=t[tJ].concat(i.modifiers[tJ]):tg(o)&&(t[tJ]=t[tJ].concat(a.modifiers[tJ]),t[t3]=a.modifiers[t3],t[t6]=a.modifiers[t6],t[t2]=a.modifiers[t2]),t),c=(n=o.modifiers,r={},Object.entries(n).forEach(function(e){var t=e[0],n=e[1];r[t]=tY(n)}),r),s=tu(tu({},l),c);return th.jsx(t5.Provider,{value:s,children:e.children})}function t7(){var e=(0,d.useContext)(t5);if(!e)throw Error("useModifiers must be used within a ModifiersProvider");return e}function t9(e,t,n){var r=Object.keys(t).reduce(function(n,r){return t[r].some(function(t){if("boolean"==typeof t)return t;if(ex(t))return tr(e,t);if(Array.isArray(t)&&t.every(ex))return t.includes(e);if(t&&"object"==typeof t&&"from"in t)return r=t.from,o=t.to,r&&o?(0>ti(o,r)&&(r=(n=[o,r])[0],o=n[1]),ti(e,r)>=0&&ti(o,e)>=0):o?tr(o,e):!!r&&tr(r,e);if(t&&"object"==typeof t&&"dayOfWeek"in t)return 
t.dayOfWeek.includes(e.getDay());if(t&&"object"==typeof t&&"before"in t&&"after"in t){var n,r,o,i=ti(t.before,e),a=ti(t.after,e),l=i>0,c=a<0;return to(t.before,t.after)?c&&l:l||c}return t&&"object"==typeof t&&"after"in t?ti(e,t.after)>0:t&&"object"==typeof t&&"before"in t?ti(t.before,e)>0:"function"==typeof t&&t(e)})&&n.push(r),n},[]),o={};return r.forEach(function(e){return o[e]=!0}),n&&!e9(e,n)&&(o.outside=!0),o}var ne=(0,d.createContext)(void 0);function nt(e){var t=tI(),n=t7(),r=(0,d.useState)(),o=r[0],i=r[1],a=(0,d.useState)(),l=a[0],c=a[1],s=function(e,t){for(var n,r,o=ec(e[0]),i=e4(e[e.length-1]),a=o;a<=i;){var l=t9(a,t);if(!(!l.disabled&&!l.hidden)){a=(0,eh.Z)(a,1);continue}if(l.selected)return a;l.today&&!r&&(r=a),n||(n=a),a=(0,eh.Z)(a,1)}return r||n}(t.displayMonths,n),u=(null!=o?o:l&&t.isDateDisplayed(l))?l:s,f=function(e){i(e)},p=tk(),h=function(e,r){if(o){var i=function e(t,n){var r=n.moveBy,o=n.direction,i=n.context,a=n.modifiers,l=n.retry,c=void 0===l?{count:0,lastFocused:t}:l,s=i.weekStartsOn,u=i.fromDate,d=i.toDate,f=i.locale,p=({day:eh.Z,week:ta,month:ev.Z,year:tl,startOfWeek:function(e){return i.ISOWeek?tn(e):tt(e,{locale:f,weekStartsOn:s})},endOfWeek:function(e){return i.ISOWeek?ts(e):tc(e,{locale:f,weekStartsOn:s})}})[r](t,"after"===o?1:-1);"before"===o&&u?p=ef([u,p]):"after"===o&&d&&(p=ep([d,p]));var h=!0;if(a){var m=t9(p,a);h=!m.disabled&&!m.hidden}return h?p:c.count>365?c.lastFocused:e(p,{moveBy:r,direction:o,context:i,modifiers:a,retry:tu(tu({},c),{count:c.count+1})})}(o,{moveBy:e,direction:r,context:p,modifiers:n});tr(o,i)||(t.goToDate(i,o),f(i))}};return th.jsx(ne.Provider,{value:{focusedDay:o,focusTarget:u,blur:function(){c(o),i(void 0)},focus:f,focusDayAfter:function(){return h("day","after")},focusDayBefore:function(){return h("day","before")},focusWeekAfter:function(){return h("week","after")},focusWeekBefore:function(){return h("week","before")},focusMonthBefore:function(){return h("month","before")},focusMonthAfter:function(){return h("month","after")},focusYearBefore:function(){return h("year","before")},focusYearAfter:function(){return h("year","after")},focusStartOfWeek:function(){return h("startOfWeek","before")},focusEndOfWeek:function(){return h("endOfWeek","after")}},children:e.children})}function nn(){var e=(0,d.useContext)(ne);if(!e)throw Error("useFocusContext must be used within a FocusProvider");return e}var nr=(0,d.createContext)(void 0);function no(e){return tv(e.initialProps)?th.jsx(ni,{initialProps:e.initialProps,children:e.children}):th.jsx(nr.Provider,{value:{selected:void 0},children:e.children})}function ni(e){var t=e.initialProps,n=e.children,r={selected:t.selected,onDayClick:function(e,n,r){var o,i,a;if(null===(o=t.onDayClick)||void 0===o||o.call(t,e,n,r),n.selected&&!t.required){null===(i=t.onSelect)||void 0===i||i.call(t,void 0,e,n,r);return}null===(a=t.onSelect)||void 0===a||a.call(t,e,e,n,r)}};return th.jsx(nr.Provider,{value:r,children:n})}function na(){var e=(0,d.useContext)(nr);if(!e)throw Error("useSelectSingle must be used within a SelectSingleProvider");return e}function nl(e){var t,n,r,o,i,a,l,c,u,f,p,h,m,g,v,y,b,x,w,S,k,E,C,O,j,P,M,N,I,R,T,A,_,D,Z,L,z,B,F,H,q,W,K=(0,d.useRef)(null),U=(t=e.date,n=e.displayMonth,a=tk(),l=nn(),c=t9(t,t7(),n),u=tk(),f=na(),p=tU(),h=t$(),g=(m=nn()).focusDayAfter,v=m.focusDayBefore,y=m.focusWeekAfter,b=m.focusWeekBefore,x=m.blur,w=m.focus,S=m.focusMonthBefore,k=m.focusMonthAfter,E=m.focusYearBefore,C=m.focusYearAfter,O=m.focusStartOfWeek,j=m.focusEndOfWeek,P={onClick:function(e){var 
n,r,o,i;tv(u)?null===(n=f.onDayClick)||void 0===n||n.call(f,t,c,e):tm(u)?null===(r=p.onDayClick)||void 0===r||r.call(p,t,c,e):tg(u)?null===(o=h.onDayClick)||void 0===o||o.call(h,t,c,e):null===(i=u.onDayClick)||void 0===i||i.call(u,t,c,e)},onFocus:function(e){var n;w(t),null===(n=u.onDayFocus)||void 0===n||n.call(u,t,c,e)},onBlur:function(e){var n;x(),null===(n=u.onDayBlur)||void 0===n||n.call(u,t,c,e)},onKeyDown:function(e){var n;switch(e.key){case"ArrowLeft":e.preventDefault(),e.stopPropagation(),"rtl"===u.dir?g():v();break;case"ArrowRight":e.preventDefault(),e.stopPropagation(),"rtl"===u.dir?v():g();break;case"ArrowDown":e.preventDefault(),e.stopPropagation(),y();break;case"ArrowUp":e.preventDefault(),e.stopPropagation(),b();break;case"PageUp":e.preventDefault(),e.stopPropagation(),e.shiftKey?E():S();break;case"PageDown":e.preventDefault(),e.stopPropagation(),e.shiftKey?C():k();break;case"Home":e.preventDefault(),e.stopPropagation(),O();break;case"End":e.preventDefault(),e.stopPropagation(),j()}null===(n=u.onDayKeyDown)||void 0===n||n.call(u,t,c,e)},onKeyUp:function(e){var n;null===(n=u.onDayKeyUp)||void 0===n||n.call(u,t,c,e)},onMouseEnter:function(e){var n;null===(n=u.onDayMouseEnter)||void 0===n||n.call(u,t,c,e)},onMouseLeave:function(e){var n;null===(n=u.onDayMouseLeave)||void 0===n||n.call(u,t,c,e)},onPointerEnter:function(e){var n;null===(n=u.onDayPointerEnter)||void 0===n||n.call(u,t,c,e)},onPointerLeave:function(e){var n;null===(n=u.onDayPointerLeave)||void 0===n||n.call(u,t,c,e)},onTouchCancel:function(e){var n;null===(n=u.onDayTouchCancel)||void 0===n||n.call(u,t,c,e)},onTouchEnd:function(e){var n;null===(n=u.onDayTouchEnd)||void 0===n||n.call(u,t,c,e)},onTouchMove:function(e){var n;null===(n=u.onDayTouchMove)||void 0===n||n.call(u,t,c,e)},onTouchStart:function(e){var n;null===(n=u.onDayTouchStart)||void 0===n||n.call(u,t,c,e)}},M=tk(),N=na(),I=tU(),R=t$(),T=tv(M)?N.selected:tm(M)?I.selected:tg(M)?R.selected:void 0,A=!!(a.onDayClick||"default"!==a.mode),(0,d.useEffect)(function(){var e;!c.outside&&l.focusedDay&&A&&tr(l.focusedDay,t)&&(null===(e=K.current)||void 0===e||e.focus())},[l.focusedDay,t,K,A,c.outside]),D=(_=[a.classNames.day],Object.keys(c).forEach(function(e){var t=a.modifiersClassNames[e];if(t)_.push(t);else if(Object.values(s).includes(e)){var n=a.classNames["day_".concat(e)];n&&_.push(n)}}),_).join(" "),Z=tu({},a.styles.day),Object.keys(c).forEach(function(e){var t;Z=tu(tu({},Z),null===(t=a.modifiersStyles)||void 0===t?void 0:t[e])}),L=Z,z=!!(c.outside&&!a.showOutsideDays||c.hidden),B=null!==(i=null===(o=a.components)||void 0===o?void 0:o.DayContent)&&void 0!==i?i:tH,F={style:L,className:D,children:th.jsx(B,{date:t,displayMonth:n,activeModifiers:c}),role:"gridcell"},H=l.focusTarget&&tr(l.focusTarget,t)&&!c.outside,q=l.focusedDay&&tr(l.focusedDay,t),W=tu(tu(tu({},F),((r={disabled:c.disabled,role:"gridcell"})["aria-selected"]=c.selected,r.tabIndex=q||H?0:-1,r)),P),{isButton:A,isHidden:z,activeModifiers:c,selectedDays:T,buttonProps:W,divProps:F});return U.isHidden?th.jsx("div",{role:"gridcell"}):U.isButton?th.jsx(t_,tu({name:"day",ref:K},U.buttonProps)):th.jsx("div",tu({},U.divProps))}function nc(e){var t=e.number,n=e.dates,r=tk(),o=r.onWeekNumberClick,i=r.styles,a=r.classNames,l=r.locale,c=r.labels.labelWeekNumber,s=(0,r.formatters.formatWeekNumber)(Number(t),{locale:l});if(!o)return th.jsx("span",{className:a.weeknumber,style:i.weeknumber,children:s});var u=c(Number(t),{locale:l});return 
th.jsx(t_,{name:"week-number","aria-label":u,className:a.weeknumber,style:i.weeknumber,onClick:function(e){o(t,n,e)},children:s})}function ns(e){var t,n,r,o=tk(),i=o.styles,a=o.classNames,l=o.showWeekNumber,c=o.components,s=null!==(t=null==c?void 0:c.Day)&&void 0!==t?t:nl,u=null!==(n=null==c?void 0:c.WeekNumber)&&void 0!==n?n:nc;return l&&(r=th.jsx("td",{className:a.cell,style:i.cell,children:th.jsx(u,{number:e.weekNumber,dates:e.dates})})),th.jsxs("tr",{className:a.row,style:i.row,children:[r,e.dates.map(function(t){return th.jsx("td",{className:a.cell,style:i.cell,role:"presentation",children:th.jsx(s,{displayMonth:e.displayMonth,date:t})},function(e){return(0,ei.Z)(1,arguments),Math.floor(function(e){return(0,ei.Z)(1,arguments),(0,eo.Z)(e).getTime()}(e)/1e3)}(t))})]})}function nu(e,t,n){for(var r=(null==n?void 0:n.ISOWeek)?ts(t):tc(t,n),o=(null==n?void 0:n.ISOWeek)?tn(e):tt(e,n),i=ti(r,o),a=[],l=0;l<=i;l++)a.push((0,eh.Z)(o,l));return a.reduce(function(e,t){var r=(null==n?void 0:n.ISOWeek)?function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e);return Math.round((tn(t).getTime()-(function(e){(0,ei.Z)(1,arguments);var t=function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getFullYear(),r=new Date(0);r.setFullYear(n+1,0,4),r.setHours(0,0,0,0);var o=tn(r),i=new Date(0);i.setFullYear(n,0,4),i.setHours(0,0,0,0);var a=tn(i);return t.getTime()>=o.getTime()?n+1:t.getTime()>=a.getTime()?n:n-1}(e),n=new Date(0);return n.setFullYear(t,0,4),n.setHours(0,0,0,0),tn(n)})(t).getTime())/6048e5)+1}(t):function(e,t){(0,ei.Z)(1,arguments);var n=(0,eo.Z)(e);return Math.round((tt(n,t).getTime()-(function(e,t){(0,ei.Z)(1,arguments);var n,r,o,i,a,l,c,s,u=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.firstWeekContainsDate)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.firstWeekContainsDate)&&void 0!==o?o:ek.firstWeekContainsDate)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 0:null===(s=c.options)||void 0===s?void 0:s.firstWeekContainsDate)&&void 0!==n?n:1),d=function(e,t){(0,ei.Z)(1,arguments);var n,r,o,i,a,l,c,s,u=(0,eo.Z)(e),d=u.getFullYear(),f=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.firstWeekContainsDate)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.firstWeekContainsDate)&&void 0!==o?o:ek.firstWeekContainsDate)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 0:null===(s=c.options)||void 0===s?void 0:s.firstWeekContainsDate)&&void 0!==n?n:1);if(!(f>=1&&f<=7))throw RangeError("firstWeekContainsDate must be between 1 and 7 inclusively");var p=new Date(0);p.setFullYear(d+1,0,f),p.setHours(0,0,0,0);var h=tt(p,t),m=new Date(0);m.setFullYear(d,0,f),m.setHours(0,0,0,0);var g=tt(m,t);return u.getTime()>=h.getTime()?d+1:u.getTime()>=g.getTime()?d:d-1}(e,t),f=new Date(0);return f.setFullYear(d,0,u),f.setHours(0,0,0,0),tt(f,t)})(n,t).getTime())/6048e5)+1}(t,n),o=e.find(function(e){return e.weekNumber===r});return o?o.dates.push(t):e.push({weekNumber:r,dates:[t]}),e},[])}function nd(e){var t,n,r,o=tk(),i=o.locale,a=o.classNames,l=o.styles,c=o.hideHead,s=o.fixedWeeks,u=o.components,d=o.weekStartsOn,f=o.firstWeekContainsDate,p=o.ISOWeek,h=function(e,t){var n=nu(ec(e),e4(e),t);if(null==t?void 0:t.useFixedWeeks){var r=function(e,t){return(0,ei.Z)(1,arguments),function(e,t,n){(0,ei.Z)(2,arguments);var r=tt(e,n),o=tt(t,n);return Math.round((r.getTime()-eD(r)-(o.getTime()-eD(o)))/6048e5)}(function(e){(0,ei.Z)(1,arguments);var 
t=(0,eo.Z)(e),n=t.getMonth();return t.setFullYear(t.getFullYear(),n+1,0),t.setHours(0,0,0,0),t}(e),ec(e),t)+1}(e,t);if(r<6){var o=n[n.length-1],i=o.dates[o.dates.length-1],a=ta(i,6-r),l=nu(ta(i,1),a,t);n.push.apply(n,l)}}return n}(e.displayMonth,{useFixedWeeks:!!s,ISOWeek:p,locale:i,weekStartsOn:d,firstWeekContainsDate:f}),m=null!==(t=null==u?void 0:u.Head)&&void 0!==t?t:tF,g=null!==(n=null==u?void 0:u.Row)&&void 0!==n?n:ns,v=null!==(r=null==u?void 0:u.Footer)&&void 0!==r?r:tz;return th.jsxs("table",{id:e.id,className:a.table,style:l.table,role:"grid","aria-labelledby":e["aria-labelledby"],children:[!c&&th.jsx(m,{}),th.jsx("tbody",{className:a.tbody,style:l.tbody,children:h.map(function(t){return th.jsx(g,{displayMonth:e.displayMonth,dates:t.dates,weekNumber:t.weekNumber},t.weekNumber)})}),th.jsx(v,{displayMonth:e.displayMonth})]})}var nf="undefined"!=typeof window&&window.document&&window.document.createElement?d.useLayoutEffect:d.useEffect,np=!1,nh=0;function nm(){return"react-day-picker-".concat(++nh)}function ng(e){var t,n,r,o,i,a,l,c,s=tk(),u=s.dir,f=s.classNames,p=s.styles,h=s.components,m=tI().displayMonths,g=(r=null!=(t=s.id?"".concat(s.id,"-").concat(e.displayIndex):void 0)?t:np?nm():null,i=(o=(0,d.useState)(r))[0],a=o[1],nf(function(){null===i&&a(nm())},[]),(0,d.useEffect)(function(){!1===np&&(np=!0)},[]),null!==(n=null!=t?t:i)&&void 0!==n?n:void 0),v=s.id?"".concat(s.id,"-grid-").concat(e.displayIndex):void 0,y=[f.month],b=p.month,x=0===e.displayIndex,w=e.displayIndex===m.length-1,S=!x&&!w;"rtl"===u&&(w=(l=[x,w])[0],x=l[1]),x&&(y.push(f.caption_start),b=tu(tu({},b),p.caption_start)),w&&(y.push(f.caption_end),b=tu(tu({},b),p.caption_end)),S&&(y.push(f.caption_between),b=tu(tu({},b),p.caption_between));var k=null!==(c=null==h?void 0:h.Caption)&&void 0!==c?c:tL;return th.jsxs("div",{className:y.join(" "),style:b,children:[th.jsx(k,{id:g,displayMonth:e.displayMonth,displayIndex:e.displayIndex}),th.jsx(nd,{id:v,"aria-labelledby":g,displayMonth:e.displayMonth})]},e.displayIndex)}function nv(e){var t=tk(),n=t.classNames,r=t.styles;return th.jsx("div",{className:n.months,style:r.months,children:e.children})}function ny(e){var t,n,r=e.initialProps,o=tk(),i=nn(),a=tI(),l=(0,d.useState)(!1),c=l[0],s=l[1];(0,d.useEffect)(function(){o.initialFocus&&i.focusTarget&&(c||(i.focus(i.focusTarget),s(!0)))},[o.initialFocus,c,i.focus,i.focusTarget,i]);var u=[o.classNames.root,o.className];o.numberOfMonths>1&&u.push(o.classNames.multiple_months),o.showWeekNumber&&u.push(o.classNames.with_weeknumber);var f=tu(tu({},o.styles.root),o.style),p=Object.keys(r).filter(function(e){return e.startsWith("data-")}).reduce(function(e,t){var n;return tu(tu({},e),((n={})[t]=r[t],n))},{}),h=null!==(n=null===(t=r.components)||void 0===t?void 0:t.Months)&&void 0!==n?n:nv;return th.jsx("div",tu({className:u.join(" "),style:f,dir:o.dir,id:o.id,nonce:r.nonce,title:r.title,lang:r.lang},p,{children:th.jsx(h,{children:a.displayMonths.map(function(e,t){return th.jsx(ng,{displayIndex:t,displayMonth:e},t)})})}))}function nb(e){var t=e.children,n=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n}(e,["children"]);return 
th.jsx(tS,{initialProps:n,children:th.jsx(tN,{children:th.jsx(no,{initialProps:n,children:th.jsx(tW,{initialProps:n,children:th.jsx(tG,{initialProps:n,children:th.jsx(t8,{children:th.jsx(nt,{children:t})})})})})})})}function nx(e){return th.jsx(nb,tu({},e,{children:th.jsx(ny,{initialProps:e})}))}let nw=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M10.8284 12.0007L15.7782 16.9504L14.364 18.3646L8 12.0007L14.364 5.63672L15.7782 7.05093L10.8284 12.0007Z"}))},nS=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M13.1717 12.0007L8.22192 7.05093L9.63614 5.63672L16.0001 12.0007L9.63614 18.3646L8.22192 16.9504L13.1717 12.0007Z"}))},nk=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M4.83582 12L11.0429 18.2071L12.4571 16.7929L7.66424 12L12.4571 7.20712L11.0429 5.79291L4.83582 12ZM10.4857 12L16.6928 18.2071L18.107 16.7929L13.3141 12L18.107 7.20712L16.6928 5.79291L10.4857 12Z"}))},nE=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M19.1642 12L12.9571 5.79291L11.5429 7.20712L16.3358 12L11.5429 16.7929L12.9571 18.2071L19.1642 12ZM13.5143 12L7.30722 5.79291L5.89301 7.20712L10.6859 12L5.89301 16.7929L7.30722 18.2071L13.5143 12Z"}))};var nC=n(84264);n(41649);var nO=n(1526),nj=n(7084),nP=n(26898);let nM={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-1",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-1.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-1.5",fontSize:"text-lg"},xl:{paddingX:"px-3.5",paddingY:"py-1.5",fontSize:"text-xl"}},nN={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-0.5",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-0.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-0.5",fontSize:"text-lg"},xl:{paddingX:"px-4",paddingY:"py-1",fontSize:"text-xl"}},nI={xs:{height:"h-4",width:"w-4"},sm:{height:"h-4",width:"w-4"},md:{height:"h-4",width:"w-4"},lg:{height:"h-5",width:"w-5"},xl:{height:"h-6",width:"w-6"}},nR={[nj.wu.Increase]:{bgColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.text).textColor},[nj.wu.ModerateIncrease]:{bgColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.text).textColor},[nj.wu.Decrease]:{bgColor:(0,eJ.bM)(nj.fr.Rose,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Rose,nP.K.text).textColor},[nj.wu.ModerateDecrease]:{bgColor:(0,eJ.bM)(nj.fr.Rose,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Rose,nP.K.text).textColor},[nj.wu.Unchanged]:{bgColor:(0,eJ.bM)(nj.fr.Orange,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Orange,nP.K.text).textColor}},nT={[nj.wu.Increase]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M13.0001 7.82843V20H11.0001V7.82843L5.63614 13.1924L4.22192 11.7782L12.0001 4L19.7783 11.7782L18.3641 13.1924L13.0001 7.82843Z"}))},[nj.wu.ModerateIncrease]:e=>{var t=(0,u._T)(e,[]);return 
d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M16.0037 9.41421L7.39712 18.0208L5.98291 16.6066L14.5895 8H7.00373V6H18.0037V17H16.0037V9.41421Z"}))},[nj.wu.Decrease]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M13.0001 16.1716L18.3641 10.8076L19.7783 12.2218L12.0001 20L4.22192 12.2218L5.63614 10.8076L11.0001 16.1716V4H13.0001V16.1716Z"}))},[nj.wu.ModerateDecrease]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M14.5895 16.0032L5.98291 7.39664L7.39712 5.98242L16.0037 14.589V7.00324H18.0037V18.0032H7.00373V16.0032H14.5895Z"}))},[nj.wu.Unchanged]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M16.1716 10.9999L10.8076 5.63589L12.2218 4.22168L20 11.9999L12.2218 19.778L10.8076 18.3638L16.1716 12.9999H4V10.9999H16.1716Z"}))}},nA=(0,eJ.fn)("BadgeDelta");d.forwardRef((e,t)=>{let{deltaType:n=nj.wu.Increase,isIncreasePositive:r=!0,size:o=nj.u8.SM,tooltip:i,children:a,className:l}=e,c=(0,u._T)(e,["deltaType","isIncreasePositive","size","tooltip","children","className"]),s=nT[n],f=(0,eJ.Fo)(n,r),p=a?nN:nM,{tooltipProps:h,getReferenceProps:m}=(0,nO.l)();return d.createElement("span",Object.assign({ref:(0,eJ.lq)([t,h.refs.setReference]),className:(0,es.q)(nA("root"),"w-max flex-shrink-0 inline-flex justify-center items-center cursor-default rounded-tremor-full bg-opacity-20 dark:bg-opacity-25",nR[f].bgColor,nR[f].textColor,p[o].paddingX,p[o].paddingY,p[o].fontSize,l)},m,c),d.createElement(nO.Z,Object.assign({text:i},h)),d.createElement(s,{className:(0,es.q)(nA("icon"),"shrink-0",a?(0,es.q)("-ml-1 mr-1.5"):nI[o].height,nI[o].width)}),a?d.createElement("p",{className:(0,es.q)(nA("text"),"text-sm whitespace-nowrap")},a):null)}).displayName="BadgeDelta";var n_=n(47323);let nD=e=>{var{onClick:t,icon:n}=e,r=(0,u._T)(e,["onClick","icon"]);return d.createElement("button",Object.assign({type:"button",className:(0,es.q)("flex items-center justify-center p-1 h-7 w-7 outline-none focus:ring-2 transition duration-100 border border-tremor-border dark:border-dark-tremor-border hover:bg-tremor-background-muted dark:hover:bg-dark-tremor-background-muted rounded-tremor-small focus:border-tremor-brand-subtle select-none dark:focus:border-dark-tremor-brand-subtle focus:ring-tremor-brand-muted dark:focus:ring-dark-tremor-brand-muted text-tremor-content-subtle dark:text-dark-tremor-content-subtle hover:text-tremor-content dark:hover:text-dark-tremor-content")},r),d.createElement(n_.Z,{onClick:t,icon:n,variant:"simple",color:"slate",size:"sm"}))};function nZ(e){var{mode:t,defaultMonth:n,selected:r,onSelect:o,locale:i,disabled:a,enableYearNavigation:l,classNames:c,weekStartsOn:s=0}=e,f=(0,u._T)(e,["mode","defaultMonth","selected","onSelect","locale","disabled","enableYearNavigation","classNames","weekStartsOn"]);return d.createElement(nx,Object.assign({showOutsideDays:!0,mode:t,defaultMonth:n,selected:r,onSelect:o,locale:i,disabled:a,weekStartsOn:s,classNames:Object.assign({months:"flex flex-col sm:flex-row space-y-4 sm:space-x-4 sm:space-y-0",month:"space-y-4",caption:"flex justify-center pt-2 relative items-center",caption_label:"text-tremor-default 
text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis font-medium",nav:"space-x-1 flex items-center",nav_button:"flex items-center justify-center p-1 h-7 w-7 outline-none focus:ring-2 transition duration-100 border border-tremor-border dark:border-dark-tremor-border hover:bg-tremor-background-muted dark:hover:bg-dark-tremor-background-muted rounded-tremor-small focus:border-tremor-brand-subtle dark:focus:border-dark-tremor-brand-subtle focus:ring-tremor-brand-muted dark:focus:ring-dark-tremor-brand-muted text-tremor-content-subtle dark:text-dark-tremor-content-subtle hover:text-tremor-content dark:hover:text-dark-tremor-content",nav_button_previous:"absolute left-1",nav_button_next:"absolute right-1",table:"w-full border-collapse space-y-1",head_row:"flex",head_cell:"w-9 font-normal text-center text-tremor-content-subtle dark:text-dark-tremor-content-subtle",row:"flex w-full mt-0.5",cell:"text-center p-0 relative focus-within:relative text-tremor-default text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis",day:"h-9 w-9 p-0 hover:bg-tremor-background-subtle dark:hover:bg-dark-tremor-background-subtle outline-tremor-brand dark:outline-dark-tremor-brand rounded-tremor-default",day_today:"font-bold",day_selected:"aria-selected:bg-tremor-background-emphasis aria-selected:text-tremor-content-inverted dark:aria-selected:bg-dark-tremor-background-emphasis dark:aria-selected:text-dark-tremor-content-inverted ",day_disabled:"text-tremor-content-subtle dark:text-dark-tremor-content-subtle disabled:hover:bg-transparent",day_outside:"text-tremor-content-subtle dark:text-dark-tremor-content-subtle"},c),components:{IconLeft:e=>{var t=(0,u._T)(e,[]);return d.createElement(nw,Object.assign({className:"h-4 w-4"},t))},IconRight:e=>{var t=(0,u._T)(e,[]);return d.createElement(nS,Object.assign({className:"h-4 w-4"},t))},Caption:e=>{var t=(0,u._T)(e,[]);let{goToMonth:n,nextMonth:r,previousMonth:o,currentMonth:a}=tI();return d.createElement("div",{className:"flex justify-between items-center"},d.createElement("div",{className:"flex items-center space-x-1"},l&&d.createElement(nD,{onClick:()=>a&&n(tl(a,-1)),icon:nk}),d.createElement(nD,{onClick:()=>o&&n(o),icon:nw})),d.createElement(nC.Z,{className:"text-tremor-default tabular-nums capitalize text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis font-medium"},eQ(t.displayMonth,"LLLL yyy",{locale:i})),d.createElement("div",{className:"flex items-center space-x-1"},d.createElement(nD,{onClick:()=>r&&n(r),icon:nS}),l&&d.createElement(nD,{onClick:()=>a&&n(tl(a,1)),icon:nE})))}}},f))}nZ.displayName="DateRangePicker",n(27281);var nL=n(57365),nz=n(44140);let nB=el(),nF=d.forwardRef((e,t)=>{var n,r;let{value:o,defaultValue:i,onValueChange:a,enableSelect:l=!0,minDate:c,maxDate:s,placeholder:f="Select range",selectPlaceholder:p="Select range",disabled:h=!1,locale:m=eU,enableClear:g=!0,displayFormat:v,children:y,className:b,enableYearNavigation:x=!1,weekStartsOn:w=0,disabledDates:S}=e,k=(0,u._T)(e,["value","defaultValue","onValueChange","enableSelect","minDate","maxDate","placeholder","selectPlaceholder","disabled","locale","enableClear","displayFormat","children","className","enableYearNavigation","weekStartsOn","disabledDates"]),[E,C]=(0,nz.Z)(i,o),[O,j]=(0,d.useState)(!1),[P,M]=(0,d.useState)(!1),N=(0,d.useMemo)(()=>{let e=[];return c&&e.push({before:c}),s&&e.push({after:s}),[...e,...null!=S?S:[]]},[c,s,S]),I=(0,d.useMemo)(()=>{let e=new Map;return y?d.Children.forEach(y,t=>{var 
n;e.set(t.props.value,{text:null!==(n=(0,eu.qg)(t))&&void 0!==n?n:t.props.value,from:t.props.from,to:t.props.to})}):e6.forEach(t=>{e.set(t.value,{text:t.text,from:t.from,to:nB})}),e},[y]),R=(0,d.useMemo)(()=>{if(y)return(0,eu.sl)(y);let e=new Map;return e6.forEach(t=>e.set(t.value,t.text)),e},[y]),T=(null==E?void 0:E.selectValue)||"",A=e1(null==E?void 0:E.from,c,T,I),_=e2(null==E?void 0:E.to,s,T,I),D=A||_?e3(A,_,m,v):f,Z=ec(null!==(r=null!==(n=null!=_?_:A)&&void 0!==n?n:s)&&void 0!==r?r:nB),L=g&&!h;return d.createElement("div",Object.assign({ref:t,className:(0,es.q)("w-full min-w-[10rem] relative flex justify-between text-tremor-default max-w-sm shadow-tremor-input dark:shadow-dark-tremor-input rounded-tremor-default",b)},k),d.createElement(J,{as:"div",className:(0,es.q)("w-full",l?"rounded-l-tremor-default":"rounded-tremor-default",O&&"ring-2 ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted z-10")},d.createElement("div",{className:"relative w-full"},d.createElement(J.Button,{onFocus:()=>j(!0),onBlur:()=>j(!1),disabled:h,className:(0,es.q)("w-full outline-none text-left whitespace-nowrap truncate focus:ring-2 transition duration-100 rounded-l-tremor-default flex flex-nowrap border pl-3 py-2","rounded-l-tremor-default border-tremor-border text-tremor-content-emphasis focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:text-dark-tremor-content-emphasis dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",l?"rounded-l-tremor-default":"rounded-tremor-default",L?"pr-8":"pr-4",(0,eu.um)((0,eu.Uh)(A||_),h))},d.createElement(en,{className:(0,es.q)(e0("calendarIcon"),"flex-none shrink-0 h-5 w-5 -ml-0.5 mr-2","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle"),"aria-hidden":"true"}),d.createElement("p",{className:"truncate"},D)),L&&A?d.createElement("button",{type:"button",className:(0,es.q)("absolute outline-none inset-y-0 right-0 flex items-center transition duration-100 mr-4"),onClick:e=>{e.preventDefault(),null==a||a({}),C({})}},d.createElement(er.Z,{className:(0,es.q)(e0("clearIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null),d.createElement(ee.u,{className:"absolute z-10 min-w-min left-0",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},d.createElement(J.Panel,{focus:!0,className:(0,es.q)("divide-y overflow-y-auto outline-none rounded-tremor-default p-3 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},d.createElement(nZ,Object.assign({mode:"range",showOutsideDays:!0,defaultMonth:Z,selected:{from:A,to:_},onSelect:e=>{null==a||a({from:null==e?void 0:e.from,to:null==e?void 0:e.to}),C({from:null==e?void 0:e.from,to:null==e?void 0:e.to})},locale:m,disabled:N,enableYearNavigation:x,classNames:{day_range_middle:(0,es.q)("!rounded-none aria-selected:!bg-tremor-background-subtle aria-selected:dark:!bg-dark-tremor-background-subtle aria-selected:!text-tremor-content aria-selected:dark:!bg-dark-tremor-background-subtle"),day_range_start:"rounded-r-none rounded-l-tremor-small aria-selected:text-tremor-brand-inverted 
dark:aria-selected:text-dark-tremor-brand-inverted",day_range_end:"rounded-l-none rounded-r-tremor-small aria-selected:text-tremor-brand-inverted dark:aria-selected:text-dark-tremor-brand-inverted"},weekStartsOn:w},e))))),l&&d.createElement(et.R,{as:"div",className:(0,es.q)("w-48 -ml-px rounded-r-tremor-default",P&&"ring-2 ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted z-10"),value:T,onChange:e=>{let{from:t,to:n}=I.get(e),r=null!=n?n:nB;null==a||a({from:t,to:r,selectValue:e}),C({from:t,to:r,selectValue:e})},disabled:h},e=>{var t;let{value:n}=e;return d.createElement(d.Fragment,null,d.createElement(et.R.Button,{onFocus:()=>M(!0),onBlur:()=>M(!1),className:(0,es.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-r-tremor-default transition duration-100 border px-4 py-2","border-tremor-border shadow-tremor-input text-tremor-content-emphasis focus:border-tremor-brand-subtle","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:text-dark-tremor-content-emphasis dark:focus:border-dark-tremor-brand-subtle",(0,eu.um)((0,eu.Uh)(n),h))},n&&null!==(t=R.get(n))&&void 0!==t?t:p),d.createElement(ee.u,{className:"absolute z-10 w-full inset-x-0 right-0",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},d.createElement(et.R.Options,{className:(0,es.q)("divide-y overflow-y-auto outline-none border my-1","shadow-tremor-dropdown bg-tremor-background border-tremor-border divide-tremor-border rounded-tremor-default","dark:shadow-dark-tremor-dropdown dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border")},null!=y?y:e6.map(e=>d.createElement(nL.Z,{key:e.value,value:e.value},e.text)))))}))});nF.displayName="DateRangePicker"},92414:function(e,t,n){"use strict";n.d(t,{Z:function(){return v}});var r=n(5853),o=n(2265);n(42698),n(64016),n(8710);var i=n(33232),a=n(44140),l=n(58747);let c=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M18.031 16.6168L22.3137 20.8995L20.8995 22.3137L16.6168 18.031C15.0769 19.263 13.124 20 11 20C6.032 20 2 15.968 2 11C2 6.032 6.032 2 11 2C15.968 2 20 6.032 20 11C20 13.124 19.263 15.0769 18.031 16.6168ZM16.0247 15.8748C17.2475 14.6146 18 12.8956 18 11C18 7.1325 14.8675 4 11 4C7.1325 4 4 7.1325 4 11C4 14.8675 7.1325 18 11 18C12.8956 18 14.6146 17.2475 15.8748 16.0247L16.0247 15.8748Z"}))};var s=n(4537),u=n(28517),d=n(33044);let f=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",width:"100%",height:"100%",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2",strokeLinecap:"round",strokeLinejoin:"round"},t),o.createElement("line",{x1:"18",y1:"6",x2:"6",y2:"18"}),o.createElement("line",{x1:"6",y1:"6",x2:"18",y2:"18"}))};var p=n(65954),h=n(1153),m=n(96398);let g=(0,h.fn)("MultiSelect"),v=o.forwardRef((e,t)=>{let{defaultValue:n,value:h,onValueChange:v,placeholder:y="Select...",placeholderSearch:b="Search",disabled:x=!1,icon:w,children:S,className:k}=e,E=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","placeholderSearch","disabled","icon","children","className"]),[C,O]=(0,a.Z)(n,h),{reactElementChildren:j,optionsAvailable:P}=(0,o.useMemo)(()=>{let 
e=o.Children.toArray(S).filter(o.isValidElement);return{reactElementChildren:e,optionsAvailable:(0,m.n0)("",e)}},[S]),[M,N]=(0,o.useState)(""),I=(null!=C?C:[]).length>0,R=(0,o.useMemo)(()=>M?(0,m.n0)(M,j):P,[M,j,P]),T=()=>{N("")};return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:C,value:C,onChange:e=>{null==v||v(e),O(e)},disabled:x,className:(0,p.q)("w-full min-w-[10rem] relative text-tremor-default",k)},E,{multiple:!0}),e=>{let{value:t}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,p.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-1.5","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",w?"pl-11 -ml-0.5":"pl-3",(0,m.um)(t.length>0,x))},w&&o.createElement("span",{className:(0,p.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(w,{className:(0,p.q)(g("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("div",{className:"h-6 flex items-center"},t.length>0?o.createElement("div",{className:"flex flex-nowrap overflow-x-scroll [&::-webkit-scrollbar]:hidden [scrollbar-width:none] gap-x-1 mr-5 -ml-1.5 relative"},P.filter(e=>t.includes(e.props.value)).map((e,n)=>{var r;return o.createElement("div",{key:n,className:(0,p.q)("max-w-[100px] lg:max-w-[200px] flex justify-center items-center pl-2 pr-1.5 py-1 font-medium","rounded-tremor-small","bg-tremor-background-muted dark:bg-dark-tremor-background-muted","bg-tremor-background-subtle dark:bg-dark-tremor-background-subtle","text-tremor-content-default dark:text-dark-tremor-content-default","text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis")},o.createElement("div",{className:"text-xs truncate "},null!==(r=e.props.children)&&void 0!==r?r:e.props.value),o.createElement("div",{onClick:n=>{n.preventDefault();let r=t.filter(t=>t!==e.props.value);null==v||v(r),O(r)}},o.createElement(f,{className:(0,p.q)(g("clearIconItem"),"cursor-pointer rounded-tremor-full w-3.5 h-3.5 ml-2","text-tremor-content-subtle hover:text-tremor-content","dark:text-dark-tremor-content-subtle dark:hover:text-tremor-content")})))})):o.createElement("span",null,y)),o.createElement("span",{className:(0,p.q)("absolute inset-y-0 right-0 flex items-center mr-2.5")},o.createElement(l.Z,{className:(0,p.q)(g("arrowDownIcon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),I&&!x?o.createElement("button",{type:"button",className:(0,p.q)("absolute inset-y-0 right-0 flex items-center mr-8"),onClick:e=>{e.preventDefault(),O([]),null==v||v([])}},o.createElement(s.Z,{className:(0,p.q)(g("clearIconAllItems"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,p.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border 
shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},o.createElement("div",{className:(0,p.q)("flex items-center w-full px-2.5","bg-tremor-background-muted","dark:bg-dark-tremor-background-muted")},o.createElement("span",null,o.createElement(c,{className:(0,p.q)("flex-none w-4 h-4 mr-2","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("input",{name:"search",type:"input",autoComplete:"off",placeholder:b,className:(0,p.q)("w-full focus:outline-none focus:ring-none bg-transparent text-tremor-default py-2","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis"),onKeyDown:e=>{"Space"===e.code&&""!==e.target.value&&e.stopPropagation()},onChange:e=>N(e.target.value),value:M})),o.createElement(i.Z.Provider,Object.assign({},{onBlur:{handleResetSearch:T}},{value:{selectedValue:t}}),R))))})});v.displayName="MultiSelect"},46030:function(e,t,n){"use strict";n.d(t,{Z:function(){return u}});var r=n(5853);n(42698),n(64016),n(8710);var o=n(33232),i=n(2265),a=n(65954),l=n(1153),c=n(28517);let s=(0,l.fn)("MultiSelectItem"),u=i.forwardRef((e,t)=>{let{value:n,className:u,children:d}=e,f=(0,r._T)(e,["value","className","children"]),{selectedValue:p}=(0,i.useContext)(o.Z),h=(0,l.NZ)(n,p);return i.createElement(c.R.Option,Object.assign({className:(0,a.q)(s("root"),"flex justify-start items-center cursor-default text-tremor-default p-2.5","ui-active:bg-tremor-background-muted ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",u),ref:t,key:n,value:n},f),i.createElement("input",{type:"checkbox",className:(0,a.q)(s("checkbox"),"flex-none focus:ring-none focus:outline-none cursor-pointer mr-2.5","accent-tremor-brand","dark:accent-dark-tremor-brand"),checked:h,readOnly:!0}),i.createElement("span",{className:"whitespace-nowrap truncate"},null!=d?d:n))});u.displayName="MultiSelectItem"},30150:function(e,t,n){"use strict";n.d(t,{Z:function(){return f}});var r=n(5853),o=n(2265);let i=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2.5"}),o.createElement("path",{d:"M12 4v16m8-8H4"}))},a=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2.5"}),o.createElement("path",{d:"M20 12H4"}))};var l=n(65954),c=n(1153),s=n(69262);let u="flex mx-auto text-tremor-content-subtle dark:text-dark-tremor-content-subtle",d="cursor-pointer hover:text-tremor-content dark:hover:text-dark-tremor-content",f=o.forwardRef((e,t)=>{let{onSubmit:n,enableStepper:f=!0,disabled:p,onValueChange:h,onChange:m}=e,g=(0,r._T)(e,["onSubmit","enableStepper","disabled","onValueChange","onChange"]),v=(0,o.useRef)(null),[y,b]=o.useState(!1),x=o.useCallback(()=>{b(!0)},[]),w=o.useCallback(()=>{b(!1)},[]),[S,k]=o.useState(!1),E=o.useCallback(()=>{k(!0)},[]),C=o.useCallback(()=>{k(!1)},[]);return o.createElement(s.Z,Object.assign({type:"number",ref:(0,c.lq)([v,t]),disabled:p,makeInputClassName:(0,c.fn)("NumberInput"),onKeyDown:e=>{var 
t;if("Enter"===e.key&&!e.ctrlKey&&!e.altKey&&!e.shiftKey){let e=null===(t=v.current)||void 0===t?void 0:t.value;null==n||n(parseFloat(null!=e?e:""))}"ArrowDown"===e.key&&x(),"ArrowUp"===e.key&&E()},onKeyUp:e=>{"ArrowDown"===e.key&&w(),"ArrowUp"===e.key&&C()},onChange:e=>{p||(null==h||h(parseFloat(e.target.value)),null==m||m(e))},stepper:f?o.createElement("div",{className:(0,l.q)("flex justify-center align-middle")},o.createElement("div",{tabIndex:-1,onClick:e=>e.preventDefault(),onMouseDown:e=>e.preventDefault(),onTouchStart:e=>{e.cancelable&&e.preventDefault()},onMouseUp:()=>{var e,t;p||(null===(e=v.current)||void 0===e||e.stepDown(),null===(t=v.current)||void 0===t||t.dispatchEvent(new Event("input",{bubbles:!0})))},className:(0,l.q)(!p&&d,u,"group py-[10px] px-2.5 border-l border-tremor-border dark:border-dark-tremor-border")},o.createElement(a,{"data-testid":"step-down",className:(y?"scale-95":"")+" h-4 w-4 duration-75 transition group-active:scale-95"})),o.createElement("div",{tabIndex:-1,onClick:e=>e.preventDefault(),onMouseDown:e=>e.preventDefault(),onTouchStart:e=>{e.cancelable&&e.preventDefault()},onMouseUp:()=>{var e,t;p||(null===(e=v.current)||void 0===e||e.stepUp(),null===(t=v.current)||void 0===t||t.dispatchEvent(new Event("input",{bubbles:!0})))},className:(0,l.q)(!p&&d,u,"group py-[10px] px-2.5 border-l border-tremor-border dark:border-dark-tremor-border")},o.createElement(i,{"data-testid":"step-up",className:(S?"scale-95":"")+" h-4 w-4 duration-75 transition group-active:scale-95"}))):null},g))});f.displayName="NumberInput"},27281:function(e,t,n){"use strict";n.d(t,{Z:function(){return h}});var r=n(5853),o=n(2265),i=n(58747),a=n(4537),l=n(65954),c=n(1153),s=n(96398),u=n(28517),d=n(33044),f=n(44140);let p=(0,c.fn)("Select"),h=o.forwardRef((e,t)=>{let{defaultValue:n,value:c,onValueChange:h,placeholder:m="Select...",disabled:g=!1,icon:v,enableClear:y=!0,children:b,className:x}=e,w=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","disabled","icon","enableClear","children","className"]),[S,k]=(0,f.Z)(n,c),E=(0,o.useMemo)(()=>{let e=o.Children.toArray(b).filter(o.isValidElement);return(0,s.sl)(e)},[b]);return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:S,value:S,onChange:e=>{null==h||h(e),k(e)},disabled:g,className:(0,l.q)("w-full min-w-[10rem] relative text-tremor-default",x)},w),e=>{var t;let{value:n}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,l.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-2","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",v?"pl-10":"pl-3",(0,s.um)((0,s.Uh)(n),g))},v&&o.createElement("span",{className:(0,l.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(v,{className:(0,l.q)(p("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("span",{className:"w-[90%] block truncate"},n&&null!==(t=E.get(n))&&void 0!==t?t:m),o.createElement("span",{className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-3")},o.createElement(i.Z,{className:(0,l.q)(p("arrowDownIcon"),"flex-none h-5 
w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),y&&S?o.createElement("button",{type:"button",className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-8"),onClick:e=>{e.preventDefault(),k(""),null==h||h("")}},o.createElement(a.Z,{className:(0,l.q)(p("clearIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,l.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},b)))})});h.displayName="Select"},57365:function(e,t,n){"use strict";n.d(t,{Z:function(){return c}});var r=n(5853),o=n(2265),i=n(28517),a=n(65954);let l=(0,n(1153).fn)("SelectItem"),c=o.forwardRef((e,t)=>{let{value:n,icon:c,className:s,children:u}=e,d=(0,r._T)(e,["value","icon","className","children"]);return o.createElement(i.R.Option,Object.assign({className:(0,a.q)(l("root"),"flex justify-start items-center cursor-default text-tremor-default px-2.5 py-2.5","ui-active:bg-tremor-background-muted ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong ui-selected:bg-tremor-background-muted text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",s),ref:t,key:n,value:n},d),c&&o.createElement(c,{className:(0,a.q)(l("icon"),"flex-none w-5 h-5 mr-1.5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}),o.createElement("span",{className:"whitespace-nowrap truncate"},null!=u?u:n))});c.displayName="SelectItem"},92858:function(e,t,n){"use strict";n.d(t,{Z:function(){return N}});var r=n(5853),o=n(2265),i=n(62963),a=n(90945),l=n(13323),c=n(17684),s=n(80004),u=n(93689),d=n(38198),f=n(47634),p=n(56314),h=n(27847),m=n(64518);let g=(0,o.createContext)(null),v=Object.assign((0,h.yV)(function(e,t){let n=(0,c.M)(),{id:r="headlessui-description-".concat(n),...i}=e,a=function e(){let t=(0,o.useContext)(g);if(null===t){let t=Error("You used a component, but it is not inside a relevant parent.");throw Error.captureStackTrace&&Error.captureStackTrace(t,e),t}return t}(),l=(0,u.T)(t);(0,m.e)(()=>a.register(r),[r,a.register]);let s={ref:l,...a.props,id:r};return(0,h.sY)({ourProps:s,theirProps:i,slot:a.slot||{},defaultTag:"p",name:a.name||"Description"})}),{});var y=n(37388);let b=(0,o.createContext)(null),x=Object.assign((0,h.yV)(function(e,t){let n=(0,c.M)(),{id:r="headlessui-label-".concat(n),passive:i=!1,...a}=e,l=function e(){let t=(0,o.useContext)(b);if(null===t){let t=Error("You used a