diff --git a/.circleci/config.yml b/.circleci/config.yml index b2f6b7edce..7158087445 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -49,7 +49,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.54.0 + pip install openai==1.66.1 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -168,7 +168,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.54.0 + pip install openai==1.66.1 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -267,7 +267,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.54.0 + pip install openai==1.66.1 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -511,7 +511,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.54.0 + pip install openai==1.66.1 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -678,6 +678,48 @@ jobs: paths: - llm_translation_coverage.xml - llm_translation_coverage + llm_responses_api_testing: + docker: + - image: cimg/python:3.11 + auth: + username: ${DOCKERHUB_USERNAME} + password: ${DOCKERHUB_PASSWORD} + working_directory: ~/project + + steps: + - checkout + - run: + name: Install Dependencies + command: | + python -m pip install --upgrade pip + python -m pip install -r requirements.txt + pip install "pytest==7.3.1" + pip install "pytest-retry==1.6.3" + pip install "pytest-cov==5.0.0" + pip install "pytest-asyncio==0.21.1" + pip install "respx==0.21.1" + # Run pytest and generate JUnit XML report + - run: + name: Run tests + command: | + pwd + ls + python -m pytest -vv tests/llm_responses_api_testing --cov=litellm --cov-report=xml -x -s -v --junitxml=test-results/junit.xml --durations=5 + no_output_timeout: 120m + - run: + name: Rename the coverage files + command: | + mv coverage.xml llm_responses_api_coverage.xml + mv .coverage llm_responses_api_coverage + + # Store test results + - store_test_results: + path: test-results + - persist_to_workspace: + root: . 
+ paths: + - llm_responses_api_coverage.xml + - llm_responses_api_coverage litellm_mapped_tests: docker: - image: cimg/python:3.11 @@ -1234,7 +1276,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.54.0 " + pip install "openai==1.66.1" - run: name: Install Grype command: | @@ -1309,7 +1351,7 @@ jobs: command: | pwd ls - python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation --ignore=tests/image_gen_tests --ignore=tests/pass_through_unit_tests + python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation --ignore=tests/llm_responses_api_testing --ignore=tests/image_gen_tests --ignore=tests/pass_through_unit_tests no_output_timeout: 120m # Store test results @@ -1370,7 +1412,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.54.0 " + pip install "openai==1.66.1" # Run pytest and generate JUnit XML report - run: name: Build Docker image @@ -1492,7 +1534,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.54.0 " + pip install "openai==1.66.1" - run: name: Build Docker image command: docker build -t my-app:latest -f ./docker/Dockerfile.database . @@ -1921,7 +1963,7 @@ jobs: pip install "pytest-asyncio==0.21.1" pip install "google-cloud-aiplatform==1.43.0" pip install aiohttp - pip install "openai==1.54.0 " + pip install "openai==1.66.1" pip install "assemblyai==0.37.0" python -m pip install --upgrade pip pip install "pydantic==2.7.1" @@ -1935,12 +1977,12 @@ jobs: pip install prisma pip install fastapi pip install jsonschema - pip install "httpx==0.24.1" + pip install "httpx==0.27.0" pip install "anyio==3.7.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" pip install "google-cloud-aiplatform==1.59.0" - pip install "anthropic==0.21.3" + pip install "anthropic==0.49.0" # Run pytest and generate JUnit XML report - run: name: Build Docker image @@ -2068,7 +2110,7 @@ jobs: python -m venv venv . 
venv/bin/activate pip install coverage - coverage combine llm_translation_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage image_gen_coverage pass_through_unit_tests_coverage batches_coverage litellm_proxy_security_tests_coverage + coverage combine llm_translation_coverage llm_responses_api_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage image_gen_coverage pass_through_unit_tests_coverage batches_coverage litellm_proxy_security_tests_coverage coverage xml - codecov/upload: file: ./coverage.xml @@ -2197,7 +2239,7 @@ jobs: pip install "pytest-retry==1.6.3" pip install "pytest-asyncio==0.21.1" pip install aiohttp - pip install "openai==1.54.0 " + pip install "openai==1.66.1" python -m pip install --upgrade pip pip install "pydantic==2.7.1" pip install "pytest==7.3.1" @@ -2429,6 +2471,12 @@ workflows: only: - main - /litellm_.*/ + - llm_responses_api_testing: + filters: + branches: + only: + - main + - /litellm_.*/ - litellm_mapped_tests: filters: branches: @@ -2468,6 +2516,7 @@ workflows: - upload-coverage: requires: - llm_translation_testing + - llm_responses_api_testing - litellm_mapped_tests - batches_testing - litellm_utils_testing @@ -2526,6 +2575,7 @@ workflows: - load_testing - test_bad_database_url - llm_translation_testing + - llm_responses_api_testing - litellm_mapped_tests - batches_testing - litellm_utils_testing diff --git a/.circleci/requirements.txt b/.circleci/requirements.txt index 12e83a40f2..e63fb9dd9a 100644 --- a/.circleci/requirements.txt +++ b/.circleci/requirements.txt @@ -1,5 +1,5 @@ # used by CI/CD testing -openai==1.54.0 +openai==1.66.1 python-dotenv tiktoken importlib_metadata diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 3615d030bf..d50aefa8bb 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -6,6 +6,16 @@ +## Pre-Submission checklist + +**Please complete all items before asking a LiteLLM maintainer to review your PR** + +- [ ] I have added testing in the `tests/litellm/` directory, **Adding at least 1 test is a hard requirement** - [see details](https://docs.litellm.ai/docs/extras/contributing_code) +- [ ] I have added a screenshot of my new test passing locally +- [ ] My PR passes all unit tests via [`make test-unit`](https://docs.litellm.ai/docs/extras/contributing_code) +- [ ] My PR's scope is as isolated as possible; it only solves 1 specific problem + + ## Type @@ -20,10 +30,4 @@ ## Changes - - -## [REQUIRED] Testing - Attach a screenshot of any new tests passing locally -If UI changes, send a screenshot/GIF of working UI fixes - - diff --git a/.github/workflows/helm_unit_test.yml b/.github/workflows/helm_unit_test.yml new file mode 100644 index 0000000000..c4b83af70a --- /dev/null +++ b/.github/workflows/helm_unit_test.yml @@ -0,0 +1,27 @@ +name: Helm unit test + +on: + pull_request: + push: + branches: + - main + +jobs: + unit-test: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Set up Helm 3.11.1 + uses: azure/setup-helm@v1 + with: + version: '3.11.1' + + - name: Install Helm Unit Test Plugin + run: | + helm plugin install https://github.com/helm-unittest/helm-unittest --version v0.4.4 + + - name: Run unit tests + run: + helm
unittest -f 'tests/*.yaml' deploy/charts/litellm-helm \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..6bd3cb57d4 --- /dev/null +++ b/Makefile @@ -0,0 +1,21 @@ +# LiteLLM Makefile +# Simple Makefile for running tests and basic development tasks + +.PHONY: help test test-unit test-integration + +# Default target +help: + @echo "Available commands:" + @echo " make test - Run all tests" + @echo " make test-unit - Run unit tests" + @echo " make test-integration - Run integration tests" + +# Testing
test: + poetry run pytest tests/ + +test-unit: + poetry run pytest tests/litellm/ + +test-integration: + poetry run pytest tests/ -k "not litellm" \ No newline at end of file diff --git a/README.md b/README.md index 014df0ccdd..2d2f71e4d1 100644 --- a/README.md +++ b/README.md @@ -340,71 +340,7 @@ curl 'http://0.0.0.0:4000/key/generate' \ ## Contributing -To contribute: Clone the repo locally -> Make a change -> Submit a PR with the change. - -Here's how to modify the repo locally: - -Step 1: Clone the repo - -``` -git clone https://github.com/BerriAI/litellm.git -``` - -Step 2: Install dependencies: - -``` -pip install -r requirements.txt -``` - -Step 3: Test your change: - -a. Add a pytest test within `tests/litellm/` - -This folder follows the same directory structure as `litellm/`. - -If a corresponding test file does not exist, create one. - -b. Run the test - -``` -cd tests/litellm # pwd: Documents/litellm/litellm/tests/litellm -pytest /path/to/test_file.py -``` - -Step 4: Submit a PR with your changes! 🚀 - -- push your fork to your GitHub repo -- submit a PR from there - -### Building LiteLLM Docker Image - -Follow these instructions if you want to build / run the LiteLLM Docker Image yourself. - -Step 1: Clone the repo - -``` -git clone https://github.com/BerriAI/litellm.git -``` - -Step 2: Build the Docker Image - -Build using Dockerfile.non_root -``` -docker build -f docker/Dockerfile.non_root -t litellm_test_image . -``` - -Step 3: Run the Docker Image - -Make sure config.yaml is present in the root directory. This is your litellm proxy config file. -``` -docker run \ - -v $(pwd)/proxy_config.yaml:/app/config.yaml \ - -e DATABASE_URL="postgresql://xxxxxxxx" \ - -e LITELLM_MASTER_KEY="sk-1234" \ - -p 4000:4000 \ - litellm_test_image \ - --config /app/config.yaml --detailed_debug -``` +Interested in contributing? Contributions to the LiteLLM Python SDK, Proxy Server, and LLM integrations are all accepted and highly encouraged!
[See our Contribution Guide for more details](https://docs.litellm.ai/docs/extras/contributing_code) # Enterprise For companies that need better security, user management and professional support diff --git a/deploy/charts/litellm-helm/tests/deployment_tests.yaml b/deploy/charts/litellm-helm/tests/deployment_tests.yaml new file mode 100644 index 0000000000..e7ce44b052 --- /dev/null +++ b/deploy/charts/litellm-helm/tests/deployment_tests.yaml @@ -0,0 +1,54 @@ +suite: test deployment +templates: + - deployment.yaml + - configmap-litellm.yaml +tests: + - it: should work + template: deployment.yaml + set: + image.tag: test + asserts: + - isKind: + of: Deployment + - matchRegex: + path: metadata.name + pattern: -litellm$ + - equal: + path: spec.template.spec.containers[0].image + value: ghcr.io/berriai/litellm-database:test + - it: should work with tolerations + template: deployment.yaml + set: + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + asserts: + - equal: + path: spec.template.spec.tolerations[0].key + value: node-role.kubernetes.io/master + - equal: + path: spec.template.spec.tolerations[0].operator + value: Exists + - it: should work with affinity + template: deployment.yaml + set: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - antarctica-east1 + asserts: + - equal: + path: spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key + value: topology.kubernetes.io/zone + - equal: + path: spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator + value: In + - equal: + path: spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] + value: antarctica-east1 diff --git a/docker-compose.yml b/docker-compose.yml index 78044c03b8..d16ec6ed20 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,10 +20,18 @@ services: STORE_MODEL_IN_DB: "True" # allows adding models to proxy via UI env_file: - .env # Load local .env file + depends_on: + - db # Indicates that this service depends on the 'db' service, ensuring 'db' starts first + healthcheck: # Defines the health check configuration for the container + test: [ "CMD-SHELL", "curl -f http://localhost:4000/health/liveliness || exit 1" ] # Run via a shell so the '|| exit 1' fallback is actually evaluated + interval: 30s # Perform health check every 30 seconds + timeout: 10s # Health check command times out after 10 seconds + retries: 3 # Retry up to 3 times if health check fails + start_period: 40s # Wait 40 seconds after container start before beginning health checks db: - image: postgres + image: postgres:16 restart: always environment: POSTGRES_DB: litellm @@ -31,6 +39,8 @@ POSTGRES_PASSWORD: dbpassword9090 ports: - "5432:5432" + volumes: + - postgres_data:/var/lib/postgresql/data # Persists Postgres data across container restarts healthcheck: test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"] interval: 1s @@ -53,6 +63,8 @@ services: volumes: prometheus_data: driver: local + postgres_data: + name: litellm_postgres_data # Named volume for Postgres data persistence # ...rest of your docker-compose config if any diff --git a/docs/my-website/docs/anthropic_unified.md b/docs/my-website/docs/anthropic_unified.md new file mode 100644 index
0000000000..cf6ba798d5 --- /dev/null +++ b/docs/my-website/docs/anthropic_unified.md @@ -0,0 +1,92 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# /v1/messages [BETA] + +LiteLLM provides a BETA endpoint in the spec of Anthropic's `/v1/messages` endpoint. + +This currently just supports the Anthropic API. + +| Feature | Supported | Notes | +|-------|-------|-------| +| Cost Tracking | ✅ | | +| Logging | ✅ | works across all integrations | +| End-user Tracking | ✅ | | +| Streaming | ✅ | | +| Fallbacks | ✅ | between anthropic models | +| Loadbalancing | ✅ | between anthropic models | + +Planned improvements: +- Vertex AI Anthropic support +- Bedrock Anthropic support + +## Usage + + + + +1. Set up config.yaml + +```yaml +model_list: + - model_name: anthropic-claude + litellm_params: + model: claude-3-7-sonnet-latest +``` + +2. Start proxy + +```bash +litellm --config /path/to/config.yaml +``` + +3. Test it! + +```bash +curl -L -X POST 'http://0.0.0.0:4000/v1/messages' \ +-H 'content-type: application/json' \ +-H 'x-api-key: $LITELLM_API_KEY' \ +-H 'anthropic-version: 2023-06-01' \ +-d '{ + "model": "anthropic-claude", + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "List 5 important events in the XIX century" + } + ] + } + ], + "max_tokens": 4096 +}' +``` + + + +```python +from litellm.llms.anthropic.experimental_pass_through.messages.handler import anthropic_messages +import asyncio +import os + +# set env +os.environ["ANTHROPIC_API_KEY"] = "my-api-key" + +messages = [{"role": "user", "content": "Hello, can you tell me a short joke?"}] + +# Call the handler +async def call(): + response = await anthropic_messages( + messages=messages, + api_key=os.environ["ANTHROPIC_API_KEY"], + model="claude-3-haiku-20240307", + max_tokens=100, + ) + +asyncio.run(call()) +``` + + + \ No newline at end of file diff --git a/docs/my-website/docs/assistants.md b/docs/my-website/docs/assistants.md index 5e68e8dded..4032c74557 100644 --- a/docs/my-website/docs/assistants.md +++ b/docs/my-website/docs/assistants.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Assistants API +# /assistants Covers Threads, Messages, Assistants. diff --git a/docs/my-website/docs/batches.md b/docs/my-website/docs/batches.md index 4ac9fa61e3..4918e30d1f 100644 --- a/docs/my-website/docs/batches.md +++ b/docs/my-website/docs/batches.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# [BETA] Batches API +# /batches Covers Batches, Files diff --git a/docs/my-website/docs/completion/vision.md b/docs/my-website/docs/completion/vision.md index efb988b76f..1e18109b3b 100644 --- a/docs/my-website/docs/completion/vision.md +++ b/docs/my-website/docs/completion/vision.md @@ -189,4 +189,138 @@ Expected Response ``` - \ No newline at end of file + + + +## Explicitly specify image type + +If you have images without a mime-type, or if litellm is incorrectly inferring the mime type of your image (e.g. calling `gs://` URLs with vertex ai), you can set this explicitly via the `format` param. + +```python +"image_url": { + "url": "gs://my-gs-image", + "format": "image/jpeg" +} +``` + +LiteLLM will use this for any API endpoint that supports specifying mime-type (e.g. anthropic/bedrock/vertex ai). + +For others (e.g. openai), it will be ignored.
+ + + +```python +import os +from litellm import completion + +os.environ["ANTHROPIC_API_KEY"] = "your-api-key" + +# anthropic call +response = completion( + model = "claude-3-7-sonnet-latest", + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What’s in this image?" + }, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + "format": "image/jpeg" + } + } + ] + } + ], +) + +``` + + + + +1. Define vision models on config.yaml + +```yaml +model_list: + - model_name: gpt-4-vision-preview # OpenAI gpt-4-vision-preview + litellm_params: + model: openai/gpt-4-vision-preview + api_key: os.environ/OPENAI_API_KEY + - model_name: llava-hf # Custom OpenAI compatible model + litellm_params: + model: openai/llava-hf/llava-v1.6-vicuna-7b-hf + api_base: http://localhost:8000 + api_key: fake-key + model_info: + supports_vision: True # set supports_vision to True so /model/info returns this attribute as True + +``` + +2. Run proxy server + +```bash +litellm --config config.yaml +``` + +3. Test it using the OpenAI Python SDK + + +```python +import os +from openai import OpenAI + +client = OpenAI( + api_key="sk-1234", # your litellm proxy api key +) + +response = client.chat.completions.create( + model = "gpt-4-vision-preview", # use model="llava-hf" to test your custom OpenAI endpoint + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What’s in this image?" + }, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + "format": "image/jpeg" + } + } + ] + } + ], +) + +``` + + + + + + + + + +## Spec + +``` +"image_url": str + +OR + +"image_url": { + "url": "url OR base64 encoded str", + "detail": "openai-only param", + "format": "specify mime-type of image" +} +``` \ No newline at end of file diff --git a/docs/my-website/docs/embedding/supported_embedding.md b/docs/my-website/docs/embedding/supported_embedding.md index d0cb59b46e..06d4107372 100644 --- a/docs/my-website/docs/embedding/supported_embedding.md +++ b/docs/my-website/docs/embedding/supported_embedding.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Embeddings +# /embeddings ## Quick Start ```python diff --git a/docs/my-website/docs/extras/contributing_code.md b/docs/my-website/docs/extras/contributing_code.md new file mode 100644 index 0000000000..0fe7675ead --- /dev/null +++ b/docs/my-website/docs/extras/contributing_code.md @@ -0,0 +1,96 @@ +# Contributing Code + +## **Checklist before submitting a PR** + +Here are the core requirements for any PR submitted to LiteLLM + + +- [ ] Add testing, **Adding at least 1 test is a hard requirement** - [see details](#2-adding-testing-to-your-pr) +- [ ] Ensure your PR passes the following tests: + - [ ] [Unit Tests](#3-running-unit-tests) + - [ ] Formatting / Linting Tests +- [ ] Keep scope as isolated as possible. As a general rule, your changes should address 1 specific problem at a time + + + +## Quick start + +## 1.
Set up your local dev environment + + +Here's how to modify the repo locally: + +Step 1: Clone the repo + +```shell +git clone https://github.com/BerriAI/litellm.git +``` + +Step 2: Install dev dependencies: + +```shell +poetry install --with dev --extras proxy +``` + +That's it, your local dev environment is ready! + +## 2. Adding Testing to your PR + +- Add your test to the [`tests/litellm/` directory](https://github.com/BerriAI/litellm/tree/main/tests/litellm) + +- This directory maps 1:1 to the `litellm/` directory, and can only contain mocked tests. +- Do not add real llm api calls to this directory. + +### 2.1 File Naming Convention for `tests/litellm/` + +The `tests/litellm/` directory follows the same directory structure as `litellm/`. + +- `tests/litellm/proxy/test_caching_routes.py` maps to `litellm/proxy/caching_routes.py` +- `test_{filename}.py` maps to `litellm/{filename}.py` + +## 3. Running Unit Tests + +Run the following command from the root of the litellm directory + +```shell +make test-unit +``` + +## 4. Submit a PR with your changes! + +- push your fork to your GitHub repo +- submit a PR from there + + +## Advanced +### Building LiteLLM Docker Image + +Follow these instructions if you want to build / run the LiteLLM Docker Image yourself. + +Step 1: Clone the repo + +```shell +git clone https://github.com/BerriAI/litellm.git +``` + +Step 2: Build the Docker Image + +Build using Dockerfile.non_root + +```shell +docker build -f docker/Dockerfile.non_root -t litellm_test_image . +``` + +Step 3: Run the Docker Image + +Make sure config.yaml is present in the root directory. This is your litellm proxy config file. + +```shell +docker run \ + -v $(pwd)/proxy_config.yaml:/app/config.yaml \ + -e DATABASE_URL="postgresql://xxxxxxxx" \ + -e LITELLM_MASTER_KEY="sk-1234" \ + -p 4000:4000 \ + litellm_test_image \ + --config /app/config.yaml --detailed_debug +``` diff --git a/docs/my-website/docs/files_endpoints.md b/docs/my-website/docs/files_endpoints.md index cccb35daa9..7e20982ff4 100644 --- a/docs/my-website/docs/files_endpoints.md +++ b/docs/my-website/docs/files_endpoints.md @@ -2,7 +2,7 @@ import TabItem from '@theme/TabItem'; import Tabs from '@theme/Tabs'; -# Files API +# /files Files are used to upload documents that can be used with features like Assistants, Fine-tuning, and Batch API.
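To make the `/files` endpoint concrete, here is a minimal sketch of uploading a file through the proxy with the OpenAI Python SDK. The proxy URL, the virtual key, and the file name `batch_input.jsonl` are assumptions for illustration, not values from this PR.

```python
from openai import OpenAI

# Point the OpenAI client at the LiteLLM proxy (assumed URL and virtual key)
client = OpenAI(base_url="http://0.0.0.0:4000", api_key="sk-1234")

# Upload a local JSONL file for later use with the Batch API
uploaded = client.files.create(
    file=open("batch_input.jsonl", "rb"),  # hypothetical local file
    purpose="batch",
)
print(uploaded.id)  # file id to reference in batch / fine-tuning calls
```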
diff --git a/docs/my-website/docs/fine_tuning.md b/docs/my-website/docs/fine_tuning.md index fd5d99a6a1..f9a9297e06 100644 --- a/docs/my-website/docs/fine_tuning.md +++ b/docs/my-website/docs/fine_tuning.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# [Beta] Fine-tuning API +# /fine_tuning :::info diff --git a/docs/my-website/docs/moderation.md b/docs/my-website/docs/moderation.md index 6dd092fb52..95fe8b2856 100644 --- a/docs/my-website/docs/moderation.md +++ b/docs/my-website/docs/moderation.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Moderation +# /moderations ### Usage diff --git a/docs/my-website/docs/observability/athina_integration.md b/docs/my-website/docs/observability/athina_integration.md index 4994d553c6..ba93ea4c98 100644 --- a/docs/my-website/docs/observability/athina_integration.md +++ b/docs/my-website/docs/observability/athina_integration.md @@ -78,6 +78,9 @@ Following are the allowed fields in metadata, their types, and their description * `context: Optional[Union[dict, str]]` - This is the context used as information for the prompt. For RAG applications, this is the "retrieved" data. You may log context as a string or as an object (dictionary). * `expected_response: Optional[str]` - This is the reference response to compare against for evaluation purposes. This is useful for segmenting inference calls by expected response. * `user_query: Optional[str]` - This is the user's query. For conversational applications, this is the user's last message. +* `tags: Optional[list]` - This is a list of tags. This is useful for segmenting inference calls by tags. +* `user_feedback: Optional[str]` - The end user’s feedback. +* `model_options: Optional[dict]` - This is a dictionary of model options. This is useful for getting insights into how model behavior affects your end users. * `custom_attributes: Optional[dict]` - This is a dictionary of custom attributes. This is useful for additional information about the inference. ## Using a self hosted deployment of Athina diff --git a/docs/my-website/docs/projects/PDL.md b/docs/my-website/docs/projects/PDL.md new file mode 100644 index 0000000000..5d6fd77555 --- /dev/null +++ b/docs/my-website/docs/projects/PDL.md @@ -0,0 +1,5 @@ +PDL - A YAML-based approach to prompt programming + +Github: https://github.com/IBM/prompt-declaration-language + +PDL is a declarative approach to prompt programming, helping users to accumulate messages implicitly, with support for model chaining and tool use. \ No newline at end of file diff --git a/docs/my-website/docs/projects/pgai.md b/docs/my-website/docs/projects/pgai.md new file mode 100644 index 0000000000..bece5baf6a --- /dev/null +++ b/docs/my-website/docs/projects/pgai.md @@ -0,0 +1,9 @@ +# pgai + +[pgai](https://github.com/timescale/pgai) is a suite of tools to develop RAG, semantic search, and other AI applications more easily with PostgreSQL. + +If you don't know what pgai is yet check out the [README](https://github.com/timescale/pgai)! + +If you're already familiar with pgai, you can find litellm specific docs here: +- Litellm for [model calling](https://github.com/timescale/pgai/blob/main/docs/model_calling/litellm.md) in pgai +- Use the [litellm provider](https://github.com/timescale/pgai/blob/main/docs/vectorizer/api-reference.md#aiembedding_litellm) to automatically create embeddings for your data via the pgai vectorizer. 
diff --git a/docs/my-website/docs/providers/bedrock.md b/docs/my-website/docs/providers/bedrock.md index d765da1e51..1416006bf1 100644 --- a/docs/my-website/docs/providers/bedrock.md +++ b/docs/my-website/docs/providers/bedrock.md @@ -63,9 +63,9 @@ model_list: - model_name: bedrock-claude-v1 litellm_params: model: bedrock/anthropic.claude-instant-v1 - aws_access_key_id: os.environ/CUSTOM_AWS_ACCESS_KEY_ID - aws_secret_access_key: os.environ/CUSTOM_AWS_SECRET_ACCESS_KEY - aws_region_name: os.environ/CUSTOM_AWS_REGION_NAME + aws_access_key_id: os.environ/AWS_ACCESS_KEY_ID + aws_secret_access_key: os.environ/AWS_SECRET_ACCESS_KEY + aws_region_name: os.environ/AWS_REGION_NAME ``` All possible auth params: @@ -286,9 +286,12 @@ print(response) -## Usage - Function Calling +## Usage - Function Calling / Tool calling -LiteLLM uses Bedrock's Converse API for making tool calls +LiteLLM supports tool calling via Bedrock's Converse and Invoke APIs. + + + ```python from litellm import completion @@ -333,6 +336,69 @@ assert isinstance( response.choices[0].message.tool_calls[0].function.arguments, str ) ``` + + + +1. Set up config.yaml + +```yaml +model_list: + - model_name: bedrock-claude-3-7 + litellm_params: + model: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0 # for bedrock invoke, specify `bedrock/invoke/` +``` + +2. Start proxy + +```bash +litellm --config /path/to/config.yaml +``` + +3. Test it! + +```bash +curl http://0.0.0.0:4000/v1/chat/completions \ +-H "Content-Type: application/json" \ +-H "Authorization: Bearer $LITELLM_API_KEY" \ +-d '{ + "model": "bedrock-claude-3-7", + "messages": [ + { + "role": "user", + "content": "What'\''s the weather like in Boston today?" + } + ], + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "tool_choice": "auto" +}' + +``` + + + + ## Usage - Vision @@ -390,9 +456,9 @@ Returns 2 new fields in `message` and `delta` object: Each object has the following fields: - `type` - Literal["thinking"] - The type of thinking block - `thinking` - string - The thinking of the response. Also returned in `reasoning_content` -- `signature_delta` - string - A base64 encoded string, returned by Anthropic. +- `signature` - string - A base64 encoded string, returned by Anthropic. -The `signature_delta` is required by Anthropic on subsequent calls, if 'thinking' content is passed in (only required to use `thinking` with tool calling). [Learn more](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#understanding-thinking-blocks) +The `signature` is required by Anthropic on subsequent calls if 'thinking' content is passed in (only required to use `thinking` with tool calling). [Learn more](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#understanding-thinking-blocks) @@ -475,7 +541,7 @@ Same as [Anthropic API response](../providers/anthropic#usage---thinking--reason { "type": "thinking", "thinking": "The capital of France is Paris. 
This is a straightforward factual question.", - "signature_delta": "EqoBCkgIARABGAIiQL2UoU0b1OHYi+yCHpBY7U6FQW8/FcoLewocJQPa2HnmLM+NECy50y44F/kD4SULFXi57buI9fAvyBwtyjlOiO0SDE3+r3spdg6PLOo9PBoMma2ku5OTAoR46j9VIjDRlvNmBvff7YW4WI9oU8XagaOBSxLPxElrhyuxppEn7m6bfT40dqBSTDrfiw4FYB4qEPETTI6TA6wtjGAAqmFqKTo=" + "signature": "EqoBCkgIARABGAIiQL2UoU0b1OHYi+yCHpBY7U6FQW8/FcoLewocJQPa2HnmLM+NECy50y44F/kD4SULFXi57buI9fAvyBwtyjlOiO0SDE3+r3spdg6PLOo9PBoMma2ku5OTAoR46j9VIjDRlvNmBvff7YW4WI9oU8XagaOBSxLPxElrhyuxppEn7m6bfT40dqBSTDrfiw4FYB4qEPETTI6TA6wtjGAAqmFqKTo=" } ] } @@ -492,6 +558,111 @@ Same as [Anthropic API response](../providers/anthropic#usage---thinking--reason ``` +## Usage - Structured Output / JSON mode + + + + +```python +from litellm import completion +import os +from pydantic import BaseModel + +# set env +os.environ["AWS_ACCESS_KEY_ID"] = "" +os.environ["AWS_SECRET_ACCESS_KEY"] = "" +os.environ["AWS_REGION_NAME"] = "" + +class CalendarEvent(BaseModel): + name: str + date: str + participants: list[str] + +class EventsList(BaseModel): + events: list[CalendarEvent] + +response = completion( + model="bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0", # specify invoke via `bedrock/invoke/anthropic.claude-3-7-sonnet-20250219-v1:0` + response_format=EventsList, + messages=[ + {"role": "system", "content": "You are a helpful assistant designed to output JSON."}, + {"role": "user", "content": "Who won the world series in 2020?"} + ], +) +print(response.choices[0].message.content) +``` + + + +1. Set up config.yaml + +```yaml +model_list: + - model_name: bedrock-claude-3-7 + litellm_params: + model: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0 # specify invoke via `bedrock/invoke/` + aws_access_key_id: os.environ/CUSTOM_AWS_ACCESS_KEY_ID + aws_secret_access_key: os.environ/CUSTOM_AWS_SECRET_ACCESS_KEY + aws_region_name: os.environ/CUSTOM_AWS_REGION_NAME +``` + +2. Start proxy + +```bash +litellm --config /path/to/config.yaml +``` + +3. Test it! + +```bash +curl http://0.0.0.0:4000/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $LITELLM_KEY" \ + -d '{ + "model": "bedrock-claude-3-7", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant designed to output JSON." + }, + { + "role": "user", + "content": "Who won the world series in 2020?"
+ } + ], + "response_format": { + "type": "json_schema", + "json_schema": { + "name": "math_reasoning", + "description": "reason about maths", + "schema": { + "type": "object", + "properties": { + "steps": { + "type": "array", + "items": { + "type": "object", + "properties": { + "explanation": { "type": "string" }, + "output": { "type": "string" } + }, + "required": ["explanation", "output"], + "additionalProperties": false + } + }, + "final_answer": { "type": "string" } + }, + "required": ["steps", "final_answer"], + "additionalProperties": false + }, + "strict": true + } + } + }' +``` + + + ## Usage - Bedrock Guardrails Example of using [Bedrock Guardrails with LiteLLM](https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-use-converse-api.html) @@ -1621,10 +1792,14 @@ print(response) ### Advanced - [Pass model/provider-specific Params](https://docs.litellm.ai/docs/completion/provider_specific_params#proxy-usage) ## Image Generation -Use this for stable diffusion on bedrock +Use this for Stable Diffusion and Amazon Nova Canvas on Bedrock ### Usage + + + + ```python import os from litellm import image_generation @@ -1659,6 +1834,41 @@ response = image_generation( ) print(f"response: {response}") ``` + + + +1. Set up config.yaml + +```yaml +model_list: + - model_name: amazon.nova-canvas-v1:0 + litellm_params: + model: bedrock/amazon.nova-canvas-v1:0 + aws_region_name: "us-east-1" + aws_secret_access_key: my-key # OPTIONAL - all boto3 auth params supported + aws_access_key_id: my-id # OPTIONAL - all boto3 auth params supported +``` + +2. Start proxy + +```bash +litellm --config /path/to/config.yaml +``` + +3. Test it! + +```bash +curl -L -X POST 'http://0.0.0.0:4000/v1/images/generations' \ +-H 'Content-Type: application/json' \ +-H 'Authorization: Bearer $LITELLM_VIRTUAL_KEY' \ +-d '{ + "model": "amazon.nova-canvas-v1:0", + "prompt": "A cute baby sea otter" +}' +``` + + + ## Supported AWS Bedrock Image Generation Models @@ -1739,6 +1949,8 @@ curl http://0.0.0.0:4000/rerank \ "Capital punishment has existed in the United States since before it was a country." ], "top_n": 3 }' ``` diff --git a/docs/my-website/docs/providers/vertex.md b/docs/my-website/docs/providers/vertex.md index f2ed4650ac..10ac13ecaf 100644 --- a/docs/my-website/docs/providers/vertex.md +++ b/docs/my-website/docs/providers/vertex.md @@ -404,14 +404,16 @@ curl http://localhost:4000/v1/chat/completions \ If this was your initial VertexAI Grounding code, ```python -import vertexai +import vertexai +from vertexai.generative_models import GenerativeModel, GenerationConfig, Tool, grounding + vertexai.init(project=project_id, location="us-central1") model = GenerativeModel("gemini-1.5-flash-001") # Use Google Search for grounding -tool = Tool.from_google_search_retrieval(grounding.GoogleSearchRetrieval(disable_attributon=False)) +tool = Tool.from_google_search_retrieval(grounding.GoogleSearchRetrieval()) prompt = "When is the next total solar eclipse in US?" response = model.generate_content( @@ -428,7 +430,7 @@ print(response) then, this is what it looks like now ```python -from litellm import completion +from litellm import completion # !gcloud auth application-default login - run this to add vertex credentials to your env @@ -1686,6 +1688,14 @@ assert isinstance( Pass any file supported by Vertex AI, through LiteLLM. 
+LiteLLM supports the following media types passed via URL: + +``` +Images with Cloud Storage URIs - gs://cloud-samples-data/generative-ai/image/boats.jpeg +Images with direct links - https://storage.googleapis.com/github-repo/img/gemini/intro/landmark3.jpg +Videos with direct links - https://storage.googleapis.com/github-repo/img/gemini/multimodality_usecases_overview/pixel8.mp4 +Base64 Encoded Local Images +``` diff --git a/docs/my-website/docs/proxy/db_info.md b/docs/my-website/docs/proxy/db_info.md index 1b87aa1e54..946089bf14 100644 --- a/docs/my-website/docs/proxy/db_info.md +++ b/docs/my-website/docs/proxy/db_info.md @@ -46,18 +46,17 @@ You can see the full DB Schema [here](https://github.com/BerriAI/litellm/blob/ma | Table Name | Description | Row Insert Frequency | |------------|-------------|---------------------| -| LiteLLM_SpendLogs | Detailed logs of all API requests. Records token usage, spend, and timing information. Tracks which models and keys were used. | **High - every LLM API request** | -| LiteLLM_ErrorLogs | Captures failed requests and errors. Stores exception details and request information. Helps with debugging and monitoring. | **Medium - on errors only** | +| LiteLLM_SpendLogs | Detailed logs of all API requests. Records token usage, spend, and timing information. Tracks which models and keys were used. | **High - every LLM API request - Success or Failure** | | LiteLLM_AuditLog | Tracks changes to system configuration. Records who made changes and what was modified. Maintains history of updates to teams, users, and models. | **Off by default**, **High - when enabled** | -## Disable `LiteLLM_SpendLogs` & `LiteLLM_ErrorLogs` +## Disable `LiteLLM_SpendLogs` You can disable spend_logs and error_logs by setting `disable_spend_logs` and `disable_error_logs` to `True` on the `general_settings` section of your proxy_config.yaml file. ```yaml general_settings: disable_spend_logs: True # Disable writing spend logs to DB - disable_error_logs: True # Disable writing error logs to DB + disable_error_logs: True # Only disable writing error logs to DB, regular spend logs will still be written unless `disable_spend_logs: True` ``` ### What is the impact of disabling these logs? diff --git a/docs/my-website/docs/proxy/logging_spec.md b/docs/my-website/docs/proxy/logging_spec.md index 86ba907373..7da937e565 100644 --- a/docs/my-website/docs/proxy/logging_spec.md +++ b/docs/my-website/docs/proxy/logging_spec.md @@ -78,6 +78,7 @@ Inherits from `StandardLoggingUserAPIKeyMetadata` and adds: | `api_base` | `Optional[str]` | Optional API base URL | | `response_cost` | `Optional[str]` | Optional response cost | | `additional_headers` | `Optional[StandardLoggingAdditionalHeaders]` | Additional headers | +| `batch_models` | `Optional[List[str]]` | Only set for Batches API. Lists the models used for cost calculation | ## StandardLoggingModelInformation diff --git a/docs/my-website/docs/proxy/master_key_rotations.md b/docs/my-website/docs/proxy/master_key_rotations.md new file mode 100644 index 0000000000..1713679863 --- /dev/null +++ b/docs/my-website/docs/proxy/master_key_rotations.md @@ -0,0 +1,53 @@ +# Rotating Master Key + +Here are our recommended steps for rotating your master key. + + +**1. Back up your DB** In case of any errors during the encryption/decryption process, this will allow you to revert to the current state without issues. + +**2. 
Call `/key/regenerate` with the new master key** + +```bash +curl -L -X POST 'http://localhost:4000/key/regenerate' \ +-H 'Authorization: Bearer sk-1234' \ +-H 'Content-Type: application/json' \ +-d '{ + "key": "sk-1234", + "new_master_key": "sk-PIp1h0RekR" +}' +``` + +This will re-encrypt any models in your Proxy_ModelTable with the new master key. + +Expect to start seeing decryption errors in logs, as your old master key is no longer able to decrypt the new values. + +```bash + raise Exception("Unable to decrypt value={}".format(v)) +Exception: Unable to decrypt value= +``` + +**3. Update LITELLM_MASTER_KEY** + +In your environment variables, update the value of LITELLM_MASTER_KEY to the new_master_key from Step 2. + +This ensures the key used for decryption from the DB is the new key. + +**4. Test it** + +Make a test request to a model stored on proxy with a litellm key (new master key or virtual key) and see if it works. + +```bash + curl -L -X POST 'http://0.0.0.0:4000/v1/chat/completions' \ +-H 'Content-Type: application/json' \ +-H 'Authorization: Bearer sk-1234' \ +-d '{ + "model": "gpt-4o-mini", # 👈 REPLACE with 'public model name' for any db-model + "messages": [ + { + "content": "Hey, how's it going", + "role": "user" + } + ] +}' +``` \ No newline at end of file diff --git a/docs/my-website/docs/proxy/prod.md b/docs/my-website/docs/proxy/prod.md index d0b8c48174..d3ba2d6224 100644 --- a/docs/my-website/docs/proxy/prod.md +++ b/docs/my-website/docs/proxy/prod.md @@ -107,9 +107,9 @@ general_settings: By default, LiteLLM writes several types of logs to the database: - Every LLM API request to the `LiteLLM_SpendLogs` table -- LLM Exceptions to the `LiteLLM_LogsErrors` table +- LLM Exceptions to the `LiteLLM_SpendLogs` table -If you're not viewing these logs on the LiteLLM UI (most users use Prometheus for monitoring), you can disable them by setting the following flags to `True`: +If you're not viewing these logs on the LiteLLM UI, you can disable them by setting the following flags to `True`: ```yaml general_settings: diff --git a/docs/my-website/docs/proxy/release_cycle.md b/docs/my-website/docs/proxy/release_cycle.md index bdc8ff3049..947a4ae6b3 100644 --- a/docs/my-website/docs/proxy/release_cycle.md +++ b/docs/my-website/docs/proxy/release_cycle.md @@ -4,7 +4,7 @@ Litellm Proxy has the following release cycle: - `v1.x.x-nightly`: These are releases which pass ci/cd. - `v1.x.x.rc`: These are releases which pass ci/cd + [manual review](https://github.com/BerriAI/litellm/discussions/8495#discussioncomment-12180711). -`v1.x.x`: These are releases which pass ci/cd + manual review + 3 days of production testing. +`v1.x.x` OR `v1.x.x-stable`: These are releases which pass ci/cd + manual review + 3 days of production testing. In production, we recommend using the latest `v1.x.x` release. diff --git a/docs/my-website/docs/realtime.md b/docs/my-website/docs/realtime.md index 28697f44b9..4611c8fdcd 100644 --- a/docs/my-website/docs/realtime.md +++ b/docs/my-website/docs/realtime.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Realtime Endpoints +# /realtime Use this to loadbalance across Azure + OpenAI. 
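To make the loadbalancing claim concrete, here is a minimal config.yaml sketch that puts one Azure and one OpenAI deployment behind a single model name, written by analogy with the chat-completion configs elsewhere in these docs; the deployment names and env vars are assumptions, not confirmed realtime settings.

```yaml
model_list:
  - model_name: gpt-4o-realtime              # one public name, two deployments
    litellm_params:
      model: azure/gpt-4o-realtime-preview   # assumed Azure deployment name
      api_key: os.environ/AZURE_API_KEY
      api_base: os.environ/AZURE_API_BASE
  - model_name: gpt-4o-realtime
    litellm_params:
      model: openai/gpt-4o-realtime-preview  # assumed OpenAI model name
      api_key: os.environ/OPENAI_API_KEY
```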
diff --git a/docs/my-website/docs/reasoning_content.md b/docs/my-website/docs/reasoning_content.md index 92c3fae61f..f0bcd30a9b 100644 --- a/docs/my-website/docs/reasoning_content.md +++ b/docs/my-website/docs/reasoning_content.md @@ -3,6 +3,12 @@ import TabItem from '@theme/TabItem'; # 'Thinking' / 'Reasoning Content' +:::info + +Requires LiteLLM v1.63.0+ + +::: + Supported Providers: - Deepseek (`deepseek/`) - Anthropic API (`anthropic/`) @@ -17,7 +23,7 @@ Supported Providers: { "type": "thinking", "thinking": "The capital of France is Paris.", - "signature_delta": "EqoBCkgIARABGAIiQL2UoU0b1OHYi+..." + "signature": "EqoBCkgIARABGAIiQL2UoU0b1OHYi+..." } ] } @@ -292,7 +298,7 @@ curl http://0.0.0.0:4000/v1/chat/completions \ { "type": "thinking", "thinking": "The user is asking for the current weather in three different locations: San Francisco, Tokyo, and Paris. I have access to the `get_current_weather` function that can provide this information.\n\nThe function requires a `location` parameter, and has an optional `unit` parameter. The user hasn't specified which unit they prefer (celsius or fahrenheit), so I'll use the default provided by the function.\n\nI need to make three separate function calls, one for each location:\n1. San Francisco\n2. Tokyo\n3. Paris\n\nThen I'll compile the results into a response with three distinct weather reports as requested by the user.", - "signature_delta": "EqoBCkgIARABGAIiQCkBXENoyB+HstUOs/iGjG+bvDbIQRrxPsPpOSt5yDxX6iulZ/4K/w9Rt4J5Nb2+3XUYsyOH+CpZMfADYvItFR4SDPb7CmzoGKoolCMAJRoM62p1ZRASZhrD3swqIjAVY7vOAFWKZyPEJglfX/60+bJphN9W1wXR6rWrqn3MwUbQ5Mb/pnpeb10HMploRgUqEGKOd6fRKTkUoNDuAnPb55c=" + "signature": "EqoBCkgIARABGAIiQCkBXENoyB+HstUOs/iGjG+bvDbIQRrxPsPpOSt5yDxX6iulZ/4K/w9Rt4J5Nb2+3XUYsyOH+CpZMfADYvItFR4SDPb7CmzoGKoolCMAJRoM62p1ZRASZhrD3swqIjAVY7vOAFWKZyPEJglfX/60+bJphN9W1wXR6rWrqn3MwUbQ5Mb/pnpeb10HMploRgUqEGKOd6fRKTkUoNDuAnPb55c=" } ], "provider_specific_fields": { @@ -353,5 +359,5 @@ These fields can be accessed via `response.choices[0].message.reasoning_content` - `thinking_blocks` - Optional[List[Dict[str, str]]]: A list of thinking blocks from the model. Only returned for Anthropic models. - `type` - str: The type of thinking block. - `thinking` - str: The thinking from the model. - - `signature_delta` - str: The signature delta from the model. + - `signature` - str: The signature from the model. 
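A short sketch of reading the fields described above, assuming an Anthropic model with `thinking` enabled; the model name and token budget are illustrative:

```python
import os
from litellm import completion

os.environ["ANTHROPIC_API_KEY"] = "my-api-key"

response = completion(
    model="anthropic/claude-3-7-sonnet-20250219",  # illustrative model
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    thinking={"type": "enabled", "budget_tokens": 1024},  # illustrative budget
)

message = response.choices[0].message
print(message.reasoning_content)  # plain-text reasoning
for block in message.thinking_blocks or []:
    # each block carries 'type', 'thinking', and 'signature' per the spec above
    print(block["type"], block["thinking"])
```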
diff --git a/docs/my-website/docs/rerank.md b/docs/my-website/docs/rerank.md index cc58c374c7..1e3cfd0fa5 100644 --- a/docs/my-website/docs/rerank.md +++ b/docs/my-website/docs/rerank.md @@ -1,4 +1,4 @@ -# Rerank +# /rerank :::tip diff --git a/docs/my-website/docs/response_api.md b/docs/my-website/docs/response_api.md new file mode 100644 index 0000000000..6f8aba13ba --- /dev/null +++ b/docs/my-website/docs/response_api.md @@ -0,0 +1,117 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# /responses [Beta] + +LiteLLM provides a BETA endpoint in the spec of [OpenAI's `/responses` API](https://platform.openai.com/docs/api-reference/responses). + +| Feature | Supported | Notes | +|---------|-----------|--------| +| Cost Tracking | ✅ | Works with all supported models | +| Logging | ✅ | Works across all integrations | +| End-user Tracking | ✅ | | +| Streaming | ✅ | | +| Fallbacks | ✅ | Works between supported models | +| Loadbalancing | ✅ | Works between supported models | +| Supported LiteLLM Versions | 1.63.8+ | | +| Supported LLM providers | `openai` | | + +## Usage + +## Create a model response + + + + +#### Non-streaming +```python +import litellm + +# Non-streaming response +response = litellm.responses( + model="gpt-4o", + input="Tell me a three sentence bedtime story about a unicorn.", + max_output_tokens=100 +) + +print(response) +``` + +#### Streaming +```python +import litellm + +# Streaming response +response = litellm.responses( + model="gpt-4o", + input="Tell me a three sentence bedtime story about a unicorn.", + stream=True +) + +for event in response: + print(event) +``` + + + + +First, add this to your litellm proxy config.yaml: +```yaml +model_list: + - model_name: gpt-4o + litellm_params: + model: openai/gpt-4o + api_key: os.environ/OPENAI_API_KEY +``` + +Start your LiteLLM proxy: +```bash +litellm --config /path/to/config.yaml + +# RUNNING on http://0.0.0.0:4000 +``` + +Then use the OpenAI SDK pointed to your proxy: + +#### Non-streaming +```python +from openai import OpenAI + +# Initialize client with your proxy URL +client = OpenAI( + base_url="http://localhost:4000", # Your proxy URL + api_key="your-api-key" # Your proxy API key +) + +# Non-streaming response +response = client.responses.create( + model="gpt-4o", + input="Tell me a three sentence bedtime story about a unicorn."
+) + +print(response) +``` + +#### Streaming +```python +from openai import OpenAI + +# Initialize client with your proxy URL +client = OpenAI( + base_url="http://localhost:4000", # Your proxy URL + api_key="your-api-key" # Your proxy API key +) + +# Streaming response +response = client.responses.create( + model="gpt-4o", + input="Tell me a three sentence bedtime story about a unicorn.", + stream=True +) + +for event in response: + print(event) +``` + + + diff --git a/docs/my-website/docs/routing.md b/docs/my-website/docs/routing.md index 79fefcf754..0ad28b24f4 100644 --- a/docs/my-website/docs/routing.md +++ b/docs/my-website/docs/routing.md @@ -952,8 +952,8 @@ router_settings: ``` Defaults: -- allowed_fails: 0 -- cooldown_time: 60s +- allowed_fails: 3 +- cooldown_time: 5s (`DEFAULT_COOLDOWN_TIME_SECONDS` in constants.py) **Set Per Model** diff --git a/docs/my-website/docs/secret.md b/docs/my-website/docs/secret.md index a65c696f36..7676164259 100644 --- a/docs/my-website/docs/secret.md +++ b/docs/my-website/docs/secret.md @@ -96,6 +96,33 @@ litellm --config /path/to/config.yaml ``` +### Using K/V pairs in 1 AWS Secret + +You can read multiple keys from a single AWS Secret using the `primary_secret_name` parameter: + +```yaml +general_settings: + key_management_system: "aws_secret_manager" + key_management_settings: + hosted_keys: [ + "OPENAI_API_KEY_MODEL_1", + "OPENAI_API_KEY_MODEL_2", + ] + primary_secret_name: "litellm_secrets" # 👈 Read multiple keys from one JSON secret +``` + +The `primary_secret_name` allows you to read multiple keys from a single AWS Secret as a JSON object. For example, the "litellm_secrets" secret would contain: + +```json +{ + "OPENAI_API_KEY_MODEL_1": "sk-key1...", + "OPENAI_API_KEY_MODEL_2": "sk-key2..." +} +``` + +This reduces the number of AWS Secrets you need to manage. + + ## Hashicorp Vault @@ -353,4 +380,7 @@ general_settings: # Hosted Keys Settings hosted_keys: ["litellm_master_key"] # OPTIONAL. Specify which env keys you stored on AWS + + # K/V pairs in 1 AWS Secret Settings + primary_secret_name: "litellm_secrets" # OPTIONAL. Read multiple keys from one JSON secret on AWS Secret Manager ``` \ No newline at end of file diff --git a/docs/my-website/docs/text_completion.md b/docs/my-website/docs/text_completion.md index 8be40dfdcd..cbf2db00a0 100644 --- a/docs/my-website/docs/text_completion.md +++ b/docs/my-website/docs/text_completion.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Text Completion +# /completions ### Usage diff --git a/docs/my-website/docs/tutorials/litellm_proxy_aporia.md b/docs/my-website/docs/tutorials/litellm_proxy_aporia.md index 3b5bada2bc..143512f99c 100644 --- a/docs/my-website/docs/tutorials/litellm_proxy_aporia.md +++ b/docs/my-website/docs/tutorials/litellm_proxy_aporia.md @@ -2,9 +2,9 @@ import Image from '@theme/IdealImage'; import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Use LiteLLM AI Gateway with Aporia Guardrails +# Aporia Guardrails with LiteLLM Gateway -In this tutorial we will use LiteLLM Proxy with Aporia to detect PII in requests and profanity in responses +In this tutorial we will use LiteLLM AI Gateway with Aporia to detect PII in requests and profanity in responses ## 1. 
Setup guardrails on Aporia diff --git a/docs/my-website/docs/tutorials/openweb_ui.md b/docs/my-website/docs/tutorials/openweb_ui.md new file mode 100644 index 0000000000..ab1e2e121e --- /dev/null +++ b/docs/my-website/docs/tutorials/openweb_ui.md @@ -0,0 +1,103 @@ +import Image from '@theme/IdealImage'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# OpenWeb UI with LiteLLM + +This guide walks you through connecting OpenWeb UI to LiteLLM. Using LiteLLM with OpenWeb UI allows teams to: +- Access 100+ LLMs on OpenWeb UI +- Track Spend / Usage, Set Budget Limits +- Send Request/Response Logs to logging destinations like langfuse, s3, gcs buckets, etc. +- Set access controls, e.g. control which models OpenWebUI can access. + +## Quickstart + +- Make sure to set up LiteLLM with the [LiteLLM Getting Started Guide](https://docs.litellm.ai/docs/proxy/docker_quick_start) + + +## 1. Start LiteLLM & OpenWebUI + +- OpenWebUI starts running on [http://localhost:3000](http://localhost:3000) +- LiteLLM starts running on [http://localhost:4000](http://localhost:4000) + + +## 2. Create a Virtual Key on LiteLLM + +Virtual Keys are API Keys that allow you to authenticate to LiteLLM Proxy. We will create a Virtual Key that will allow OpenWebUI to access LiteLLM. + +### 2.1 LiteLLM User Management Hierarchy + +On LiteLLM, you can create Organizations, Teams, Users and Virtual Keys. For this tutorial, we will create a Team and a Virtual Key. + +- `Organization` - An Organization is a group of Teams. (US Engineering, EU Developer Tools) +- `Team` - A Team is a group of Users. (OpenWeb UI Team, Data Science Team, etc.) +- `User` - A User is an individual user (employee, developer, e.g. `krrish@litellm.ai`) +- `Virtual Key` - A Virtual Key is an API Key that allows you to authenticate to LiteLLM Proxy. A Virtual Key is associated with a User or Team. + +Once the Team is created, you can invite Users to the Team. You can read more about LiteLLM's User Management [here](https://docs.litellm.ai/docs/proxy/user_management_heirarchy). + +### 2.2 Create a Team on LiteLLM + +Navigate to [http://localhost:4000/ui](http://localhost:4000/ui) and create a new team. + + + +### 2.3 Create a Virtual Key on LiteLLM + +Navigate to [http://localhost:4000/ui](http://localhost:4000/ui) and create a new virtual key. + +LiteLLM allows you to specify what models are available on OpenWeb UI (by specifying the models the key will have access to). + + + +## 3. Connect OpenWeb UI to LiteLLM + +On OpenWeb UI, navigate to Settings -> Connections and create a new connection to LiteLLM. + +Enter the following details: +- URL: `http://localhost:4000` (your litellm proxy base url) +- Key: `your-virtual-key` (the key you created in the previous step) + + + +### 3.1 Test Request + +In the top-left corner, select a model. You should only see the models you gave the key access to in Step 2. + +Once you have selected a model, enter your message content and click `Submit`. + + + +### 3.2 Tracking Spend / Usage + +After your request is made, navigate to `Logs` on the LiteLLM UI; you can see Team, Key, Model, Usage, and Cost. + + + + + +## Render `thinking` content on OpenWeb UI + +OpenWebUI requires reasoning/thinking content to be rendered with `<think>` tags. In order to render this for specific models, you can use the `merge_reasoning_content_in_choices` litellm parameter. 
+ +Example litellm config.yaml: + +```yaml +model_list: + - model_name: thinking-anthropic-claude-3-7-sonnet + litellm_params: + model: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0 + thinking: {"type": "enabled", "budget_tokens": 1024} + max_tokens: 1080 + merge_reasoning_content_in_choices: true +``` + +### Test it on OpenWeb UI + +On the models dropdown select `thinking-anthropic-claude-3-7-sonnet` + + + + + + diff --git a/docs/my-website/docusaurus.config.js b/docs/my-website/docusaurus.config.js index cf20dfcd70..8d480131ff 100644 --- a/docs/my-website/docusaurus.config.js +++ b/docs/my-website/docusaurus.config.js @@ -44,7 +44,7 @@ const config = { path: './release_notes', routeBasePath: 'release_notes', blogTitle: 'Release Notes', - blogSidebarTitle: 'All Releases', + blogSidebarTitle: 'Releases', blogSidebarCount: 'ALL', postsPerPage: 'ALL', showReadingTime: false, diff --git a/docs/my-website/img/basic_litellm.gif b/docs/my-website/img/basic_litellm.gif new file mode 100644 index 0000000000..d4cf9fd52a Binary files /dev/null and b/docs/my-website/img/basic_litellm.gif differ diff --git a/docs/my-website/img/create_key_in_team_oweb.gif b/docs/my-website/img/create_key_in_team_oweb.gif new file mode 100644 index 0000000000..d24849b259 Binary files /dev/null and b/docs/my-website/img/create_key_in_team_oweb.gif differ diff --git a/docs/my-website/img/litellm_create_team.gif b/docs/my-website/img/litellm_create_team.gif new file mode 100644 index 0000000000..e2f12613ec Binary files /dev/null and b/docs/my-website/img/litellm_create_team.gif differ diff --git a/docs/my-website/img/litellm_setup_openweb.gif b/docs/my-website/img/litellm_setup_openweb.gif new file mode 100644 index 0000000000..5618660d6c Binary files /dev/null and b/docs/my-website/img/litellm_setup_openweb.gif differ diff --git a/docs/my-website/img/litellm_thinking_openweb.gif b/docs/my-website/img/litellm_thinking_openweb.gif new file mode 100644 index 0000000000..385db583a4 Binary files /dev/null and b/docs/my-website/img/litellm_thinking_openweb.gif differ diff --git a/docs/my-website/img/release_notes/anthropic_thinking.jpg b/docs/my-website/img/release_notes/anthropic_thinking.jpg new file mode 100644 index 0000000000..f10de06dec Binary files /dev/null and b/docs/my-website/img/release_notes/anthropic_thinking.jpg differ diff --git a/docs/my-website/img/release_notes/error_logs.jpg b/docs/my-website/img/release_notes/error_logs.jpg new file mode 100644 index 0000000000..6f2767e1fb Binary files /dev/null and b/docs/my-website/img/release_notes/error_logs.jpg differ diff --git a/docs/my-website/img/release_notes/v1632_release.jpg b/docs/my-website/img/release_notes/v1632_release.jpg new file mode 100644 index 0000000000..1770460b2a Binary files /dev/null and b/docs/my-website/img/release_notes/v1632_release.jpg differ diff --git a/docs/my-website/package-lock.json b/docs/my-website/package-lock.json index b5392b32b4..6c07e67d91 100644 --- a/docs/my-website/package-lock.json +++ b/docs/my-website/package-lock.json @@ -706,12 +706,13 @@ } }, "node_modules/@babel/helpers": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.0.tgz", - "integrity": "sha512-tbhNuIxNcVb21pInl3ZSjksLCvgdZy9KwJ8brv993QtIVKJBBkYXz4q4ZbAv31GdnC+R90np23L5FbEBlthAEw==", + "version": "7.26.10", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.10.tgz", + "integrity": "sha512-UPYc3SauzZ3JGgj87GgZ89JVdC5dj0AoetR5Bw6wj4niittNyFh6+eOGonYvJ1ao6B8lEa3Q3klS7ADZ53bc5g==", + 
"license": "MIT", "dependencies": { - "@babel/template": "^7.25.9", - "@babel/types": "^7.26.0" + "@babel/template": "^7.26.9", + "@babel/types": "^7.26.10" }, "engines": { "node": ">=6.9.0" @@ -796,11 +797,12 @@ } }, "node_modules/@babel/parser": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.3.tgz", - "integrity": "sha512-WJ/CvmY8Mea8iDXo6a7RK2wbmJITT5fN3BEkRuFlxVyNx8jOKIIhmC4fSkTcPcf8JyavbBwIe6OpiCOBXt/IcA==", + "version": "7.26.10", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.10.tgz", + "integrity": "sha512-6aQR2zGE/QFi8JpDLjUZEPYOs7+mhKXm86VaKFiLP35JQwQb6bwUE+XbvkH0EptsYhbNBSUGaUBLKqxH1xSgsA==", + "license": "MIT", "dependencies": { - "@babel/types": "^7.26.3" + "@babel/types": "^7.26.10" }, "bin": { "parser": "bin/babel-parser.js" @@ -2157,9 +2159,10 @@ } }, "node_modules/@babel/runtime-corejs3": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.26.0.tgz", - "integrity": "sha512-YXHu5lN8kJCb1LOb9PgV6pvak43X2h4HvRApcN5SdWeaItQOzfn1hgP6jasD6KWQyJDBxrVmA9o9OivlnNJK/w==", + "version": "7.26.10", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.26.10.tgz", + "integrity": "sha512-uITFQYO68pMEYR46AHgQoyBg7KPPJDAbGn4jUTIRgCFJIp88MIBUianVOplhZDEec07bp9zIyr4Kp0FCyQzmWg==", + "license": "MIT", "dependencies": { "core-js-pure": "^3.30.2", "regenerator-runtime": "^0.14.0" @@ -2169,13 +2172,14 @@ } }, "node_modules/@babel/template": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.9.tgz", - "integrity": "sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.26.9.tgz", + "integrity": "sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA==", + "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.25.9", - "@babel/parser": "^7.25.9", - "@babel/types": "^7.25.9" + "@babel/code-frame": "^7.26.2", + "@babel/parser": "^7.26.9", + "@babel/types": "^7.26.9" }, "engines": { "node": ">=6.9.0" @@ -2199,9 +2203,10 @@ } }, "node_modules/@babel/types": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.3.tgz", - "integrity": "sha512-vN5p+1kl59GVKMvTHt55NzzmYVxprfJD+ql7U9NFIfKCBkYE55LYtS+WtPlaYOyzydrKI8Nezd+aZextrd+FMA==", + "version": "7.26.10", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.10.tgz", + "integrity": "sha512-emqcG3vHrpxUKTrxcblR36dcrcoRDvKmnL/dCL6ZsHaShW80qxCAcNhzQZrpeM765VzEos+xOi4s+r4IXzTwdQ==", + "license": "MIT", "dependencies": { "@babel/helper-string-parser": "^7.25.9", "@babel/helper-validator-identifier": "^7.25.9" diff --git a/docs/my-website/release_notes/v1.61.20-stable/index.md b/docs/my-website/release_notes/v1.61.20-stable/index.md index 4dda52e632..132c1aa318 100644 --- a/docs/my-website/release_notes/v1.61.20-stable/index.md +++ b/docs/my-website/release_notes/v1.61.20-stable/index.md @@ -20,12 +20,6 @@ import Image from '@theme/IdealImage'; # v1.61.20-stable -:::info - -`v1.61.20-stable` will be live on 2025-02-04. - -::: - These are the changes since `v1.61.13-stable`. 
This release is primarily focused on:
diff --git a/docs/my-website/release_notes/v1.63.0/index.md b/docs/my-website/release_notes/v1.63.0/index.md
new file mode 100644
index 0000000000..e74a2f9b86
--- /dev/null
+++ b/docs/my-website/release_notes/v1.63.0/index.md
@@ -0,0 +1,40 @@
+---
+title: v1.63.0 - Anthropic 'thinking' response update
+slug: v1.63.0
+date: 2025-03-05T10:00:00
+authors:
+  - name: Krrish Dholakia
+    title: CEO, LiteLLM
+    url: https://www.linkedin.com/in/krish-d/
+    image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+  - name: Ishaan Jaffer
+    title: CTO, LiteLLM
+    url: https://www.linkedin.com/in/reffajnaahsi/
+    image_url: https://media.licdn.com/dms/image/v2/D4D03AQGiM7ZrUwqu_Q/profile-displayphoto-shrink_800_800/profile-displayphoto-shrink_800_800/0/1675971026692?e=1741824000&v=beta&t=eQnRdXPJo4eiINWTZARoYTfqh064pgZ-E21pQTSy8jc
+tags: [llm translation, thinking, reasoning_content, claude-3-7-sonnet]
+hide_table_of_contents: false
+---
+
+v1.63.0 fixes the Anthropic 'thinking' response on streaming so that it returns the `signature` block. [GitHub Issue](https://github.com/BerriAI/litellm/issues/8964)
+
+
+
+It also renames the response field from `signature_delta` to `signature`, to match Anthropic's format. [Anthropic Docs](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#implementing-extended-thinking)
+
+
+## Diff
+
+```diff
+"message": {
+  ...
+  "reasoning_content": "The capital of France is Paris.",
+  "thinking_blocks": [
+    {
+      "type": "thinking",
+      "thinking": "The capital of France is Paris.",
+-      "signature_delta": "EqoBCkgIARABGAIiQL2UoU0b1OHYi+..." # 👈 OLD FORMAT
++      "signature": "EqoBCkgIARABGAIiQL2UoU0b1OHYi+..." # 👈 KEY CHANGE
+    }
+  ]
+}
+```
diff --git a/docs/my-website/release_notes/v1.63.2-stable/index.md b/docs/my-website/release_notes/v1.63.2-stable/index.md
new file mode 100644
index 0000000000..0c359452dc
--- /dev/null
+++ b/docs/my-website/release_notes/v1.63.2-stable/index.md
@@ -0,0 +1,112 @@
+---
+title: v1.63.2-stable
+slug: v1.63.2-stable
+date: 2025-03-08T10:00:00
+authors:
+  - name: Krrish Dholakia
+    title: CEO, LiteLLM
+    url: https://www.linkedin.com/in/krish-d/
+    image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+  - name: Ishaan Jaffer
+    title: CTO, LiteLLM
+    url: https://www.linkedin.com/in/reffajnaahsi/
+    image_url: https://media.licdn.com/dms/image/v2/D4D03AQGiM7ZrUwqu_Q/profile-displayphoto-shrink_800_800/profile-displayphoto-shrink_800_800/0/1675971026692?e=1741824000&v=beta&t=eQnRdXPJo4eiINWTZARoYTfqh064pgZ-E21pQTSy8jc
+tags: [llm translation, thinking, reasoning_content, claude-3-7-sonnet]
+hide_table_of_contents: false
+---
+
+import Image from '@theme/IdealImage';
+
+
+These are the changes since `v1.61.20-stable`.
+
+This release is primarily focused on:
+- LLM Translation improvements (more `thinking` content improvements)
+- UI improvements (error logs are now shown on the UI)
+
+
+:::info
+
+This release will be live on 03/09/2025
+
+:::
+
+
+
+
+## Demo Instance
+
+Here's a demo instance to test the changes:
+- Instance: https://demo.litellm.ai/
+- Login Credentials:
+    - Username: admin
+    - Password: sk-1234
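+
+To try the `thinking` improvements in this release end-to-end, here is a minimal sketch using the LiteLLM Python SDK (illustrative only, not part of the release itself; it assumes `ANTHROPIC_API_KEY` is set and that your account has access to claude-3-7-sonnet):
+
+```python
+# Illustrative sketch: read reasoning content and the new `signature` field.
+import litellm
+
+response = litellm.completion(
+    model="anthropic/claude-3-7-sonnet-20250219",
+    messages=[{"role": "user", "content": "What is 17 * 24?"}],
+    thinking={"type": "enabled", "budget_tokens": 1024},
+)
+
+message = response.choices[0].message
+print(message.reasoning_content)  # plain-text reasoning
+for block in message.thinking_blocks or []:
+    print(block["signature"])  # `signature` (previously `signature_delta`)
+```
+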
+## New Models / Updated Models
+
+1. Add `supports_pdf_input` for specific Bedrock Claude models [PR](https://github.com/BerriAI/litellm/commit/f63cf0030679fe1a43d03fb196e815a0f28dae92)
+2. Add pricing for amazon `eu` models [PR](https://github.com/BerriAI/litellm/commits/main/model_prices_and_context_window.json)
+3. Fix Azure O1 mini pricing [PR](https://github.com/BerriAI/litellm/commit/52de1949ef2f76b8572df751f9c868a016d4832c)
+
+## LLM Translation
+
+
+
+1. Support `/openai/` passthrough for Assistant endpoints. [Get Started](https://docs.litellm.ai/docs/pass_through/openai_passthrough)
+2. Bedrock Claude - Fix tool-calling transformation on the invoke route. [Get Started](../../docs/providers/bedrock#usage---function-calling--tool-calling)
+3. Bedrock Claude - Add response_format support for Claude on the invoke route. [Get Started](../../docs/providers/bedrock#usage---structured-output--json-mode)
+4. Bedrock - Pass `description` if set in response_format. [Get Started](../../docs/providers/bedrock#usage---structured-output--json-mode)
+5. Bedrock - Fix passing response_format: {"type": "text"}. [PR](https://github.com/BerriAI/litellm/commit/c84b489d5897755139aa7d4e9e54727ebe0fa540)
+6. OpenAI - Handle sending image_url as str to OpenAI. [Get Started](https://docs.litellm.ai/docs/completion/vision)
+7. Deepseek - Fix missing 'reasoning_content' on streaming. [Get Started](https://docs.litellm.ai/docs/reasoning_content)
+8. Caching - Support caching on reasoning content. [Get Started](https://docs.litellm.ai/docs/proxy/caching)
+9. Bedrock - Handle thinking blocks in assistant messages. [Get Started](https://docs.litellm.ai/docs/providers/bedrock#usage---thinking--reasoning-content)
+10. Anthropic - Return `signature` on streaming. [Get Started](https://docs.litellm.ai/docs/providers/bedrock#usage---thinking--reasoning-content)
+- Note: We've also migrated from `signature_delta` to `signature`. [Read more](https://docs.litellm.ai/release_notes/v1.63.0)
+11. Support the format param for specifying image type. [Get Started](../../docs/completion/vision.md#explicitly-specify-image-type)
+12. Anthropic - `thinking` param support on the `/v1/messages` endpoint (see the sketch below). [Get Started](../../docs/anthropic_unified.md)
+- Note: this refactors the [BETA] unified `/v1/messages` endpoint to just work for the Anthropic API.
+13. Vertex AI - Handle $id in the response schema when calling Vertex AI. [Get Started](https://docs.litellm.ai/docs/providers/vertex#json-schema)
+
+## Spend Tracking Improvements
+
+1. Batches API - Fix cost calculation to run on retrieve_batch. [Get Started](https://docs.litellm.ai/docs/batches)
+2. Batches API - Log batch models in spend logs / standard logging payload. [Get Started](../../docs/proxy/logging_spec.md#standardlogginghiddenparams)
+
+## Management Endpoints / UI
+
+
+
+1. Virtual Keys Page
+    - Allow team/org filters to be searchable on the Create Key page
+    - Add created_by and updated_by fields to the Keys table
+    - Show 'user_email' on the Keys table
+    - Show 100 keys per page, use full height, increase width of key alias
+2. Logs Page
+    - Show error logs on the LiteLLM UI
+    - Allow Internal Users to view their own logs
+3. Internal Users Page
+    - Allow admin to control default model access for internal users
+4. Fix session handling with cookies
+
+## Logging / Guardrail Integrations
+
+1. Fix Prometheus metrics with custom metrics, when keys containing team_id make requests. [PR](https://github.com/BerriAI/litellm/pull/8935)
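+
+As a quick, hypothetical illustration of the `/v1/messages` `thinking` support (item 12 under LLM Translation above) - the exact request shape for this [BETA] endpoint may differ:
+
+```python
+# Hypothetical sketch: Anthropic SDK pointed at a LiteLLM proxy on
+# localhost:4000, authenticated with a LiteLLM virtual key.
+import anthropic
+
+client = anthropic.Anthropic(
+    base_url="http://localhost:4000", api_key="sk-your-virtual-key"
+)
+
+reply = client.messages.create(
+    model="claude-3-7-sonnet-20250219",
+    max_tokens=2048,  # must be greater than the thinking budget
+    thinking={"type": "enabled", "budget_tokens": 1024},
+    messages=[{"role": "user", "content": "What is 17 * 24?"}],
+)
+print(reply.content)  # thinking blocks + the text reply
+```
+
+## Performance / Loadbalancing / Reliability improvements
+
+1.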
Cooldowns - Support cooldowns on models called with client side credentials. [Get Started](https://docs.litellm.ai/docs/proxy/clientside_auth#pass-user-llm-api-keys--api-base) +2. Tag-based Routing - ensures tag-based routing across all endpoints (`/embeddings`, `/image_generation`, etc.). [Get Started](https://docs.litellm.ai/docs/proxy/tag_routing) + +## General Proxy Improvements + +1. Raise BadRequestError when unknown model passed in request +2. Enforce model access restrictions on Azure OpenAI proxy route +3. Reliability fix - Handle emoji’s in text - fix orjson error +4. Model Access Patch - don't overwrite litellm.anthropic_models when running auth checks +5. Enable setting timezone information in docker image + +## Complete Git Diff + +[Here's the complete git diff](https://github.com/BerriAI/litellm/compare/v1.61.20-stable...v1.63.2-stable) \ No newline at end of file diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js index 8315537a17..385983fd33 100644 --- a/docs/my-website/sidebars.js +++ b/docs/my-website/sidebars.js @@ -46,6 +46,7 @@ const sidebars = { "proxy/health", "proxy/debugging", "proxy/spending_monitoring", + "proxy/master_key_rotations", ], }, "proxy/demo", @@ -257,17 +258,23 @@ const sidebars = { "completion/batching", "completion/mock_requests", "completion/reliable_completions", - 'tutorials/litellm_proxy_aporia', ] }, { type: "category", label: "Supported Endpoints", + link: { + type: "generated-index", + title: "Supported Endpoints", + description: + "Learn how to deploy + call models from different providers on LiteLLM", + slug: "/supported_endpoints", + }, items: [ { type: "category", - label: "Chat", + label: "/chat/completions", link: { type: "generated-index", title: "Chat Completions", @@ -280,11 +287,13 @@ const sidebars = { "completion/usage", ], }, + "response_api", "text_completion", "embedding/supported_embedding", + "anthropic_unified", { type: "category", - label: "Image", + label: "/images", items: [ "image_generation", "image_variations", @@ -292,7 +301,7 @@ const sidebars = { }, { type: "category", - label: "Audio", + label: "/audio", "items": [ "audio_transcription", "text_to_speech", @@ -351,23 +360,6 @@ const sidebars = { label: "LangChain, LlamaIndex, Instructor Integration", items: ["langchain/langchain", "tutorials/instructor"], }, - { - type: "category", - label: "Tutorials", - items: [ - - 'tutorials/azure_openai', - 'tutorials/instructor', - "tutorials/gradio_integration", - "tutorials/huggingface_codellama", - "tutorials/huggingface_tutorial", - "tutorials/TogetherAI_liteLLM", - "tutorials/finetuned_chat_gpt", - "tutorials/text_completion", - "tutorials/first_playground", - "tutorials/model_fallbacks", - ], - }, ], }, { @@ -384,13 +376,6 @@ const sidebars = { "load_test_rpm", ] }, - { - type: "category", - label: "Adding Providers", - items: [ - "adding_provider/directory_structure", - "adding_provider/new_rerank_provider"], - }, { type: "category", label: "Logging & Observability", @@ -425,12 +410,51 @@ const sidebars = { "observability/opik_integration", ], }, + { + type: "category", + label: "Tutorials", + items: [ + "tutorials/openweb_ui", + 'tutorials/litellm_proxy_aporia', + { + type: "category", + label: "LiteLLM Python SDK Tutorials", + items: [ + 'tutorials/azure_openai', + 'tutorials/instructor', + "tutorials/gradio_integration", + "tutorials/huggingface_codellama", + "tutorials/huggingface_tutorial", + "tutorials/TogetherAI_liteLLM", + "tutorials/finetuned_chat_gpt", + "tutorials/text_completion", + 
"tutorials/first_playground", + "tutorials/model_fallbacks", + ], + }, + ] + }, + { + type: "category", + label: "Contributing", + items: [ + "extras/contributing_code", + { + type: "category", + label: "Adding Providers", + items: [ + "adding_provider/directory_structure", + "adding_provider/new_rerank_provider"], + }, + "extras/contributing", + "contributing", + ] + }, { type: "category", label: "Extras", items: [ - "extras/contributing", "data_security", "data_retention", "migration_policy", @@ -447,6 +471,7 @@ const sidebars = { items: [ "projects/smolagents", "projects/Docq.AI", + "projects/PDL", "projects/OpenInterpreter", "projects/Elroy", "projects/dbally", @@ -462,9 +487,9 @@ const sidebars = { "projects/YiVal", "projects/LiteLLM Proxy", "projects/llm_cord", + "projects/pgai", ], }, - "contributing", "proxy/pii_masking", "extras/code_quality", "rules", diff --git a/enterprise/enterprise_hooks/aporia_ai.py b/enterprise/enterprise_hooks/aporia_ai.py index d258f00233..2b427bea5c 100644 --- a/enterprise/enterprise_hooks/aporia_ai.py +++ b/enterprise/enterprise_hooks/aporia_ai.py @@ -163,7 +163,7 @@ class AporiaGuardrail(CustomGuardrail): pass - async def async_moderation_hook( ### 👈 KEY CHANGE ### + async def async_moderation_hook( self, data: dict, user_api_key_dict: UserAPIKeyAuth, @@ -173,6 +173,7 @@ class AporiaGuardrail(CustomGuardrail): "image_generation", "moderation", "audio_transcription", + "responses", ], ): from litellm.proxy.common_utils.callback_utils import ( diff --git a/enterprise/enterprise_hooks/google_text_moderation.py b/enterprise/enterprise_hooks/google_text_moderation.py index af5ea35987..fe26a03207 100644 --- a/enterprise/enterprise_hooks/google_text_moderation.py +++ b/enterprise/enterprise_hooks/google_text_moderation.py @@ -94,6 +94,7 @@ class _ENTERPRISE_GoogleTextModeration(CustomLogger): "image_generation", "moderation", "audio_transcription", + "responses", ], ): """ diff --git a/enterprise/enterprise_hooks/llama_guard.py b/enterprise/enterprise_hooks/llama_guard.py index 8abbc996d3..2c53fafa5b 100644 --- a/enterprise/enterprise_hooks/llama_guard.py +++ b/enterprise/enterprise_hooks/llama_guard.py @@ -107,6 +107,7 @@ class _ENTERPRISE_LlamaGuard(CustomLogger): "image_generation", "moderation", "audio_transcription", + "responses", ], ): """ diff --git a/enterprise/enterprise_hooks/llm_guard.py b/enterprise/enterprise_hooks/llm_guard.py index 1b639b8a08..078b8e216e 100644 --- a/enterprise/enterprise_hooks/llm_guard.py +++ b/enterprise/enterprise_hooks/llm_guard.py @@ -126,6 +126,7 @@ class _ENTERPRISE_LLMGuard(CustomLogger): "image_generation", "moderation", "audio_transcription", + "responses", ], ): """ diff --git a/enterprise/enterprise_hooks/openai_moderation.py b/enterprise/enterprise_hooks/openai_moderation.py index 47506a00c4..1db932c853 100644 --- a/enterprise/enterprise_hooks/openai_moderation.py +++ b/enterprise/enterprise_hooks/openai_moderation.py @@ -31,7 +31,7 @@ class _ENTERPRISE_OpenAI_Moderation(CustomLogger): #### CALL HOOKS - proxy only #### - async def async_moderation_hook( ### 👈 KEY CHANGE ### + async def async_moderation_hook( self, data: dict, user_api_key_dict: UserAPIKeyAuth, @@ -41,6 +41,7 @@ class _ENTERPRISE_OpenAI_Moderation(CustomLogger): "image_generation", "moderation", "audio_transcription", + "responses", ], ): text = "" diff --git a/litellm/__init__.py b/litellm/__init__.py index 86c75d8b14..914796e269 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -8,12 +8,14 @@ import os from typing import Callable, 
List, Optional, Dict, Union, Any, Literal, get_args from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler from litellm.caching.caching import Cache, DualCache, RedisCache, InMemoryCache +from litellm.caching.llm_caching_handler import LLMClientCache from litellm.types.llms.bedrock import COHERE_EMBEDDING_INPUT_TYPES from litellm.types.utils import ( ImageObject, BudgetConfig, all_litellm_params, all_litellm_params as _litellm_completion_params, + CredentialItem, ) # maintain backwards compatibility for root param from litellm._logging import ( set_verbose, @@ -190,15 +192,17 @@ ssl_verify: Union[str, bool] = True ssl_certificate: Optional[str] = None disable_streaming_logging: bool = False disable_add_transform_inline_image_block: bool = False -in_memory_llm_clients_cache: InMemoryCache = InMemoryCache() +in_memory_llm_clients_cache: LLMClientCache = LLMClientCache() safe_memory_mode: bool = False enable_azure_ad_token_refresh: Optional[bool] = False ### DEFAULT AZURE API VERSION ### -AZURE_DEFAULT_API_VERSION = "2024-08-01-preview" # this is updated to the latest +AZURE_DEFAULT_API_VERSION = "2025-02-01-preview" # this is updated to the latest ### DEFAULT WATSONX API VERSION ### WATSONX_DEFAULT_API_VERSION = "2024-03-13" ### COHERE EMBEDDINGS DEFAULT TYPE ### COHERE_DEFAULT_EMBEDDING_INPUT_TYPE: COHERE_EMBEDDING_INPUT_TYPES = "search_document" +### CREDENTIALS ### +credential_list: List[CredentialItem] = [] ### GUARDRAILS ### llamaguard_model_name: Optional[str] = None openai_moderations_model_name: Optional[str] = None @@ -278,8 +282,6 @@ disable_end_user_cost_tracking_prometheus_only: Optional[bool] = None custom_prometheus_metadata_labels: List[str] = [] #### REQUEST PRIORITIZATION #### priority_reservation: Optional[Dict[str, float]] = None - - force_ipv4: bool = ( False # when True, litellm will force ipv4 for all LLM requests. Some users have seen httpx ConnectionError when using ipv6. 
) @@ -807,9 +809,6 @@ from .llms.oobabooga.chat.transformation import OobaboogaConfig from .llms.maritalk import MaritalkConfig from .llms.openrouter.chat.transformation import OpenrouterConfig from .llms.anthropic.chat.transformation import AnthropicConfig -from .llms.anthropic.experimental_pass_through.transformation import ( - AnthropicExperimentalPassThroughConfig, -) from .llms.groq.stt.transformation import GroqSTTConfig from .llms.anthropic.completion.transformation import AnthropicTextConfig from .llms.triton.completion.transformation import TritonConfig @@ -829,6 +828,9 @@ from .llms.infinity.rerank.transformation import InfinityRerankConfig from .llms.jina_ai.rerank.transformation import JinaAIRerankConfig from .llms.clarifai.chat.transformation import ClarifaiConfig from .llms.ai21.chat.transformation import AI21ChatConfig, AI21ChatConfig as AI21Config +from .llms.anthropic.experimental_pass_through.messages.transformation import ( + AnthropicMessagesConfig, +) from .llms.together_ai.chat import TogetherAIConfig from .llms.together_ai.completion.transformation import TogetherAITextCompletionConfig from .llms.cloudflare.chat.transformation import CloudflareChatConfig @@ -909,6 +911,7 @@ from .llms.bedrock.chat.invoke_transformations.base_invoke_transformation import from .llms.bedrock.image.amazon_stability1_transformation import AmazonStabilityConfig from .llms.bedrock.image.amazon_stability3_transformation import AmazonStability3Config +from .llms.bedrock.image.amazon_nova_canvas_transformation import AmazonNovaCanvasConfig from .llms.bedrock.embed.amazon_titan_g1_transformation import AmazonTitanG1Config from .llms.bedrock.embed.amazon_titan_multimodal_transformation import ( AmazonTitanMultimodalEmbeddingG1Config, @@ -931,6 +934,7 @@ from .llms.groq.chat.transformation import GroqChatConfig from .llms.voyage.embedding.transformation import VoyageEmbeddingConfig from .llms.azure_ai.chat.transformation import AzureAIStudioConfig from .llms.mistral.mistral_chat_transformation import MistralConfig +from .llms.openai.responses.transformation import OpenAIResponsesAPIConfig from .llms.openai.chat.o_series_transformation import ( OpenAIOSeriesConfig as OpenAIO1Config, # maintain backwards compatibility OpenAIOSeriesConfig, @@ -1021,6 +1025,8 @@ from .assistants.main import * from .batches.main import * from .batch_completion.main import * # type: ignore from .rerank_api.main import * +from .llms.anthropic.experimental_pass_through.messages.handler import * +from .responses.main import * from .realtime_api.main import _arealtime from .fine_tuning.main import * from .files.main import * diff --git a/litellm/adapters/anthropic_adapter.py b/litellm/adapters/anthropic_adapter.py deleted file mode 100644 index 961bc77527..0000000000 --- a/litellm/adapters/anthropic_adapter.py +++ /dev/null @@ -1,186 +0,0 @@ -# What is this? 
-## Translates OpenAI call to Anthropic `/v1/messages` format -import traceback -from typing import Any, Optional - -import litellm -from litellm import ChatCompletionRequest, verbose_logger -from litellm.integrations.custom_logger import CustomLogger -from litellm.types.llms.anthropic import AnthropicMessagesRequest, AnthropicResponse -from litellm.types.utils import AdapterCompletionStreamWrapper, ModelResponse - - -class AnthropicAdapter(CustomLogger): - def __init__(self) -> None: - super().__init__() - - def translate_completion_input_params( - self, kwargs - ) -> Optional[ChatCompletionRequest]: - """ - - translate params, where needed - - pass rest, as is - """ - request_body = AnthropicMessagesRequest(**kwargs) # type: ignore - - translated_body = litellm.AnthropicExperimentalPassThroughConfig().translate_anthropic_to_openai( - anthropic_message_request=request_body - ) - - return translated_body - - def translate_completion_output_params( - self, response: ModelResponse - ) -> Optional[AnthropicResponse]: - - return litellm.AnthropicExperimentalPassThroughConfig().translate_openai_response_to_anthropic( - response=response - ) - - def translate_completion_output_params_streaming( - self, completion_stream: Any - ) -> AdapterCompletionStreamWrapper | None: - return AnthropicStreamWrapper(completion_stream=completion_stream) - - -anthropic_adapter = AnthropicAdapter() - - -class AnthropicStreamWrapper(AdapterCompletionStreamWrapper): - """ - - first chunk return 'message_start' - - content block must be started and stopped - - finish_reason must map exactly to anthropic reason, else anthropic client won't be able to parse it. - """ - - sent_first_chunk: bool = False - sent_content_block_start: bool = False - sent_content_block_finish: bool = False - sent_last_message: bool = False - holding_chunk: Optional[Any] = None - - def __next__(self): - try: - if self.sent_first_chunk is False: - self.sent_first_chunk = True - return { - "type": "message_start", - "message": { - "id": "msg_1nZdL29xx5MUA1yADyHTEsnR8uuvGzszyY", - "type": "message", - "role": "assistant", - "content": [], - "model": "claude-3-5-sonnet-20240620", - "stop_reason": None, - "stop_sequence": None, - "usage": {"input_tokens": 25, "output_tokens": 1}, - }, - } - if self.sent_content_block_start is False: - self.sent_content_block_start = True - return { - "type": "content_block_start", - "index": 0, - "content_block": {"type": "text", "text": ""}, - } - - for chunk in self.completion_stream: - if chunk == "None" or chunk is None: - raise Exception - - processed_chunk = litellm.AnthropicExperimentalPassThroughConfig().translate_streaming_openai_response_to_anthropic( - response=chunk - ) - if ( - processed_chunk["type"] == "message_delta" - and self.sent_content_block_finish is False - ): - self.holding_chunk = processed_chunk - self.sent_content_block_finish = True - return { - "type": "content_block_stop", - "index": 0, - } - elif self.holding_chunk is not None: - return_chunk = self.holding_chunk - self.holding_chunk = processed_chunk - return return_chunk - else: - return processed_chunk - if self.holding_chunk is not None: - return_chunk = self.holding_chunk - self.holding_chunk = None - return return_chunk - if self.sent_last_message is False: - self.sent_last_message = True - return {"type": "message_stop"} - raise StopIteration - except StopIteration: - if self.sent_last_message is False: - self.sent_last_message = True - return {"type": "message_stop"} - raise StopIteration - except Exception as e: - 
verbose_logger.error( - "Anthropic Adapter - {}\n{}".format(e, traceback.format_exc()) - ) - - async def __anext__(self): - try: - if self.sent_first_chunk is False: - self.sent_first_chunk = True - return { - "type": "message_start", - "message": { - "id": "msg_1nZdL29xx5MUA1yADyHTEsnR8uuvGzszyY", - "type": "message", - "role": "assistant", - "content": [], - "model": "claude-3-5-sonnet-20240620", - "stop_reason": None, - "stop_sequence": None, - "usage": {"input_tokens": 25, "output_tokens": 1}, - }, - } - if self.sent_content_block_start is False: - self.sent_content_block_start = True - return { - "type": "content_block_start", - "index": 0, - "content_block": {"type": "text", "text": ""}, - } - async for chunk in self.completion_stream: - if chunk == "None" or chunk is None: - raise Exception - processed_chunk = litellm.AnthropicExperimentalPassThroughConfig().translate_streaming_openai_response_to_anthropic( - response=chunk - ) - if ( - processed_chunk["type"] == "message_delta" - and self.sent_content_block_finish is False - ): - self.holding_chunk = processed_chunk - self.sent_content_block_finish = True - return { - "type": "content_block_stop", - "index": 0, - } - elif self.holding_chunk is not None: - return_chunk = self.holding_chunk - self.holding_chunk = processed_chunk - return return_chunk - else: - return processed_chunk - if self.holding_chunk is not None: - return_chunk = self.holding_chunk - self.holding_chunk = None - return return_chunk - if self.sent_last_message is False: - self.sent_last_message = True - return {"type": "message_stop"} - raise StopIteration - except StopIteration: - if self.sent_last_message is False: - self.sent_last_message = True - return {"type": "message_stop"} - raise StopAsyncIteration diff --git a/litellm/assistants/main.py b/litellm/assistants/main.py index acb37b1e6f..28f4518f15 100644 --- a/litellm/assistants/main.py +++ b/litellm/assistants/main.py @@ -15,6 +15,7 @@ import litellm from litellm.types.router import GenericLiteLLMParams from litellm.utils import ( exception_type, + get_litellm_params, get_llm_provider, get_secret, supports_httpx_timeout, @@ -86,6 +87,7 @@ def get_assistants( optional_params = GenericLiteLLMParams( api_key=api_key, api_base=api_base, api_version=api_version, **kwargs ) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -169,6 +171,7 @@ def get_assistants( max_retries=optional_params.max_retries, client=client, aget_assistants=aget_assistants, # type: ignore + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -270,6 +273,7 @@ def create_assistants( optional_params = GenericLiteLLMParams( api_key=api_key, api_base=api_base, api_version=api_version, **kwargs ) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -371,6 +375,7 @@ def create_assistants( client=client, async_create_assistants=async_create_assistants, create_assistant_data=create_assistant_data, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -445,6 +450,8 @@ def delete_assistant( api_key=api_key, api_base=api_base, api_version=api_version, **kwargs ) + litellm_params_dict = get_litellm_params(**kwargs) + async_delete_assistants: Optional[bool] = kwargs.pop( "async_delete_assistants", None ) @@ -544,6 +551,7 @@ def delete_assistant( 
max_retries=optional_params.max_retries, client=client, async_delete_assistants=async_delete_assistants, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -639,6 +647,7 @@ def create_thread( """ acreate_thread = kwargs.get("acreate_thread", None) optional_params = GenericLiteLLMParams(**kwargs) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -731,6 +740,7 @@ def create_thread( max_retries=optional_params.max_retries, client=client, acreate_thread=acreate_thread, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -795,7 +805,7 @@ def get_thread( """Get the thread object, given a thread_id""" aget_thread = kwargs.pop("aget_thread", None) optional_params = GenericLiteLLMParams(**kwargs) - + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 # set timeout for 10 minutes by default @@ -884,6 +894,7 @@ def get_thread( max_retries=optional_params.max_retries, client=client, aget_thread=aget_thread, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -972,6 +983,7 @@ def add_message( _message_data = MessageData( role=role, content=content, attachments=attachments, metadata=metadata ) + litellm_params_dict = get_litellm_params(**kwargs) optional_params = GenericLiteLLMParams(**kwargs) message_data = get_optional_params_add_message( @@ -1068,6 +1080,7 @@ def add_message( max_retries=optional_params.max_retries, client=client, a_add_message=a_add_message, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -1139,6 +1152,7 @@ def get_messages( ) -> SyncCursorPage[OpenAIMessage]: aget_messages = kwargs.pop("aget_messages", None) optional_params = GenericLiteLLMParams(**kwargs) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -1225,6 +1239,7 @@ def get_messages( max_retries=optional_params.max_retries, client=client, aget_messages=aget_messages, + litellm_params=litellm_params_dict, ) else: raise litellm.exceptions.BadRequestError( @@ -1337,6 +1352,7 @@ def run_thread( """Run a given thread + assistant.""" arun_thread = kwargs.pop("arun_thread", None) optional_params = GenericLiteLLMParams(**kwargs) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -1437,6 +1453,7 @@ def run_thread( max_retries=optional_params.max_retries, client=client, arun_thread=arun_thread, + litellm_params=litellm_params_dict, ) # type: ignore else: raise litellm.exceptions.BadRequestError( diff --git a/litellm/batches/batch_utils.py b/litellm/batches/batch_utils.py index f24eda0432..af53304e5a 100644 --- a/litellm/batches/batch_utils.py +++ b/litellm/batches/batch_utils.py @@ -1,76 +1,16 @@ -import asyncio -import datetime import json -import threading -from typing import Any, List, Literal, Optional +from typing import Any, List, Literal, Tuple import litellm from litellm._logging import verbose_logger -from litellm.constants import ( - BATCH_STATUS_POLL_INTERVAL_SECONDS, - BATCH_STATUS_POLL_MAX_ATTEMPTS, -) -from litellm.files.main import afile_content -from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj from 
litellm.types.llms.openai import Batch -from litellm.types.utils import StandardLoggingPayload, Usage - - -async def batches_async_logging( - batch_id: str, - custom_llm_provider: Literal["openai", "azure", "vertex_ai"] = "openai", - logging_obj: Optional[LiteLLMLoggingObj] = None, - **kwargs, -): - """ - Async Job waits for the batch to complete and then logs the completed batch usage - cost, total tokens, prompt tokens, completion tokens - - - Polls retrieve_batch until it returns a batch with status "completed" or "failed" - """ - from .main import aretrieve_batch - - verbose_logger.debug( - ".....in _batches_async_logging... polling retrieve to get batch status" - ) - if logging_obj is None: - raise ValueError( - "logging_obj is None cannot calculate cost / log batch creation event" - ) - for _ in range(BATCH_STATUS_POLL_MAX_ATTEMPTS): - try: - start_time = datetime.datetime.now() - batch: Batch = await aretrieve_batch(batch_id, custom_llm_provider) - verbose_logger.debug( - "in _batches_async_logging... batch status= %s", batch.status - ) - - if batch.status == "completed": - end_time = datetime.datetime.now() - await _handle_completed_batch( - batch=batch, - custom_llm_provider=custom_llm_provider, - logging_obj=logging_obj, - start_time=start_time, - end_time=end_time, - **kwargs, - ) - break - elif batch.status == "failed": - pass - except Exception as e: - verbose_logger.error("error in batches_async_logging", e) - await asyncio.sleep(BATCH_STATUS_POLL_INTERVAL_SECONDS) +from litellm.types.utils import CallTypes, Usage async def _handle_completed_batch( batch: Batch, custom_llm_provider: Literal["openai", "azure", "vertex_ai"], - logging_obj: LiteLLMLoggingObj, - start_time: datetime.datetime, - end_time: datetime.datetime, - **kwargs, -) -> None: +) -> Tuple[float, Usage, List[str]]: """Helper function to process a completed batch and handle logging""" # Get batch results file_content_dictionary = await _get_batch_output_file_content_as_dictionary( @@ -87,52 +27,25 @@ async def _handle_completed_batch( custom_llm_provider=custom_llm_provider, ) - # Handle logging - await _log_completed_batch( - logging_obj=logging_obj, - batch_usage=batch_usage, - batch_cost=batch_cost, - start_time=start_time, - end_time=end_time, - **kwargs, - ) + batch_models = _get_batch_models_from_file_content(file_content_dictionary) + + return batch_cost, batch_usage, batch_models -async def _log_completed_batch( - logging_obj: LiteLLMLoggingObj, - batch_usage: Usage, - batch_cost: float, - start_time: datetime.datetime, - end_time: datetime.datetime, - **kwargs, -) -> None: - """Helper function to handle all logging operations for a completed batch""" - logging_obj.call_type = "batch_success" - - standard_logging_object = _create_standard_logging_object_for_completed_batch( - kwargs=kwargs, - start_time=start_time, - end_time=end_time, - logging_obj=logging_obj, - batch_usage_object=batch_usage, - response_cost=batch_cost, - ) - - logging_obj.model_call_details["standard_logging_object"] = standard_logging_object - - # Launch async and sync logging handlers - asyncio.create_task( - logging_obj.async_success_handler( - result=None, - start_time=start_time, - end_time=end_time, - cache_hit=None, - ) - ) - threading.Thread( - target=logging_obj.success_handler, - args=(None, start_time, end_time), - ).start() +def _get_batch_models_from_file_content( + file_content_dictionary: List[dict], +) -> List[str]: + """ + Get the models from the file content + """ + batch_models = [] + for _item in 
file_content_dictionary: + if _batch_response_was_successful(_item): + _response_body = _get_response_from_batch_job_output_file(_item) + _model = _response_body.get("model") + if _model: + batch_models.append(_model) + return batch_models async def _batch_cost_calculator( @@ -159,6 +72,8 @@ async def _get_batch_output_file_content_as_dictionary( """ Get the batch output file content as a list of dictionaries """ + from litellm.files.main import afile_content + if custom_llm_provider == "vertex_ai": raise ValueError("Vertex AI does not support file content retrieval") @@ -208,6 +123,7 @@ def _get_batch_job_cost_from_file_content( total_cost += litellm.completion_cost( completion_response=_response_body, custom_llm_provider=custom_llm_provider, + call_type=CallTypes.aretrieve_batch.value, ) verbose_logger.debug("total_cost=%s", total_cost) return total_cost @@ -264,30 +180,3 @@ def _batch_response_was_successful(batch_job_output_file: dict) -> bool: """ _response: dict = batch_job_output_file.get("response", None) or {} return _response.get("status_code", None) == 200 - - -def _create_standard_logging_object_for_completed_batch( - kwargs: dict, - start_time: datetime.datetime, - end_time: datetime.datetime, - logging_obj: LiteLLMLoggingObj, - batch_usage_object: Usage, - response_cost: float, -) -> StandardLoggingPayload: - """ - Create a standard logging object for a completed batch - """ - standard_logging_object = logging_obj.model_call_details.get( - "standard_logging_object", None - ) - - if standard_logging_object is None: - raise ValueError("unable to create standard logging object for completed batch") - - # Add Completed Batch Job Usage and Response Cost - standard_logging_object["call_type"] = "batch_success" - standard_logging_object["response_cost"] = response_cost - standard_logging_object["total_tokens"] = batch_usage_object.total_tokens - standard_logging_object["prompt_tokens"] = batch_usage_object.prompt_tokens - standard_logging_object["completion_tokens"] = batch_usage_object.completion_tokens - return standard_logging_object diff --git a/litellm/batches/main.py b/litellm/batches/main.py index 32428c9c18..1ddcafce4c 100644 --- a/litellm/batches/main.py +++ b/litellm/batches/main.py @@ -31,10 +31,9 @@ from litellm.types.llms.openai import ( RetrieveBatchRequest, ) from litellm.types.router import GenericLiteLLMParams +from litellm.types.utils import LiteLLMBatch from litellm.utils import client, get_litellm_params, supports_httpx_timeout -from .batch_utils import batches_async_logging - ####### ENVIRONMENT VARIABLES ################### openai_batches_instance = OpenAIBatchesAPI() azure_batches_instance = AzureBatchesAPI() @@ -85,17 +84,6 @@ async def acreate_batch( else: response = init_response - # Start async logging job - if response is not None: - asyncio.create_task( - batches_async_logging( - logging_obj=kwargs.get("litellm_logging_obj", None), - batch_id=response.id, - custom_llm_provider=custom_llm_provider, - **kwargs, - ) - ) - return response except Exception as e: raise e @@ -111,7 +99,7 @@ def create_batch( extra_headers: Optional[Dict[str, str]] = None, extra_body: Optional[Dict[str, str]] = None, **kwargs, -) -> Union[Batch, Coroutine[Any, Any, Batch]]: +) -> Union[LiteLLMBatch, Coroutine[Any, Any, LiteLLMBatch]]: """ Creates and executes a batch from an uploaded file of request @@ -119,21 +107,27 @@ def create_batch( """ try: optional_params = GenericLiteLLMParams(**kwargs) + litellm_call_id = kwargs.get("litellm_call_id", None) + proxy_server_request = 
kwargs.get("proxy_server_request", None) + model_info = kwargs.get("model_info", None) _is_async = kwargs.pop("acreate_batch", False) is True + litellm_params = get_litellm_params(**kwargs) litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj", None) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 - litellm_params = get_litellm_params( - custom_llm_provider=custom_llm_provider, - litellm_call_id=kwargs.get("litellm_call_id", None), - litellm_trace_id=kwargs.get("litellm_trace_id"), - litellm_metadata=kwargs.get("litellm_metadata"), - ) litellm_logging_obj.update_environment_variables( model=None, user=None, optional_params=optional_params.model_dump(), - litellm_params=litellm_params, + litellm_params={ + "litellm_call_id": litellm_call_id, + "proxy_server_request": proxy_server_request, + "model_info": model_info, + "metadata": metadata, + "preset_cache_key": None, + "stream_response": {}, + **optional_params.model_dump(exclude_unset=True), + }, custom_llm_provider=custom_llm_provider, ) @@ -224,6 +218,7 @@ def create_batch( timeout=timeout, max_retries=optional_params.max_retries, create_batch_data=_create_batch_request, + litellm_params=litellm_params, ) elif custom_llm_provider == "vertex_ai": api_base = optional_params.api_base or "" @@ -261,7 +256,7 @@ def create_batch( response=httpx.Response( status_code=400, content="Unsupported provider", - request=httpx.Request(method="create_thread", url="https://github.com/BerriAI/litellm"), # type: ignore + request=httpx.Request(method="create_batch", url="https://github.com/BerriAI/litellm"), # type: ignore ), ) return response @@ -269,6 +264,7 @@ def create_batch( raise e +@client async def aretrieve_batch( batch_id: str, custom_llm_provider: Literal["openai", "azure", "vertex_ai"] = "openai", @@ -276,7 +272,7 @@ async def aretrieve_batch( extra_headers: Optional[Dict[str, str]] = None, extra_body: Optional[Dict[str, str]] = None, **kwargs, -) -> Batch: +) -> LiteLLMBatch: """ Async: Retrieves a batch. @@ -310,6 +306,7 @@ async def aretrieve_batch( raise e +@client def retrieve_batch( batch_id: str, custom_llm_provider: Literal["openai", "azure", "vertex_ai"] = "openai", @@ -317,7 +314,7 @@ def retrieve_batch( extra_headers: Optional[Dict[str, str]] = None, extra_body: Optional[Dict[str, str]] = None, **kwargs, -) -> Union[Batch, Coroutine[Any, Any, Batch]]: +) -> Union[LiteLLMBatch, Coroutine[Any, Any, LiteLLMBatch]]: """ Retrieves a batch. 
@@ -325,9 +322,20 @@ def retrieve_batch( """ try: optional_params = GenericLiteLLMParams(**kwargs) + litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj", None) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 - # set timeout for 10 minutes by default + litellm_params = get_litellm_params( + custom_llm_provider=custom_llm_provider, + **kwargs, + ) + litellm_logging_obj.update_environment_variables( + model=None, + user=None, + optional_params=optional_params.model_dump(), + litellm_params=litellm_params, + custom_llm_provider=custom_llm_provider, + ) if ( timeout is not None @@ -415,6 +423,7 @@ def retrieve_batch( timeout=timeout, max_retries=optional_params.max_retries, retrieve_batch_data=_retrieve_batch_request, + litellm_params=litellm_params, ) elif custom_llm_provider == "vertex_ai": api_base = optional_params.api_base or "" @@ -517,6 +526,10 @@ def list_batches( try: # set API KEY optional_params = GenericLiteLLMParams(**kwargs) + litellm_params = get_litellm_params( + custom_llm_provider=custom_llm_provider, + **kwargs, + ) api_key = ( optional_params.api_key or litellm.api_key # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there @@ -594,6 +607,7 @@ def list_batches( api_version=api_version, timeout=timeout, max_retries=optional_params.max_retries, + litellm_params=litellm_params, ) else: raise litellm.exceptions.BadRequestError( @@ -669,6 +683,10 @@ def cancel_batch( """ try: optional_params = GenericLiteLLMParams(**kwargs) + litellm_params = get_litellm_params( + custom_llm_provider=custom_llm_provider, + **kwargs, + ) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 # set timeout for 10 minutes by default @@ -756,6 +774,7 @@ def cancel_batch( timeout=timeout, max_retries=optional_params.max_retries, cancel_batch_data=_cancel_batch_request, + litellm_params=litellm_params, ) else: raise litellm.exceptions.BadRequestError( diff --git a/litellm/caching/caching_handler.py b/litellm/caching/caching_handler.py index 40c1001732..2a958c9eee 100644 --- a/litellm/caching/caching_handler.py +++ b/litellm/caching/caching_handler.py @@ -247,7 +247,6 @@ class LLMCachingHandler: pass else: call_type = original_function.__name__ - cached_result = self._convert_cached_result_to_model_response( cached_result=cached_result, call_type=call_type, @@ -725,6 +724,7 @@ class LLMCachingHandler: """ Sync internal method to add the result to the cache """ + new_kwargs = kwargs.copy() new_kwargs.update( convert_args_to_kwargs( @@ -738,6 +738,7 @@ class LLMCachingHandler: if self._should_store_result_in_cache( original_function=self.original_function, kwargs=new_kwargs ): + litellm.cache.add_cache(result, **new_kwargs) return diff --git a/litellm/caching/llm_caching_handler.py b/litellm/caching/llm_caching_handler.py new file mode 100644 index 0000000000..429634b7b1 --- /dev/null +++ b/litellm/caching/llm_caching_handler.py @@ -0,0 +1,40 @@ +""" +Add the event loop to the cache key, to prevent event loop closed errors. +""" + +import asyncio + +from .in_memory_cache import InMemoryCache + + +class LLMClientCache(InMemoryCache): + + def update_cache_key_with_event_loop(self, key): + """ + Add the event loop to the cache key, to prevent event loop closed errors. + If none, use the key as is. 
+ """ + try: + event_loop = asyncio.get_event_loop() + stringified_event_loop = str(id(event_loop)) + return f"{key}-{stringified_event_loop}" + except Exception: # handle no current event loop + return key + + def set_cache(self, key, value, **kwargs): + key = self.update_cache_key_with_event_loop(key) + return super().set_cache(key, value, **kwargs) + + async def async_set_cache(self, key, value, **kwargs): + key = self.update_cache_key_with_event_loop(key) + return await super().async_set_cache(key, value, **kwargs) + + def get_cache(self, key, **kwargs): + key = self.update_cache_key_with_event_loop(key) + + return super().get_cache(key, **kwargs) + + async def async_get_cache(self, key, **kwargs): + key = self.update_cache_key_with_event_loop(key) + + return await super().async_get_cache(key, **kwargs) diff --git a/litellm/constants.py b/litellm/constants.py index 0288c45e40..b4551a78f5 100644 --- a/litellm/constants.py +++ b/litellm/constants.py @@ -18,6 +18,7 @@ SINGLE_DEPLOYMENT_TRAFFIC_FAILURE_THRESHOLD = 1000 # Minimum number of requests REPEATED_STREAMING_CHUNK_LIMIT = 100 # catch if model starts looping the same chunk while streaming. Uses high default to prevent false positives. #### Networking settings #### request_timeout: float = 6000 # time in seconds +STREAM_SSE_DONE_STRING: str = "[DONE]" LITELLM_CHAT_PROVIDERS = [ "openai", diff --git a/litellm/cost_calculator.py b/litellm/cost_calculator.py index 07676d8a83..58600ea14f 100644 --- a/litellm/cost_calculator.py +++ b/litellm/cost_calculator.py @@ -44,7 +44,12 @@ from litellm.llms.vertex_ai.cost_calculator import cost_router as google_cost_ro from litellm.llms.vertex_ai.image_generation.cost_calculator import ( cost_calculator as vertex_ai_image_cost_calculator, ) -from litellm.types.llms.openai import HttpxBinaryResponseContent +from litellm.responses.utils import ResponseAPILoggingUtils +from litellm.types.llms.openai import ( + HttpxBinaryResponseContent, + ResponseAPIUsage, + ResponsesAPIResponse, +) from litellm.types.rerank import RerankBilledUnits, RerankResponse from litellm.types.utils import ( CallTypesLiteral, @@ -239,6 +244,15 @@ def cost_per_token( # noqa: PLR0915 custom_llm_provider=custom_llm_provider, billed_units=rerank_billed_units, ) + elif ( + call_type == "aretrieve_batch" + or call_type == "retrieve_batch" + or call_type == CallTypes.aretrieve_batch + or call_type == CallTypes.retrieve_batch + ): + return batch_cost_calculator( + usage=usage_block, model=model, custom_llm_provider=custom_llm_provider + ) elif call_type == "atranscription" or call_type == "transcription": return openai_cost_per_second( model=model, @@ -399,9 +413,12 @@ def _select_model_name_for_cost_calc( if base_model is not None: return_model = base_model - completion_response_model: Optional[str] = getattr( - completion_response, "model", None - ) + completion_response_model: Optional[str] = None + if completion_response is not None: + if isinstance(completion_response, BaseModel): + completion_response_model = getattr(completion_response, "model", None) + elif isinstance(completion_response, dict): + completion_response_model = completion_response.get("model", None) hidden_params: Optional[dict] = getattr(completion_response, "_hidden_params", None) if completion_response_model is None and hidden_params is not None: if ( @@ -452,6 +469,13 @@ def _get_usage_object( return usage_obj +def _is_known_usage_objects(usage_obj): + """Returns True if the usage obj is a known Usage type""" + return isinstance(usage_obj, litellm.Usage) or 
isinstance( + usage_obj, ResponseAPIUsage + ) + + def _infer_call_type( call_type: Optional[CallTypesLiteral], completion_response: Any ) -> Optional[CallTypesLiteral]: @@ -561,9 +585,7 @@ def completion_cost( # noqa: PLR0915 base_model=base_model, ) - verbose_logger.debug( - f"completion_response _select_model_name_for_cost_calc: {model}" - ) + verbose_logger.info(f"selected model name for cost calculation: {model}") if completion_response is not None and ( isinstance(completion_response, BaseModel) @@ -575,8 +597,8 @@ def completion_cost( # noqa: PLR0915 ) else: usage_obj = getattr(completion_response, "usage", {}) - if isinstance(usage_obj, BaseModel) and not isinstance( - usage_obj, litellm.Usage + if isinstance(usage_obj, BaseModel) and not _is_known_usage_objects( + usage_obj=usage_obj ): setattr( completion_response, @@ -589,6 +611,14 @@ def completion_cost( # noqa: PLR0915 _usage = usage_obj.model_dump() else: _usage = usage_obj + + if ResponseAPILoggingUtils._is_response_api_usage(_usage): + _usage = ( + ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage( + _usage + ).model_dump() + ) + # get input/output tokens from completion_response prompt_tokens = _usage.get("prompt_tokens", 0) completion_tokens = _usage.get("completion_tokens", 0) @@ -787,6 +817,7 @@ def response_cost_calculator( TextCompletionResponse, HttpxBinaryResponseContent, RerankResponse, + ResponsesAPIResponse, ], model: str, custom_llm_provider: Optional[str], @@ -957,3 +988,54 @@ def default_image_cost_calculator( ) return cost_info["input_cost_per_pixel"] * height * width * n + + +def batch_cost_calculator( + usage: Usage, + model: str, + custom_llm_provider: Optional[str] = None, +) -> Tuple[float, float]: + """ + Calculate the cost of a batch job + """ + + _, custom_llm_provider, _, _ = litellm.get_llm_provider( + model=model, custom_llm_provider=custom_llm_provider + ) + + verbose_logger.info( + "Calculating batch cost per token. 
model=%s, custom_llm_provider=%s", + model, + custom_llm_provider, + ) + + try: + model_info: Optional[ModelInfo] = litellm.get_model_info( + model=model, custom_llm_provider=custom_llm_provider + ) + except Exception: + model_info = None + + if not model_info: + return 0.0, 0.0 + + input_cost_per_token_batches = model_info.get("input_cost_per_token_batches") + input_cost_per_token = model_info.get("input_cost_per_token") + output_cost_per_token_batches = model_info.get("output_cost_per_token_batches") + output_cost_per_token = model_info.get("output_cost_per_token") + total_prompt_cost = 0.0 + total_completion_cost = 0.0 + if input_cost_per_token_batches: + total_prompt_cost = usage.prompt_tokens * input_cost_per_token_batches + elif input_cost_per_token: + total_prompt_cost = ( + usage.prompt_tokens * (input_cost_per_token) / 2 + ) # batch cost is usually half of the regular token cost + if output_cost_per_token_batches: + total_completion_cost = usage.completion_tokens * output_cost_per_token_batches + elif output_cost_per_token: + total_completion_cost = ( + usage.completion_tokens * (output_cost_per_token) / 2 + ) # batch cost is usually half of the regular token cost + + return total_prompt_cost, total_completion_cost diff --git a/litellm/exceptions.py b/litellm/exceptions.py index f4166a5837..6a927f0712 100644 --- a/litellm/exceptions.py +++ b/litellm/exceptions.py @@ -118,6 +118,7 @@ class BadRequestError(openai.BadRequestError): # type: ignore litellm_debug_info: Optional[str] = None, max_retries: Optional[int] = None, num_retries: Optional[int] = None, + body: Optional[dict] = None, ): self.status_code = 400 self.message = "litellm.BadRequestError: {}".format(message) @@ -133,7 +134,7 @@ class BadRequestError(openai.BadRequestError): # type: ignore self.max_retries = max_retries self.num_retries = num_retries super().__init__( - self.message, response=response, body=None + self.message, response=response, body=body ) # Call the base class constructor with the parameters it needs def __str__(self): diff --git a/litellm/files/main.py b/litellm/files/main.py index 9f81b2e385..db9a11ced1 100644 --- a/litellm/files/main.py +++ b/litellm/files/main.py @@ -25,7 +25,7 @@ from litellm.types.llms.openai import ( HttpxBinaryResponseContent, ) from litellm.types.router import * -from litellm.utils import supports_httpx_timeout +from litellm.utils import get_litellm_params, supports_httpx_timeout ####### ENVIRONMENT VARIABLES ################### openai_files_instance = OpenAIFilesAPI() @@ -546,6 +546,7 @@ def create_file( try: _is_async = kwargs.pop("acreate_file", False) is True optional_params = GenericLiteLLMParams(**kwargs) + litellm_params_dict = get_litellm_params(**kwargs) ### TIMEOUT LOGIC ### timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600 @@ -630,6 +631,7 @@ def create_file( timeout=timeout, max_retries=optional_params.max_retries, create_file_data=_create_file_request, + litellm_params=litellm_params_dict, ) elif custom_llm_provider == "vertex_ai": api_base = optional_params.api_base or "" @@ -816,7 +818,7 @@ def file_content( ) else: raise litellm.exceptions.BadRequestError( - message="LiteLLM doesn't support {} for 'file_content'. Only 'openai' and 'azure' are supported.".format( + message="LiteLLM doesn't support {} for 'custom_llm_provider'. 
Supported providers are 'openai', 'azure', 'vertex_ai'.".format( custom_llm_provider ), model="n/a", diff --git a/litellm/integrations/athina.py b/litellm/integrations/athina.py index 754e980c2a..705dc11f1d 100644 --- a/litellm/integrations/athina.py +++ b/litellm/integrations/athina.py @@ -23,6 +23,9 @@ class AthinaLogger: "context", "expected_response", "user_query", + "tags", + "user_feedback", + "model_options", "custom_attributes", ] @@ -81,7 +84,6 @@ class AthinaLogger: for key in self.additional_keys: if key in metadata: data[key] = metadata[key] - response = litellm.module_level_client.post( self.athina_logging_url, headers=self.headers, diff --git a/litellm/integrations/custom_logger.py b/litellm/integrations/custom_logger.py index 457c0537bd..e115b7496d 100644 --- a/litellm/integrations/custom_logger.py +++ b/litellm/integrations/custom_logger.py @@ -239,6 +239,7 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac "image_generation", "moderation", "audio_transcription", + "responses", ], ) -> Any: pass diff --git a/litellm/litellm_core_utils/core_helpers.py b/litellm/litellm_core_utils/core_helpers.py index ceb150946c..2036b93692 100644 --- a/litellm/litellm_core_utils/core_helpers.py +++ b/litellm/litellm_core_utils/core_helpers.py @@ -73,8 +73,19 @@ def remove_index_from_tool_calls( def get_litellm_metadata_from_kwargs(kwargs: dict): """ Helper to get litellm metadata from all litellm request kwargs + + Return `litellm_metadata` if it exists, otherwise return `metadata` """ - return kwargs.get("litellm_params", {}).get("metadata", {}) + litellm_params = kwargs.get("litellm_params", {}) + if litellm_params: + metadata = litellm_params.get("metadata", {}) + litellm_metadata = litellm_params.get("litellm_metadata", {}) + if litellm_metadata: + return litellm_metadata + elif metadata: + return metadata + + return {} # Helper functions used for OTEL logging diff --git a/litellm/litellm_core_utils/credential_accessor.py b/litellm/litellm_core_utils/credential_accessor.py new file mode 100644 index 0000000000..d87dcc116b --- /dev/null +++ b/litellm/litellm_core_utils/credential_accessor.py @@ -0,0 +1,34 @@ +"""Utils for accessing credentials.""" + +from typing import List + +import litellm +from litellm.types.utils import CredentialItem + + +class CredentialAccessor: + @staticmethod + def get_credential_values(credential_name: str) -> dict: + """Safe accessor for credentials.""" + if not litellm.credential_list: + return {} + for credential in litellm.credential_list: + if credential.credential_name == credential_name: + return credential.credential_values.copy() + return {} + + @staticmethod + def upsert_credentials(credentials: List[CredentialItem]): + """Add a credential to the list of credentials.""" + + credential_names = [cred.credential_name for cred in litellm.credential_list] + + for credential in credentials: + if credential.credential_name in credential_names: + # Find and replace the existing credential in the list + for i, existing_cred in enumerate(litellm.credential_list): + if existing_cred.credential_name == credential.credential_name: + litellm.credential_list[i] = credential + break + else: + litellm.credential_list.append(credential) diff --git a/litellm/litellm_core_utils/exception_mapping_utils.py b/litellm/litellm_core_utils/exception_mapping_utils.py index 1f5e0147b9..7a0cffab7b 100644 --- a/litellm/litellm_core_utils/exception_mapping_utils.py +++ b/litellm/litellm_core_utils/exception_mapping_utils.py @@ -331,6 +331,7 @@ def 
diff --git a/litellm/litellm_core_utils/exception_mapping_utils.py b/litellm/litellm_core_utils/exception_mapping_utils.py
index 1f5e0147b9..7a0cffab7b 100644
--- a/litellm/litellm_core_utils/exception_mapping_utils.py
+++ b/litellm/litellm_core_utils/exception_mapping_utils.py
@@ -331,6 +331,7 @@ def exception_type(  # type: ignore  # noqa: PLR0915
                         model=model,
                         response=getattr(original_exception, "response", None),
                         litellm_debug_info=extra_information,
+                        body=getattr(original_exception, "body", None),
                     )
                 elif (
                     "Web server is returning an unknown error" in error_str
@@ -421,6 +422,7 @@ def exception_type(  # type: ignore  # noqa: PLR0915
                         llm_provider=custom_llm_provider,
                         response=getattr(original_exception, "response", None),
                         litellm_debug_info=extra_information,
+                        body=getattr(original_exception, "body", None),
                     )
                 elif original_exception.status_code == 429:
                     exception_mapping_worked = True
@@ -1960,6 +1962,7 @@ def exception_type(  # type: ignore  # noqa: PLR0915
                         model=model,
                         litellm_debug_info=extra_information,
                         response=getattr(original_exception, "response", None),
+                        body=getattr(original_exception, "body", None),
                     )
                 elif (
                     "The api_key client option must be set either by passing api_key to the client or by setting"
@@ -1991,6 +1994,7 @@ def exception_type(  # type: ignore  # noqa: PLR0915
                         model=model,
                         litellm_debug_info=extra_information,
                         response=getattr(original_exception, "response", None),
+                        body=getattr(original_exception, "body", None),
                     )
                 elif original_exception.status_code == 401:
                     exception_mapping_worked = True
diff --git a/litellm/litellm_core_utils/get_litellm_params.py b/litellm/litellm_core_utils/get_litellm_params.py
index c0fbb1cb97..4f2f43f0de 100644
--- a/litellm/litellm_core_utils/get_litellm_params.py
+++ b/litellm/litellm_core_utils/get_litellm_params.py
@@ -57,6 +57,9 @@ def get_litellm_params(
     prompt_variables: Optional[dict] = None,
     async_call: Optional[bool] = None,
     ssl_verify: Optional[bool] = None,
+    merge_reasoning_content_in_choices: Optional[bool] = None,
+    api_version: Optional[str] = None,
+    max_retries: Optional[int] = None,
     **kwargs,
 ) -> dict:
     litellm_params = {
@@ -97,5 +100,15 @@ def get_litellm_params(
         "prompt_variables": prompt_variables,
         "async_call": async_call,
         "ssl_verify": ssl_verify,
+        "merge_reasoning_content_in_choices": merge_reasoning_content_in_choices,
+        "api_version": api_version,
+        "azure_ad_token": kwargs.get("azure_ad_token"),
+        "tenant_id": kwargs.get("tenant_id"),
+        "client_id": kwargs.get("client_id"),
+        "client_secret": kwargs.get("client_secret"),
+        "azure_username": kwargs.get("azure_username"),
+        "azure_password": kwargs.get("azure_password"),
+        "max_retries": max_retries,
+        "timeout": kwargs.get("timeout"),
     }
     return litellm_params
diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py
index d2fd5b6ca8..18af639918 100644
--- a/litellm/litellm_core_utils/litellm_logging.py
+++ b/litellm/litellm_core_utils/litellm_logging.py
@@ -25,6 +25,7 @@ from litellm import (
     turn_off_message_logging,
 )
 from litellm._logging import _is_debugging_on, verbose_logger
+from litellm.batches.batch_utils import _handle_completed_batch
 from litellm.caching.caching import DualCache, InMemoryCache
 from litellm.caching.caching_handler import LLMCachingHandler
 from litellm.cost_calculator import _select_model_name_for_cost_calc
@@ -38,11 +39,14 @@ from litellm.litellm_core_utils.redact_messages import (
     redact_message_input_output_from_custom_logger,
     redact_message_input_output_from_logging,
 )
+from litellm.responses.utils import ResponseAPILoggingUtils
 from litellm.types.llms.openai import (
     AllMessageValues,
     Batch,
     FineTuningJob,
     HttpxBinaryResponseContent,
+    ResponseCompletedEvent,
+    ResponsesAPIResponse,
 )
 from litellm.types.rerank import RerankResponse
 from litellm.types.router import SPECIAL_MODEL_INFO_PARAMS
@@ -50,9 +54,11 @@ from litellm.types.utils import (
     CallTypes,
     EmbeddingResponse,
     ImageResponse,
+    LiteLLMBatch,
     LiteLLMLoggingBaseClass,
     ModelResponse,
     ModelResponseStream,
+    RawRequestTypedDict,
     StandardCallbackDynamicParams,
     StandardLoggingAdditionalHeaders,
     StandardLoggingHiddenParams,
@@ -203,6 +209,7 @@ class Logging(LiteLLMLoggingBaseClass):
         ] = None,
         applied_guardrails: Optional[List[str]] = None,
         kwargs: Optional[Dict] = None,
+        log_raw_request_response: bool = False,
     ):
         _input: Optional[str] = messages  # save original value of messages
         if messages is not None:
@@ -231,6 +238,7 @@ class Logging(LiteLLMLoggingBaseClass):
         self.sync_streaming_chunks: List[Any] = (
             []
         )  # for generating complete stream response
+        self.log_raw_request_response = log_raw_request_response

         # Initialize dynamic callbacks
         self.dynamic_input_callbacks: Optional[
@@ -451,6 +459,18 @@ class Logging(LiteLLMLoggingBaseClass):

         return model, messages, non_default_params

+    def _get_raw_request_body(self, data: Optional[Union[dict, str]]) -> dict:
+        if data is None:
+            return {"error": "Received empty request body"}
+        if isinstance(data, str):
+            try:
+                return json.loads(data)
+            except Exception:
+                return {
+                    "error": "Unable to parse raw request body. Got - {}".format(data)
+                }
+        return data
+
     def _pre_call(self, input, api_key, model=None, additional_args={}):
         """
         Common helper function across the sync + async pre-call function
@@ -466,6 +486,7 @@ class Logging(LiteLLMLoggingBaseClass):
             self.model_call_details["model"] = model

     def pre_call(self, input, api_key, model=None, additional_args={}):  # noqa: PLR0915
+        # Log the exact input to the LLM API
         litellm.error_logs["PRE_CALL"] = locals()
         try:
@@ -483,28 +504,54 @@
                 additional_args=additional_args,
             )
             # log raw request to provider (like LangFuse) -- if opted in.
-            if log_raw_request_response is True:
+            if (
+                self.log_raw_request_response is True
+                or log_raw_request_response is True
+            ):
+
                 _litellm_params = self.model_call_details.get("litellm_params", {})
                 _metadata = _litellm_params.get("metadata", {}) or {}
                 try:
                     # [Non-blocking Extra Debug Information in metadata]
-                    if (
-                        turn_off_message_logging is not None
-                        and turn_off_message_logging is True
-                    ):
+                    if turn_off_message_logging is True:
+
                         _metadata["raw_request"] = (
                             "redacted by litellm.
\ 'litellm.turn_off_message_logging=True'" ) else: + curl_command = self._get_request_curl_command( api_base=additional_args.get("api_base", ""), headers=additional_args.get("headers", {}), additional_args=additional_args, data=additional_args.get("complete_input_dict", {}), ) + _metadata["raw_request"] = str(curl_command) + # split up, so it's easier to parse in the UI + self.model_call_details["raw_request_typed_dict"] = ( + RawRequestTypedDict( + raw_request_api_base=str( + additional_args.get("api_base") or "" + ), + raw_request_body=self._get_raw_request_body( + additional_args.get("complete_input_dict", {}) + ), + raw_request_headers=self._get_masked_headers( + additional_args.get("headers", {}) or {}, + ignore_sensitive_headers=True, + ), + error=None, + ) + ) except Exception as e: + self.model_call_details["raw_request_typed_dict"] = ( + RawRequestTypedDict( + error=str(e), + ) + ) + traceback.print_exc() _metadata["raw_request"] = ( "Unable to Log \ raw request: {}".format( @@ -637,9 +684,14 @@ class Logging(LiteLLMLoggingBaseClass): ) verbose_logger.debug(f"\033[92m{curl_command}\033[0m\n") + def _get_request_body(self, data: dict) -> str: + return str(data) + def _get_request_curl_command( - self, api_base: str, headers: dict, additional_args: dict, data: dict + self, api_base: str, headers: Optional[dict], additional_args: dict, data: dict ) -> str: + if headers is None: + headers = {} curl_command = "\n\nPOST Request Sent from LiteLLM:\n" curl_command += "curl -X POST \\\n" curl_command += f"{api_base} \\\n" @@ -647,11 +699,10 @@ class Logging(LiteLLMLoggingBaseClass): formatted_headers = " ".join( [f"-H '{k}: {v}'" for k, v in masked_headers.items()] ) - curl_command += ( f"{formatted_headers} \\\n" if formatted_headers.strip() != "" else "" ) - curl_command += f"-d '{str(data)}'\n" + curl_command += f"-d '{self._get_request_body(data)}'\n" if additional_args.get("request_str", None) is not None: # print the sagemaker / bedrock client request curl_command = "\nRequest Sent from LiteLLM:\n" @@ -660,12 +711,20 @@ class Logging(LiteLLMLoggingBaseClass): curl_command = str(self.model_call_details) return curl_command - def _get_masked_headers(self, headers: dict): + def _get_masked_headers( + self, headers: dict, ignore_sensitive_headers: bool = False + ) -> dict: """ Internal debugging helper function Masks the headers of the request sent from LiteLLM """ + sensitive_keywords = [ + "authorization", + "token", + "key", + "secret", + ] return { k: ( (v[:-44] + "*" * 44) @@ -673,6 +732,11 @@ class Logging(LiteLLMLoggingBaseClass): else "*****" ) for k, v in headers.items() + if not ignore_sensitive_headers + or not any( + sensitive_keyword in k.lower() + for sensitive_keyword in sensitive_keywords + ) } def post_call( @@ -790,6 +854,8 @@ class Logging(LiteLLMLoggingBaseClass): RerankResponse, Batch, FineTuningJob, + ResponsesAPIResponse, + ResponseCompletedEvent, ], cache_hit: Optional[bool] = None, ) -> Optional[float]: @@ -871,6 +937,24 @@ class Logging(LiteLLMLoggingBaseClass): return None + async def _response_cost_calculator_async( + self, + result: Union[ + ModelResponse, + ModelResponseStream, + EmbeddingResponse, + ImageResponse, + TranscriptionResponse, + TextCompletionResponse, + HttpxBinaryResponseContent, + RerankResponse, + Batch, + FineTuningJob, + ], + cache_hit: Optional[bool] = None, + ) -> Optional[float]: + return self._response_cost_calculator(result=result, cache_hit=cache_hit) + def should_run_callback( self, callback: litellm.CALLBACK_TYPES, litellm_params: 
dict, event_hook: str ) -> bool: @@ -912,13 +996,16 @@ class Logging(LiteLLMLoggingBaseClass): self.model_call_details["log_event_type"] = "successful_api_call" self.model_call_details["end_time"] = end_time self.model_call_details["cache_hit"] = cache_hit + + if self.call_type == CallTypes.anthropic_messages.value: + result = self._handle_anthropic_messages_response_logging(result=result) ## if model in model cost map - log the response cost ## else set cost to None if ( standard_logging_object is None and result is not None and self.stream is not True - ): # handle streaming separately + ): if ( isinstance(result, ModelResponse) or isinstance(result, ModelResponseStream) @@ -928,8 +1015,9 @@ class Logging(LiteLLMLoggingBaseClass): or isinstance(result, TextCompletionResponse) or isinstance(result, HttpxBinaryResponseContent) # tts or isinstance(result, RerankResponse) - or isinstance(result, Batch) or isinstance(result, FineTuningJob) + or isinstance(result, LiteLLMBatch) + or isinstance(result, ResponsesAPIResponse) ): ## HIDDEN PARAMS ## hidden_params = getattr(result, "_hidden_params", {}) @@ -1029,7 +1117,7 @@ class Logging(LiteLLMLoggingBaseClass): ## BUILD COMPLETE STREAMED RESPONSE complete_streaming_response: Optional[ - Union[ModelResponse, TextCompletionResponse] + Union[ModelResponse, TextCompletionResponse, ResponsesAPIResponse] ] = None if "complete_streaming_response" in self.model_call_details: return # break out of this. @@ -1525,6 +1613,20 @@ class Logging(LiteLLMLoggingBaseClass): print_verbose( "Logging Details LiteLLM-Async Success Call, cache_hit={}".format(cache_hit) ) + + ## CALCULATE COST FOR BATCH JOBS + if self.call_type == CallTypes.aretrieve_batch.value and isinstance( + result, LiteLLMBatch + ): + + response_cost, batch_usage, batch_models = await _handle_completed_batch( + batch=result, custom_llm_provider=self.custom_llm_provider + ) + + result._hidden_params["response_cost"] = response_cost + result._hidden_params["batch_models"] = batch_models + result.usage = batch_usage + start_time, end_time, result = self._success_handler_helper_fn( start_time=start_time, end_time=end_time, @@ -1532,11 +1634,12 @@ class Logging(LiteLLMLoggingBaseClass): cache_hit=cache_hit, standard_logging_object=kwargs.get("standard_logging_object", None), ) + ## BUILD COMPLETE STREAMED RESPONSE if "async_complete_streaming_response" in self.model_call_details: return # break out of this. 
        complete_streaming_response: Optional[
-            Union[ModelResponse, TextCompletionResponse]
+            Union[ModelResponse, TextCompletionResponse, ResponsesAPIResponse]
        ] = self._get_assembled_streaming_response(
            result=result,
            start_time=start_time,
@@ -2246,16 +2349,24 @@ class Logging(LiteLLMLoggingBaseClass):

     def _get_assembled_streaming_response(
         self,
-        result: Union[ModelResponse, TextCompletionResponse, ModelResponseStream, Any],
+        result: Union[
+            ModelResponse,
+            TextCompletionResponse,
+            ModelResponseStream,
+            ResponseCompletedEvent,
+            Any,
+        ],
         start_time: datetime.datetime,
         end_time: datetime.datetime,
         is_async: bool,
         streaming_chunks: List[Any],
-    ) -> Optional[Union[ModelResponse, TextCompletionResponse]]:
+    ) -> Optional[Union[ModelResponse, TextCompletionResponse, ResponsesAPIResponse]]:
         if isinstance(result, ModelResponse):
             return result
         elif isinstance(result, TextCompletionResponse):
             return result
+        elif isinstance(result, ResponseCompletedEvent):
+            return result.response
         elif isinstance(result, ModelResponseStream):
             complete_streaming_response: Optional[
                 Union[ModelResponse, TextCompletionResponse]
@@ -2270,6 +2381,37 @@ class Logging(LiteLLMLoggingBaseClass):
             return complete_streaming_response
         return None

+    def _handle_anthropic_messages_response_logging(self, result: Any) -> ModelResponse:
+        """
+        Handles logging for Anthropic messages responses.
+
+        Args:
+            result: The response object from the model call
+
+        Returns:
+            The response object from the model call
+
+        - For non-streaming responses, we need to transform the response to a ModelResponse object.
+        - For streaming responses, the anthropic_messages handler calls success_handler with an assembled ModelResponse.
+        """
+        if self.stream and isinstance(result, ModelResponse):
+            return result
+
+        result = litellm.AnthropicConfig().transform_response(
+            raw_response=self.model_call_details["httpx_response"],
+            model_response=litellm.ModelResponse(),
+            model=self.model,
+            messages=[],
+            logging_obj=self,
+            optional_params={},
+            api_key="",
+            request_data={},
+            encoding=litellm.encoding,
+            json_mode=False,
+            litellm_params={},
+        )
+        return result
+

 def set_callbacks(callback_list, function_id=None):  # noqa: PLR0915
     """
@@ -2983,6 +3125,12 @@ class StandardLoggingPayloadSetup:
         elif isinstance(usage, Usage):
             return usage
         elif isinstance(usage, dict):
+            if ResponseAPILoggingUtils._is_response_api_usage(usage):
+                return (
+                    ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage(
+                        usage
+                    )
+                )
             return Usage(**usage)

         raise ValueError(f"usage is required, got={usage} of type {type(usage)}")
@@ -3086,6 +3234,7 @@ class StandardLoggingPayloadSetup:
             response_cost=None,
             additional_headers=None,
             litellm_overhead_time_ms=None,
+            batch_models=None,
         )
         if hidden_params is not None:
             for key in StandardLoggingHiddenParams.__annotations__.keys():
@@ -3199,6 +3348,7 @@ def get_standard_logging_object_payload(
                 api_base=None,
                 response_cost=None,
                 litellm_overhead_time_ms=None,
+                batch_models=None,
             )
         )

@@ -3483,6 +3633,7 @@ def create_dummy_standard_logging_payload() -> StandardLoggingPayload:
         response_cost=None,
         additional_headers=None,
         litellm_overhead_time_ms=None,
+        batch_models=None,
     )

     # Convert numeric values to appropriate types
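For context, a hedged sketch of what the Responses-API usage normalization above amounts to. Field names follow the OpenAI Responses API; the real helpers live in `litellm/responses/utils.py` and return a `Usage` object, so this dict-based mirror is illustrative only:

```python
# Hedged sketch: Responses API usage -> chat-style usage fields.
def transform_response_api_usage(usage: dict) -> dict:
    # The Responses API reports input_tokens/output_tokens; chat-style
    # logging expects prompt_tokens/completion_tokens.
    return {
        "prompt_tokens": usage.get("input_tokens", 0),
        "completion_tokens": usage.get("output_tokens", 0),
        "total_tokens": usage.get("total_tokens", 0),
    }

assert transform_response_api_usage(
    {"input_tokens": 10, "output_tokens": 5, "total_tokens": 15}
) == {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}
```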
diff --git a/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py b/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py
index 7db1411f84..ebb1032a19 100644
--- a/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py
+++ b/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py
@@ -9,6 +9,7 @@ from typing import Dict, Iterable, List, Literal, Optional, Tuple, Union
 import litellm
 from litellm._logging import verbose_logger
 from litellm.constants import RESPONSE_FORMAT_TOOL_NAME
+from litellm.types.llms.openai import ChatCompletionThinkingBlock
 from litellm.types.utils import (
     ChatCompletionDeltaToolCall,
     ChatCompletionMessageToolCall,
@@ -128,12 +129,7 @@ def convert_to_streaming_response(response_object: Optional[dict] = None):
     model_response_object = ModelResponse(stream=True)
     choice_list = []
     for idx, choice in enumerate(response_object["choices"]):
-        delta = Delta(
-            content=choice["message"].get("content", None),
-            role=choice["message"]["role"],
-            function_call=choice["message"].get("function_call", None),
-            tool_calls=choice["message"].get("tool_calls", None),
-        )
+        delta = Delta(**choice["message"])
         finish_reason = choice.get("finish_reason", None)
         if finish_reason is None:
             # gpt-4 vision can return 'finish_reason' or 'finish_details'
@@ -243,6 +239,24 @@ def _parse_content_for_reasoning(
     return None, message_text


+def _extract_reasoning_content(message: dict) -> Tuple[Optional[str], Optional[str]]:
+    """
+    Extract reasoning content and main content from a message.
+
+    Args:
+        message (dict): The message dictionary that may contain reasoning_content
+
+    Returns:
+        tuple[Optional[str], Optional[str]]: A tuple of (reasoning_content, content)
+    """
+    if "reasoning_content" in message:
+        return message["reasoning_content"], message["content"]
+    elif "reasoning" in message:
+        return message["reasoning"], message["content"]
+    else:
+        return _parse_content_for_reasoning(message.get("content"))
+
+
 class LiteLLMResponseObjectHandler:

     @staticmethod
@@ -456,11 +470,16 @@ def convert_to_model_response_object(  # noqa: PLR0915
                     provider_specific_fields[field] = choice["message"][field]

                 # Handle reasoning models that display `reasoning_content` within `content`
-
-                reasoning_content, content = _parse_content_for_reasoning(
-                    choice["message"].get("content")
+                reasoning_content, content = _extract_reasoning_content(
+                    choice["message"]
                 )

+                # Handle thinking models that display `thinking_blocks` within `content`
+                thinking_blocks: Optional[List[ChatCompletionThinkingBlock]] = None
+                if "thinking_blocks" in choice["message"]:
+                    thinking_blocks = choice["message"]["thinking_blocks"]
+                    provider_specific_fields["thinking_blocks"] = thinking_blocks
+
                 if reasoning_content:
                     provider_specific_fields["reasoning_content"] = (
                         reasoning_content
@@ -474,6 +493,7 @@ def convert_to_model_response_object(  # noqa: PLR0915
                     audio=choice["message"].get("audio", None),
                     provider_specific_fields=provider_specific_fields,
                     reasoning_content=reasoning_content,
+                    thinking_blocks=thinking_blocks,
                 )
                 finish_reason = choice.get("finish_reason", None)
                 if finish_reason is None:
diff --git a/litellm/litellm_core_utils/prompt_templates/factory.py b/litellm/litellm_core_utils/prompt_templates/factory.py
index 03d64dd4b7..df7aa2cbd0 100644
--- a/litellm/litellm_core_utils/prompt_templates/factory.py
+++ b/litellm/litellm_core_utils/prompt_templates/factory.py
@@ -187,53 +187,125 @@ def ollama_pt(
             final_prompt_value="### Response:",
             messages=messages,
         )
-    elif "llava" in model:
-        prompt = ""
-        images = []
-        for message in messages:
-            if isinstance(message["content"], str):
-                prompt += message["content"]
-            elif isinstance(message["content"], list):
-                # see https://docs.litellm.ai/docs/providers/openai#openai-vision-models
-                for element in message["content"]:
element in message["content"]: - if isinstance(element, dict): - if element["type"] == "text": - prompt += element["text"] - elif element["type"] == "image_url": - base64_image = convert_to_ollama_image( - element["image_url"]["url"] - ) - images.append(base64_image) - return {"prompt": prompt, "images": images} else: + user_message_types = {"user", "tool", "function"} + msg_i = 0 + images = [] prompt = "" - for message in messages: - role = message["role"] - content = message.get("content", "") + while msg_i < len(messages): + init_msg_i = msg_i + user_content_str = "" + ## MERGE CONSECUTIVE USER CONTENT ## + while ( + msg_i < len(messages) and messages[msg_i]["role"] in user_message_types + ): + msg_content = messages[msg_i].get("content") + if msg_content: + if isinstance(msg_content, list): + for m in msg_content: + if m.get("type", "") == "image_url": + if isinstance(m["image_url"], str): + images.append(m["image_url"]) + elif isinstance(m["image_url"], dict): + images.append(m["image_url"]["url"]) + elif m.get("type", "") == "text": + user_content_str += m["text"] + else: + # Tool message content will always be a string + user_content_str += msg_content - if "tool_calls" in message: - tool_calls = [] + msg_i += 1 - for call in message["tool_calls"]: - call_id: str = call["id"] - function_name: str = call["function"]["name"] - arguments = json.loads(call["function"]["arguments"]) + if user_content_str: + prompt += f"### User:\n{user_content_str}\n\n" - tool_calls.append( - { - "id": call_id, - "type": "function", - "function": {"name": function_name, "arguments": arguments}, - } + assistant_content_str = "" + ## MERGE CONSECUTIVE ASSISTANT CONTENT ## + while msg_i < len(messages) and messages[msg_i]["role"] == "assistant": + msg_content = messages[msg_i].get("content") + if msg_content: + if isinstance(msg_content, list): + for m in msg_content: + if m.get("type", "") == "text": + assistant_content_str += m["text"] + elif isinstance(msg_content, str): + # Tool message content will always be a string + assistant_content_str += msg_content + + tool_calls = messages[msg_i].get("tool_calls") + ollama_tool_calls = [] + if tool_calls: + for call in tool_calls: + call_id: str = call["id"] + function_name: str = call["function"]["name"] + arguments = json.loads(call["function"]["arguments"]) + + ollama_tool_calls.append( + { + "id": call_id, + "type": "function", + "function": { + "name": function_name, + "arguments": arguments, + }, + } + ) + + if ollama_tool_calls: + assistant_content_str += ( + f"Tool Calls: {json.dumps(ollama_tool_calls, indent=2)}" ) - prompt += f"### Assistant:\nTool Calls: {json.dumps(tool_calls, indent=2)}\n\n" + msg_i += 1 - elif "tool_call_id" in message: - prompt += f"### User:\n{message['content']}\n\n" + if assistant_content_str: + prompt += f"### Assistant:\n{assistant_content_str}\n\n" - elif content: - prompt += f"### {role.capitalize()}:\n{content}\n\n" + if msg_i == init_msg_i: # prevent infinite loops + raise litellm.BadRequestError( + message=BAD_MESSAGE_ERROR_STR + f"passed in {messages[msg_i]}", + model=model, + llm_provider="ollama", + ) + # prompt = "" + # images = [] + # for message in messages: + # if isinstance(message["content"], str): + # prompt += message["content"] + # elif isinstance(message["content"], list): + # # see https://docs.litellm.ai/docs/providers/openai#openai-vision-models + # for element in message["content"]: + # if isinstance(element, dict): + # if element["type"] == "text": + # prompt += element["text"] + # elif element["type"] 
== "image_url": + # base64_image = convert_to_ollama_image( + # element["image_url"]["url"] + # ) + # images.append(base64_image) + + # if "tool_calls" in message: + # tool_calls = [] + + # for call in message["tool_calls"]: + # call_id: str = call["id"] + # function_name: str = call["function"]["name"] + # arguments = json.loads(call["function"]["arguments"]) + + # tool_calls.append( + # { + # "id": call_id, + # "type": "function", + # "function": {"name": function_name, "arguments": arguments}, + # } + # ) + + # prompt += f"### Assistant:\nTool Calls: {json.dumps(tool_calls, indent=2)}\n\n" + + # elif "tool_call_id" in message: + # prompt += f"### User:\n{message['content']}\n\n" + + return {"prompt": prompt, "images": images} return prompt @@ -680,12 +752,13 @@ def convert_generic_image_chunk_to_openai_image_obj( Return: "data:image/jpeg;base64,{base64_image}" """ - return "data:{};{},{}".format( - image_chunk["media_type"], image_chunk["type"], image_chunk["data"] - ) + media_type = image_chunk["media_type"] + return "data:{};{},{}".format(media_type, image_chunk["type"], image_chunk["data"]) -def convert_to_anthropic_image_obj(openai_image_url: str) -> GenericImageParsingChunk: +def convert_to_anthropic_image_obj( + openai_image_url: str, format: Optional[str] +) -> GenericImageParsingChunk: """ Input: "image_url": "data:image/jpeg;base64,{base64_image}", @@ -702,7 +775,11 @@ def convert_to_anthropic_image_obj(openai_image_url: str) -> GenericImageParsing openai_image_url = convert_url_to_base64(url=openai_image_url) # Extract the media type and base64 data media_type, base64_data = openai_image_url.split("data:")[1].split(";base64,") - media_type = media_type.replace("\\/", "/") + + if format: + media_type = format + else: + media_type = media_type.replace("\\/", "/") return GenericImageParsingChunk( type="base64", @@ -820,11 +897,12 @@ def anthropic_messages_pt_xml(messages: list): if isinstance(messages[msg_i]["content"], list): for m in messages[msg_i]["content"]: if m.get("type", "") == "image_url": + format = m["image_url"].get("format") user_content.append( { "type": "image", "source": convert_to_anthropic_image_obj( - m["image_url"]["url"] + m["image_url"]["url"], format=format ), } ) @@ -1156,10 +1234,13 @@ def convert_to_anthropic_tool_result( ) elif content["type"] == "image_url": if isinstance(content["image_url"], str): - image_chunk = convert_to_anthropic_image_obj(content["image_url"]) - else: image_chunk = convert_to_anthropic_image_obj( - content["image_url"]["url"] + content["image_url"], format=None + ) + else: + format = content["image_url"].get("format") + image_chunk = convert_to_anthropic_image_obj( + content["image_url"]["url"], format=format ) anthropic_content_list.append( AnthropicMessagesImageParam( @@ -1282,6 +1363,7 @@ def add_cache_control_to_content( AnthropicMessagesImageParam, AnthropicMessagesTextParam, AnthropicMessagesDocumentParam, + ChatCompletionThinkingBlock, ], orignal_content_element: Union[dict, AllMessageValues], ): @@ -1317,6 +1399,7 @@ def _anthropic_content_element_factory( data=image_chunk["data"], ), ) + return _anthropic_content_element @@ -1368,13 +1451,16 @@ def anthropic_messages_pt( # noqa: PLR0915 for m in user_message_types_block["content"]: if m.get("type", "") == "image_url": m = cast(ChatCompletionImageObject, m) + format: Optional[str] = None if isinstance(m["image_url"], str): image_chunk = convert_to_anthropic_image_obj( - openai_image_url=m["image_url"] + openai_image_url=m["image_url"], format=None ) else: + format = 
m["image_url"].get("format") image_chunk = convert_to_anthropic_image_obj( - openai_image_url=m["image_url"]["url"] + openai_image_url=m["image_url"]["url"], + format=format, ) _anthropic_content_element = ( @@ -1454,12 +1540,23 @@ def anthropic_messages_pt( # noqa: PLR0915 assistant_content_block["content"], list ): for m in assistant_content_block["content"]: - # handle text + # handle thinking blocks + thinking_block = cast(str, m.get("thinking", "")) + text_block = cast(str, m.get("text", "")) if ( - m.get("type", "") == "text" and len(m.get("text", "")) > 0 + m.get("type", "") == "thinking" and len(thinking_block) > 0 + ): # don't pass empty text blocks. anthropic api raises errors. + anthropic_message: Union[ + ChatCompletionThinkingBlock, + AnthropicMessagesTextParam, + ] = cast(ChatCompletionThinkingBlock, m) + assistant_content.append(anthropic_message) + # handle text + elif ( + m.get("type", "") == "text" and len(text_block) > 0 ): # don't pass empty text blocks. anthropic api raises errors. anthropic_message = AnthropicMessagesTextParam( - type="text", text=m.get("text") + type="text", text=text_block ) _cached_message = add_cache_control_to_content( anthropic_content_element=anthropic_message, @@ -1512,6 +1609,7 @@ def anthropic_messages_pt( # noqa: PLR0915 msg_i += 1 if assistant_content: + new_messages.append({"role": "assistant", "content": assistant_content}) if msg_i == init_msg_i: # prevent infinite loops @@ -1520,17 +1618,6 @@ def anthropic_messages_pt( # noqa: PLR0915 model=model, llm_provider=llm_provider, ) - if not new_messages or new_messages[0]["role"] != "user": - if litellm.modify_params: - new_messages.insert( - 0, {"role": "user", "content": [{"type": "text", "text": "."}]} - ) - else: - raise Exception( - "Invalid first message={}. Should always start with 'role'='user' for Anthropic. System prompt is sent separately for Anthropic. set 'litellm.modify_params = True' or 'litellm_settings:modify_params = True' on proxy, to insert a placeholder user message - '.' as the first message, ".format( - new_messages - ) - ) if new_messages[-1]["role"] == "assistant": if isinstance(new_messages[-1]["content"], str): @@ -2301,8 +2388,11 @@ class BedrockImageProcessor: ) @classmethod - def process_image_sync(cls, image_url: str) -> BedrockContentBlock: + def process_image_sync( + cls, image_url: str, format: Optional[str] = None + ) -> BedrockContentBlock: """Synchronous image processing.""" + if "base64" in image_url: img_bytes, mime_type, image_format = cls._parse_base64_image(image_url) elif "http://" in image_url or "https://" in image_url: @@ -2313,11 +2403,17 @@ class BedrockImageProcessor: "Unsupported image type. Expected either image url or base64 encoded string" ) + if format: + mime_type = format + image_format = mime_type.split("/")[1] + image_format = cls._validate_format(mime_type, image_format) return cls._create_bedrock_block(img_bytes, mime_type, image_format) @classmethod - async def process_image_async(cls, image_url: str) -> BedrockContentBlock: + async def process_image_async( + cls, image_url: str, format: Optional[str] + ) -> BedrockContentBlock: """Asynchronous image processing.""" if "base64" in image_url: @@ -2332,6 +2428,10 @@ class BedrockImageProcessor: "Unsupported image type. 
Expected either image url or base64 encoded string" ) + if format: # override with user-defined params + mime_type = format + image_format = mime_type.split("/")[1] + image_format = cls._validate_format(mime_type, image_format) return cls._create_bedrock_block(img_bytes, mime_type, image_format) @@ -2819,12 +2919,14 @@ class BedrockConverseMessagesProcessor: _part = BedrockContentBlock(text=element["text"]) _parts.append(_part) elif element["type"] == "image_url": + format: Optional[str] = None if isinstance(element["image_url"], dict): image_url = element["image_url"]["url"] + format = element["image_url"].get("format") else: image_url = element["image_url"] _part = await BedrockImageProcessor.process_image_async( # type: ignore - image_url=image_url + image_url=image_url, format=format ) _parts.append(_part) # type: ignore _cache_point_block = ( @@ -2924,7 +3026,14 @@ class BedrockConverseMessagesProcessor: assistants_parts: List[BedrockContentBlock] = [] for element in _assistant_content: if isinstance(element, dict): - if element["type"] == "text": + if element["type"] == "thinking": + thinking_block = BedrockConverseMessagesProcessor.translate_thinking_blocks_to_reasoning_content_blocks( + thinking_blocks=[ + cast(ChatCompletionThinkingBlock, element) + ] + ) + assistants_parts.extend(thinking_block) + elif element["type"] == "text": assistants_part = BedrockContentBlock( text=element["text"] ) @@ -2974,7 +3083,7 @@ class BedrockConverseMessagesProcessor: reasoning_content_blocks: List[BedrockContentBlock] = [] for thinking_block in thinking_blocks: reasoning_text = thinking_block.get("thinking") - reasoning_signature = thinking_block.get("signature_delta") + reasoning_signature = thinking_block.get("signature") text_block = BedrockConverseReasoningTextBlock( text=reasoning_text or "", ) @@ -3050,12 +3159,15 @@ def _bedrock_converse_messages_pt( # noqa: PLR0915 _part = BedrockContentBlock(text=element["text"]) _parts.append(_part) elif element["type"] == "image_url": + format: Optional[str] = None if isinstance(element["image_url"], dict): image_url = element["image_url"]["url"] + format = element["image_url"].get("format") else: image_url = element["image_url"] _part = BedrockImageProcessor.process_image_sync( # type: ignore - image_url=image_url + image_url=image_url, + format=format, ) _parts.append(_part) # type: ignore _cache_point_block = ( @@ -3157,7 +3269,14 @@ def _bedrock_converse_messages_pt( # noqa: PLR0915 assistants_parts: List[BedrockContentBlock] = [] for element in _assistant_content: if isinstance(element, dict): - if element["type"] == "text": + if element["type"] == "thinking": + thinking_block = BedrockConverseMessagesProcessor.translate_thinking_blocks_to_reasoning_content_blocks( + thinking_blocks=[ + cast(ChatCompletionThinkingBlock, element) + ] + ) + assistants_parts.extend(thinking_block) + elif element["type"] == "text": assistants_part = BedrockContentBlock(text=element["text"]) assistants_parts.append(assistants_part) elif element["type"] == "image_url": diff --git a/litellm/litellm_core_utils/streaming_handler.py b/litellm/litellm_core_utils/streaming_handler.py index 4ce27bfeca..5d5a8bf256 100644 --- a/litellm/litellm_core_utils/streaming_handler.py +++ b/litellm/litellm_core_utils/streaming_handler.py @@ -15,6 +15,7 @@ from litellm import verbose_logger from litellm.litellm_core_utils.redact_messages import LiteLLMLoggingObject from litellm.litellm_core_utils.thread_pool_executor import executor from litellm.types.llms.openai import ChatCompletionChunk 
+from litellm.types.router import GenericLiteLLMParams
 from litellm.types.utils import Delta
 from litellm.types.utils import GenericStreamingChunk as GChunk
 from litellm.types.utils import (
@@ -70,6 +71,17 @@ class CustomStreamWrapper:
         self.completion_stream = completion_stream
         self.sent_first_chunk = False
         self.sent_last_chunk = False
+
+        litellm_params: GenericLiteLLMParams = GenericLiteLLMParams(
+            **self.logging_obj.model_call_details.get("litellm_params", {})
+        )
+        self.merge_reasoning_content_in_choices: bool = (
+            litellm_params.merge_reasoning_content_in_choices or False
+        )
+        self.sent_first_thinking_block = False
+        self.sent_last_thinking_block = False
+        self.thinking_content = ""
+
         self.system_fingerprint: Optional[str] = None
         self.received_finish_reason: Optional[str] = None
         self.intermittent_finish_reason: Optional[str] = (
@@ -87,12 +99,7 @@ class CustomStreamWrapper:
         self.holding_chunk = ""
         self.complete_response = ""
         self.response_uptil_now = ""
-        _model_info = (
-            self.logging_obj.model_call_details.get("litellm_params", {}).get(
-                "model_info", {}
-            )
-            or {}
-        )
+        _model_info: Dict = litellm_params.model_info or {}

         _api_base = get_api_base(
             model=model or "",
@@ -630,7 +637,10 @@ class CustomStreamWrapper:
             if isinstance(chunk, bytes):
                 chunk = chunk.decode("utf-8")
             if "text_output" in chunk:
-                response = chunk.replace("data: ", "").strip()
+                response = (
+                    CustomStreamWrapper._strip_sse_data_from_chunk(chunk) or ""
+                )
+                response = response.strip()
                 parsed_response = json.loads(response)
             else:
                 return {
@@ -873,6 +883,10 @@ class CustomStreamWrapper:
                 _index: Optional[int] = completion_obj.get("index")
                 if _index is not None:
                     model_response.choices[0].index = _index
+
+                self._optional_combine_thinking_block_in_choices(
+                    model_response=model_response
+                )
                 print_verbose(f"returning model_response: {model_response}")
                 return model_response
             else:
@@ -929,6 +943,48 @@ class CustomStreamWrapper:
             self.chunks.append(model_response)
         return

+    def _optional_combine_thinking_block_in_choices(
+        self, model_response: ModelResponseStream
+    ) -> None:
+        """
+        UIs like OpenWebUI expect to get one chunk with <think>...</think> tags in the chunk content
+
+        In place updates the model_response object with reasoning_content in content with <think>...</think> tags
+
+        Enabled when `merge_reasoning_content_in_choices=True` passed in request params
+        """
+        if self.merge_reasoning_content_in_choices is True:
+            reasoning_content = getattr(
+                model_response.choices[0].delta, "reasoning_content", None
+            )
+            if reasoning_content:
+                if self.sent_first_thinking_block is False:
+                    model_response.choices[0].delta.content += (
+                        "<think>" + reasoning_content
+                    )
+                    self.sent_first_thinking_block = True
+                elif (
+                    self.sent_first_thinking_block is True
+                    and hasattr(model_response.choices[0].delta, "reasoning_content")
+                    and model_response.choices[0].delta.reasoning_content
+                ):
+                    model_response.choices[0].delta.content = reasoning_content
+            elif (
+                self.sent_first_thinking_block is True
+                and not self.sent_last_thinking_block
+                and model_response.choices[0].delta.content
+            ):
+                model_response.choices[0].delta.content = (
+                    "</think>" + model_response.choices[0].delta.content
+                )
+                self.sent_last_thinking_block = True
+
+            if hasattr(model_response.choices[0].delta, "reasoning_content"):
+                del model_response.choices[0].delta.reasoning_content
+        return
+
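To make the merge behavior concrete, here is a runnable simulation of the state machine above on plain dicts (chunk shapes simplified; this is a mirror for illustration, not the shipped code path):

```python
def simulate_merge(deltas):
    # Mirrors _optional_combine_thinking_block_in_choices on plain dicts.
    out, sent_first, sent_last = [], False, False
    for d in deltas:
        content = d.get("content") or ""
        reasoning = d.get("reasoning_content")
        if reasoning:
            if not sent_first:
                content += "<think>" + reasoning  # open the tag once
                sent_first = True
            else:
                content = reasoning  # later reasoning deltas pass through
        elif sent_first and not sent_last and content:
            content = "</think>" + content  # close the tag on first real content
            sent_last = True
        out.append(content)
    return out

assert simulate_merge([
    {"content": "", "reasoning_content": "Let me check."},
    {"content": "", "reasoning_content": " Looks fine."},
    {"content": "The answer is 4."},
]) == ["<think>Let me check.", " Looks fine.", "</think>The answer is 4."]
```

Concatenated, the client sees `<think>Let me check. Looks fine.</think>The answer is 4.` in `delta.content`, which is the single-field format UIs like OpenWebUI render as a collapsible thinking block.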
    def chunk_creator(self, chunk: Any):  # type: ignore  # noqa: PLR0915
        model_response = self.model_response_creator()
        response_obj: Dict[str, Any] = {}
@@ -1775,6 +1831,42 @@ class CustomStreamWrapper:
                 extra_kwargs={},
             )

+    @staticmethod
+    def _strip_sse_data_from_chunk(chunk: Optional[str]) -> Optional[str]:
+        """
+        Strips the 'data: ' prefix from Server-Sent Events (SSE) chunks.
+
+        Some providers, like SageMaker, send it as `data:` with no trailing space, so both forms are handled.
+
+        SSE messages are prefixed with 'data: ' which is part of the protocol,
+        not the actual content from the LLM. This method removes that prefix
+        and returns the actual content.
+
+        Args:
+            chunk: The SSE chunk that may contain the 'data: ' prefix
+
+        Returns:
+            The chunk with the 'data: ' prefix removed, or the original chunk
+            if no prefix was found. Returns None if input is None.
+
+        See OpenAI Python Ref for this: https://github.com/openai/openai-python/blob/041bf5a8ec54da19aad0169671793c2078bd6173/openai/api_requestor.py#L100
+        """
+        if chunk is None:
+            return None
+
+        if isinstance(chunk, str):
+            # OpenAI sends `data: `
+            if chunk.startswith("data: "):
+                # Strip the 'data: ' prefix, including the trailing space
+                _length_of_sse_data_prefix = len("data: ")
+                return chunk[_length_of_sse_data_prefix:]
+            elif chunk.startswith("data:"):
+                # Sagemaker sends `data:`, no trailing whitespace
+                _length_of_sse_data_prefix = len("data:")
+                return chunk[_length_of_sse_data_prefix:]
+
+        return chunk
+
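A quick behavior check for the helper above, using the two prefix variants it is written to handle:

```python
from litellm.litellm_core_utils.streaming_handler import CustomStreamWrapper

# OpenAI-style 'data: ' and SageMaker-style 'data:' prefixes are removed;
# anything else passes through untouched.
assert CustomStreamWrapper._strip_sse_data_from_chunk('data: {"text_output": "hi"}') == '{"text_output": "hi"}'
assert CustomStreamWrapper._strip_sse_data_from_chunk('data:{"text_output": "hi"}') == '{"text_output": "hi"}'
assert CustomStreamWrapper._strip_sse_data_from_chunk("plain text") == "plain text"
assert CustomStreamWrapper._strip_sse_data_from_chunk(None) is None
```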
 def calculate_total_usage(chunks: List[ModelResponse]) -> Usage:
     """Assume the most recent usage chunk has the total usage up to that point."""
diff --git a/litellm/llms/aiohttp_openai/chat/transformation.py b/litellm/llms/aiohttp_openai/chat/transformation.py
index 625704dbea..212db1853b 100644
--- a/litellm/llms/aiohttp_openai/chat/transformation.py
+++ b/litellm/llms/aiohttp_openai/chat/transformation.py
@@ -29,6 +29,7 @@ class AiohttpOpenAIChatConfig(OpenAILikeChatConfig):
         api_base: Optional[str],
         model: str,
         optional_params: dict,
+        litellm_params: dict,
         stream: Optional[bool] = None,
     ) -> str:
         """
diff --git a/litellm/llms/anthropic/chat/handler.py b/litellm/llms/anthropic/chat/handler.py
index 46c8edae03..f2c5f390d7 100644
--- a/litellm/llms/anthropic/chat/handler.py
+++ b/litellm/llms/anthropic/chat/handler.py
@@ -474,7 +474,10 @@ class ModelResponseIterator:
         if len(self.content_blocks) == 0:
             return False

-        if self.content_blocks[0]["delta"]["type"] == "text_delta":
+        if (
+            self.content_blocks[0]["delta"]["type"] == "text_delta"
+            or self.content_blocks[0]["delta"]["type"] == "thinking_delta"
+        ):
             return False

         for block in self.content_blocks:
@@ -527,6 +530,7 @@ class ModelResponseIterator:
             provider_specific_fields = {}
             content_block = ContentBlockDelta(**chunk)  # type: ignore
             thinking_blocks: List[ChatCompletionThinkingBlock] = []
+            self.content_blocks.append(content_block)
             if "text" in content_block["delta"]:
                 text = content_block["delta"]["text"]
@@ -544,13 +548,13 @@ class ModelResponseIterator:
                 provider_specific_fields["citation"] = content_block["delta"]["citation"]
             elif (
                 "thinking" in content_block["delta"]
-                or "signature_delta" == content_block["delta"]
+                or "signature" in content_block["delta"]
             ):
                 thinking_blocks = [
                     ChatCompletionThinkingBlock(
                         type="thinking",
-                        thinking=content_block["delta"].get("thinking"),
-                        signature_delta=content_block["delta"].get("signature"),
+                        thinking=content_block["delta"].get("thinking") or "",
+                        signature=content_block["delta"].get("signature"),
                     )
                 ]
                 provider_specific_fields["thinking_blocks"] = thinking_blocks
@@ -616,9 +620,11 @@ class ModelResponseIterator:
                     "index": self.tool_index,
                 }
         elif type_chunk == "content_block_stop":
+            ContentBlockStop(**chunk)  # type: ignore
             # check if tool call content block
             is_empty = self.check_empty_tool_call_args()
+
             if is_empty:
                 tool_use = {
                     "id": None,
diff --git a/litellm/llms/anthropic/experimental_pass_through/messages/handler.py b/litellm/llms/anthropic/experimental_pass_through/messages/handler.py
new file mode 100644
index 0000000000..a7dfff74d9
--- /dev/null
+++ b/litellm/llms/anthropic/experimental_pass_through/messages/handler.py
@@ -0,0 +1,179 @@
+"""
+- call /messages on Anthropic API
+- Make streaming + non-streaming requests - just pass them through directly to Anthropic. No need to do anything special here
+- Ensure requests are logged in the DB - stream + non-stream
+
+"""
+
+import json
+from typing import Any, AsyncIterator, Dict, Optional, Union, cast
+
+import httpx
+
+import litellm
+from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+from litellm.llms.base_llm.anthropic_messages.transformation import (
+    BaseAnthropicMessagesConfig,
+)
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    get_async_httpx_client,
+)
+from litellm.types.router import GenericLiteLLMParams
+from litellm.types.utils import ProviderSpecificHeader
+from litellm.utils import ProviderConfigManager, client
+
+
+class AnthropicMessagesHandler:
+
+    @staticmethod
+    async def _handle_anthropic_streaming(
+        response: httpx.Response,
+        request_body: dict,
+        litellm_logging_obj: LiteLLMLoggingObj,
+    ) -> AsyncIterator:
+        """Helper function to handle Anthropic streaming responses using the existing logging handlers"""
+        from datetime import datetime
+
+        from litellm.proxy.pass_through_endpoints.streaming_handler import (
+            PassThroughStreamingHandler,
+        )
+        from litellm.proxy.pass_through_endpoints.success_handler import (
+            PassThroughEndpointLogging,
+        )
+        from litellm.proxy.pass_through_endpoints.types import EndpointType
+
+        # Create success handler object
+        passthrough_success_handler_obj = PassThroughEndpointLogging()
+
+        # Use the existing streaming handler for Anthropic
+        start_time = datetime.now()
+        return PassThroughStreamingHandler.chunk_processor(
+            response=response,
+            request_body=request_body,
+            litellm_logging_obj=litellm_logging_obj,
+            endpoint_type=EndpointType.ANTHROPIC,
+            start_time=start_time,
+            passthrough_success_handler_obj=passthrough_success_handler_obj,
+            url_route="/v1/messages",
+        )
+
+
+@client
+async def anthropic_messages(
+    api_key: str,
+    model: str,
+    stream: bool = False,
+    api_base: Optional[str] = None,
+    client: Optional[AsyncHTTPHandler] = None,
+    custom_llm_provider: Optional[str] = None,
+    **kwargs,
+) -> Union[Dict[str, Any], AsyncIterator]:
+    """
+    Makes Anthropic `/v1/messages` API calls in the Anthropic API spec format
+    """
+    # Use provided client or create a new one
+    optional_params = GenericLiteLLMParams(**kwargs)
+    model, _custom_llm_provider, dynamic_api_key, dynamic_api_base = (
+        litellm.get_llm_provider(
+            model=model,
+            custom_llm_provider=custom_llm_provider,
+            api_base=optional_params.api_base,
+            api_key=optional_params.api_key,
+        )
+    )
+    anthropic_messages_provider_config: Optional[BaseAnthropicMessagesConfig] = (
+        ProviderConfigManager.get_provider_anthropic_messages_config(
+            model=model,
+            provider=litellm.LlmProviders(_custom_llm_provider),
+        )
+    )
+    if anthropic_messages_provider_config is None:
+        raise ValueError(
+            f"Anthropic messages provider config not found for model: {model}"
+        )
+    if client is None or not isinstance(client, AsyncHTTPHandler):
+        async_httpx_client = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.ANTHROPIC
+        )
+    else:
+        async_httpx_client = client
+
+    litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj", None)
+
+    # Prepare headers
+    provider_specific_header = cast(
+        Optional[ProviderSpecificHeader], kwargs.get("provider_specific_header", None)
+    )
+    extra_headers = (
+        provider_specific_header.get("extra_headers", {})
+        if provider_specific_header
+        else {}
+    )
+    headers = anthropic_messages_provider_config.validate_environment(
+        headers=extra_headers or {},
+        model=model,
+        api_key=api_key,
+    )
+
+    litellm_logging_obj.update_environment_variables(
+        model=model,
+        optional_params=dict(optional_params),
+        litellm_params={
+            "metadata": kwargs.get("metadata", {}),
+            "preset_cache_key": None,
+            "stream_response": {},
+            **optional_params.model_dump(exclude_unset=True),
+        },
+        custom_llm_provider=_custom_llm_provider,
+    )
+    litellm_logging_obj.model_call_details.update(kwargs)
+
+    # Prepare request body
+    request_body = kwargs.copy()
+    request_body = {
+        k: v
+        for k, v in request_body.items()
+        if k
+        in anthropic_messages_provider_config.get_supported_anthropic_messages_params(
+            model=model
+        )
+    }
+    request_body["stream"] = stream
+    request_body["model"] = model
+    litellm_logging_obj.stream = stream
+
+    # Make the request
+    request_url = anthropic_messages_provider_config.get_complete_url(
+        api_base=api_base, model=model
+    )
+
+    litellm_logging_obj.pre_call(
+        input=[{"role": "user", "content": json.dumps(request_body)}],
+        api_key="",
+        additional_args={
+            "complete_input_dict": request_body,
+            "api_base": str(request_url),
+            "headers": headers,
+        },
+    )
+
+    response = await async_httpx_client.post(
+        url=request_url,
+        headers=headers,
+        data=json.dumps(request_body),
+        stream=stream,
+    )
+    response.raise_for_status()
+
+    # used for logging + cost tracking
+    litellm_logging_obj.model_call_details["httpx_response"] = response
+
+    if stream:
+        return await AnthropicMessagesHandler._handle_anthropic_streaming(
+            response=response,
+            request_body=request_body,
+            litellm_logging_obj=litellm_logging_obj,
+        )
+    else:
+        return response.json()
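A hedged usage sketch for the new handler. The module path is taken from this diff; whether the function is also re-exported at the `litellm` top level isn't shown here, and the `@client` decorator is expected to inject the logging object that the body relies on:

```python
# Illustrative call into the new /v1/messages pass-through handler.
import asyncio
from litellm.llms.anthropic.experimental_pass_through.messages.handler import (
    anthropic_messages,
)

async def main():
    response = await anthropic_messages(
        api_key="sk-ant-...",                      # your Anthropic key
        model="claude-3-5-sonnet-20240620",
        messages=[{"role": "user", "content": "Hello"}],  # forwarded as-is
        max_tokens=256,                            # kept: in the supported-params list
        stream=False,
    )
    print(response)  # raw Anthropic /v1/messages JSON dict

asyncio.run(main())
```

Note how `messages` and `max_tokens` travel via `**kwargs` and survive the supported-params filter above, while unrecognized kwargs are silently dropped from the request body.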
diff --git a/litellm/llms/anthropic/experimental_pass_through/messages/transformation.py b/litellm/llms/anthropic/experimental_pass_through/messages/transformation.py
new file mode 100644
index 0000000000..e9b598f18d
--- /dev/null
+++ b/litellm/llms/anthropic/experimental_pass_through/messages/transformation.py
@@ -0,0 +1,47 @@
+from typing import Optional
+
+from litellm.llms.base_llm.anthropic_messages.transformation import (
+    BaseAnthropicMessagesConfig,
+)
+
+DEFAULT_ANTHROPIC_API_BASE = "https://api.anthropic.com"
+DEFAULT_ANTHROPIC_API_VERSION = "2023-06-01"
+
+
+class AnthropicMessagesConfig(BaseAnthropicMessagesConfig):
+    def get_supported_anthropic_messages_params(self, model: str) -> list:
+        return [
+            "messages",
+            "model",
+            "system",
+            "max_tokens",
+            "stop_sequences",
+            "temperature",
+            "top_p",
+            "top_k",
+            "tools",
+            "tool_choice",
+            "thinking",
+            # TODO: Add Anthropic `metadata` support
+            # "metadata",
+        ]
+
+    def get_complete_url(self, api_base: Optional[str], model: str) -> str:
+        api_base = api_base or DEFAULT_ANTHROPIC_API_BASE
+        if not api_base.endswith("/v1/messages"):
+            api_base = f"{api_base}/v1/messages"
+        return api_base
+
+    def validate_environment(
+        self,
+        headers: dict,
+        model: str,
+        api_key: Optional[str] = None,
+    ) -> dict:
+        if "x-api-key" not in headers:
+            headers["x-api-key"] = api_key
+        if "anthropic-version" not in headers:
+            headers["anthropic-version"] = DEFAULT_ANTHROPIC_API_VERSION
+        if "content-type" not in headers:
+            headers["content-type"] = "application/json"
+        return headers
diff --git a/litellm/llms/anthropic/experimental_pass_through/transformation.py b/litellm/llms/anthropic/experimental_pass_through/transformation.py
deleted file mode 100644
index b24cf47ad4..0000000000
--- a/litellm/llms/anthropic/experimental_pass_through/transformation.py
+++ /dev/null
@@ -1,412 +0,0 @@
-import json
-from typing import List, Literal, Optional, Tuple, Union
-
-from openai.types.chat.chat_completion_chunk import
Choice as OpenAIStreamingChoice - -from litellm.types.llms.anthropic import ( - AllAnthropicToolsValues, - AnthopicMessagesAssistantMessageParam, - AnthropicFinishReason, - AnthropicMessagesRequest, - AnthropicMessagesToolChoice, - AnthropicMessagesUserMessageParam, - AnthropicResponse, - AnthropicResponseContentBlockText, - AnthropicResponseContentBlockToolUse, - AnthropicResponseUsageBlock, - ContentBlockDelta, - ContentJsonBlockDelta, - ContentTextBlockDelta, - MessageBlockDelta, - MessageDelta, - UsageDelta, -) -from litellm.types.llms.openai import ( - AllMessageValues, - ChatCompletionAssistantMessage, - ChatCompletionAssistantToolCall, - ChatCompletionImageObject, - ChatCompletionImageUrlObject, - ChatCompletionRequest, - ChatCompletionSystemMessage, - ChatCompletionTextObject, - ChatCompletionToolCallFunctionChunk, - ChatCompletionToolChoiceFunctionParam, - ChatCompletionToolChoiceObjectParam, - ChatCompletionToolChoiceValues, - ChatCompletionToolMessage, - ChatCompletionToolParam, - ChatCompletionToolParamFunctionChunk, - ChatCompletionUserMessage, -) -from litellm.types.utils import Choices, ModelResponse, Usage - - -class AnthropicExperimentalPassThroughConfig: - def __init__(self): - pass - - ### FOR [BETA] `/v1/messages` endpoint support - - def translatable_anthropic_params(self) -> List: - """ - Which anthropic params, we need to translate to the openai format. - """ - return ["messages", "metadata", "system", "tool_choice", "tools"] - - def translate_anthropic_messages_to_openai( # noqa: PLR0915 - self, - messages: List[ - Union[ - AnthropicMessagesUserMessageParam, - AnthopicMessagesAssistantMessageParam, - ] - ], - ) -> List: - new_messages: List[AllMessageValues] = [] - for m in messages: - user_message: Optional[ChatCompletionUserMessage] = None - tool_message_list: List[ChatCompletionToolMessage] = [] - new_user_content_list: List[ - Union[ChatCompletionTextObject, ChatCompletionImageObject] - ] = [] - ## USER MESSAGE ## - if m["role"] == "user": - ## translate user message - message_content = m.get("content") - if message_content and isinstance(message_content, str): - user_message = ChatCompletionUserMessage( - role="user", content=message_content - ) - elif message_content and isinstance(message_content, list): - for content in message_content: - if content["type"] == "text": - text_obj = ChatCompletionTextObject( - type="text", text=content["text"] - ) - new_user_content_list.append(text_obj) - elif content["type"] == "image": - image_url = ChatCompletionImageUrlObject( - url=f"data:{content['type']};base64,{content['source']}" - ) - image_obj = ChatCompletionImageObject( - type="image_url", image_url=image_url - ) - - new_user_content_list.append(image_obj) - elif content["type"] == "tool_result": - if "content" not in content: - tool_result = ChatCompletionToolMessage( - role="tool", - tool_call_id=content["tool_use_id"], - content="", - ) - tool_message_list.append(tool_result) - elif isinstance(content["content"], str): - tool_result = ChatCompletionToolMessage( - role="tool", - tool_call_id=content["tool_use_id"], - content=content["content"], - ) - tool_message_list.append(tool_result) - elif isinstance(content["content"], list): - for c in content["content"]: - if c["type"] == "text": - tool_result = ChatCompletionToolMessage( - role="tool", - tool_call_id=content["tool_use_id"], - content=c["text"], - ) - tool_message_list.append(tool_result) - elif c["type"] == "image": - image_str = ( - f"data:{c['type']};base64,{c['source']}" - ) - tool_result = 
ChatCompletionToolMessage( - role="tool", - tool_call_id=content["tool_use_id"], - content=image_str, - ) - tool_message_list.append(tool_result) - - if user_message is not None: - new_messages.append(user_message) - - if len(new_user_content_list) > 0: - new_messages.append({"role": "user", "content": new_user_content_list}) # type: ignore - - if len(tool_message_list) > 0: - new_messages.extend(tool_message_list) - - ## ASSISTANT MESSAGE ## - assistant_message_str: Optional[str] = None - tool_calls: List[ChatCompletionAssistantToolCall] = [] - if m["role"] == "assistant": - if isinstance(m["content"], str): - assistant_message_str = m["content"] - elif isinstance(m["content"], list): - for content in m["content"]: - if content["type"] == "text": - if assistant_message_str is None: - assistant_message_str = content["text"] - else: - assistant_message_str += content["text"] - elif content["type"] == "tool_use": - function_chunk = ChatCompletionToolCallFunctionChunk( - name=content["name"], - arguments=json.dumps(content["input"]), - ) - - tool_calls.append( - ChatCompletionAssistantToolCall( - id=content["id"], - type="function", - function=function_chunk, - ) - ) - - if assistant_message_str is not None or len(tool_calls) > 0: - assistant_message = ChatCompletionAssistantMessage( - role="assistant", - content=assistant_message_str, - ) - if len(tool_calls) > 0: - assistant_message["tool_calls"] = tool_calls - new_messages.append(assistant_message) - - return new_messages - - def translate_anthropic_tool_choice_to_openai( - self, tool_choice: AnthropicMessagesToolChoice - ) -> ChatCompletionToolChoiceValues: - if tool_choice["type"] == "any": - return "required" - elif tool_choice["type"] == "auto": - return "auto" - elif tool_choice["type"] == "tool": - tc_function_param = ChatCompletionToolChoiceFunctionParam( - name=tool_choice.get("name", "") - ) - return ChatCompletionToolChoiceObjectParam( - type="function", function=tc_function_param - ) - else: - raise ValueError( - "Incompatible tool choice param submitted - {}".format(tool_choice) - ) - - def translate_anthropic_tools_to_openai( - self, tools: List[AllAnthropicToolsValues] - ) -> List[ChatCompletionToolParam]: - new_tools: List[ChatCompletionToolParam] = [] - mapped_tool_params = ["name", "input_schema", "description"] - for tool in tools: - function_chunk = ChatCompletionToolParamFunctionChunk( - name=tool["name"], - ) - if "input_schema" in tool: - function_chunk["parameters"] = tool["input_schema"] # type: ignore - if "description" in tool: - function_chunk["description"] = tool["description"] # type: ignore - - for k, v in tool.items(): - if k not in mapped_tool_params: # pass additional computer kwargs - function_chunk.setdefault("parameters", {}).update({k: v}) - new_tools.append( - ChatCompletionToolParam(type="function", function=function_chunk) - ) - - return new_tools - - def translate_anthropic_to_openai( - self, anthropic_message_request: AnthropicMessagesRequest - ) -> ChatCompletionRequest: - """ - This is used by the beta Anthropic Adapter, for translating anthropic `/v1/messages` requests to the openai format. 
- """ - new_messages: List[AllMessageValues] = [] - - ## CONVERT ANTHROPIC MESSAGES TO OPENAI - new_messages = self.translate_anthropic_messages_to_openai( - messages=anthropic_message_request["messages"] - ) - ## ADD SYSTEM MESSAGE TO MESSAGES - if "system" in anthropic_message_request: - new_messages.insert( - 0, - ChatCompletionSystemMessage( - role="system", content=anthropic_message_request["system"] - ), - ) - - new_kwargs: ChatCompletionRequest = { - "model": anthropic_message_request["model"], - "messages": new_messages, - } - ## CONVERT METADATA (user_id) - if "metadata" in anthropic_message_request: - if "user_id" in anthropic_message_request["metadata"]: - new_kwargs["user"] = anthropic_message_request["metadata"]["user_id"] - - # Pass litellm proxy specific metadata - if "litellm_metadata" in anthropic_message_request: - # metadata will be passed to litellm.acompletion(), it's a litellm_param - new_kwargs["metadata"] = anthropic_message_request.pop("litellm_metadata") - - ## CONVERT TOOL CHOICE - if "tool_choice" in anthropic_message_request: - new_kwargs["tool_choice"] = self.translate_anthropic_tool_choice_to_openai( - tool_choice=anthropic_message_request["tool_choice"] - ) - ## CONVERT TOOLS - if "tools" in anthropic_message_request: - new_kwargs["tools"] = self.translate_anthropic_tools_to_openai( - tools=anthropic_message_request["tools"] - ) - - translatable_params = self.translatable_anthropic_params() - for k, v in anthropic_message_request.items(): - if k not in translatable_params: # pass remaining params as is - new_kwargs[k] = v # type: ignore - - return new_kwargs - - def _translate_openai_content_to_anthropic( - self, choices: List[Choices] - ) -> List[ - Union[AnthropicResponseContentBlockText, AnthropicResponseContentBlockToolUse] - ]: - new_content: List[ - Union[ - AnthropicResponseContentBlockText, AnthropicResponseContentBlockToolUse - ] - ] = [] - for choice in choices: - if ( - choice.message.tool_calls is not None - and len(choice.message.tool_calls) > 0 - ): - for tool_call in choice.message.tool_calls: - new_content.append( - AnthropicResponseContentBlockToolUse( - type="tool_use", - id=tool_call.id, - name=tool_call.function.name or "", - input=json.loads(tool_call.function.arguments), - ) - ) - elif choice.message.content is not None: - new_content.append( - AnthropicResponseContentBlockText( - type="text", text=choice.message.content - ) - ) - - return new_content - - def _translate_openai_finish_reason_to_anthropic( - self, openai_finish_reason: str - ) -> AnthropicFinishReason: - if openai_finish_reason == "stop": - return "end_turn" - elif openai_finish_reason == "length": - return "max_tokens" - elif openai_finish_reason == "tool_calls": - return "tool_use" - return "end_turn" - - def translate_openai_response_to_anthropic( - self, response: ModelResponse - ) -> AnthropicResponse: - ## translate content block - anthropic_content = self._translate_openai_content_to_anthropic(choices=response.choices) # type: ignore - ## extract finish reason - anthropic_finish_reason = self._translate_openai_finish_reason_to_anthropic( - openai_finish_reason=response.choices[0].finish_reason # type: ignore - ) - # extract usage - usage: Usage = getattr(response, "usage") - anthropic_usage = AnthropicResponseUsageBlock( - input_tokens=usage.prompt_tokens or 0, - output_tokens=usage.completion_tokens or 0, - ) - translated_obj = AnthropicResponse( - id=response.id, - type="message", - role="assistant", - model=response.model or "unknown-model", - 
stop_sequence=None, - usage=anthropic_usage, - content=anthropic_content, - stop_reason=anthropic_finish_reason, - ) - - return translated_obj - - def _translate_streaming_openai_chunk_to_anthropic( - self, choices: List[OpenAIStreamingChoice] - ) -> Tuple[ - Literal["text_delta", "input_json_delta"], - Union[ContentTextBlockDelta, ContentJsonBlockDelta], - ]: - text: str = "" - partial_json: Optional[str] = None - for choice in choices: - if choice.delta.content is not None: - text += choice.delta.content - elif choice.delta.tool_calls is not None: - partial_json = "" - for tool in choice.delta.tool_calls: - if ( - tool.function is not None - and tool.function.arguments is not None - ): - partial_json += tool.function.arguments - - if partial_json is not None: - return "input_json_delta", ContentJsonBlockDelta( - type="input_json_delta", partial_json=partial_json - ) - else: - return "text_delta", ContentTextBlockDelta(type="text_delta", text=text) - - def translate_streaming_openai_response_to_anthropic( - self, response: ModelResponse - ) -> Union[ContentBlockDelta, MessageBlockDelta]: - ## base case - final chunk w/ finish reason - if response.choices[0].finish_reason is not None: - delta = MessageDelta( - stop_reason=self._translate_openai_finish_reason_to_anthropic( - response.choices[0].finish_reason - ), - ) - if getattr(response, "usage", None) is not None: - litellm_usage_chunk: Optional[Usage] = response.usage # type: ignore - elif ( - hasattr(response, "_hidden_params") - and "usage" in response._hidden_params - ): - litellm_usage_chunk = response._hidden_params["usage"] - else: - litellm_usage_chunk = None - if litellm_usage_chunk is not None: - usage_delta = UsageDelta( - input_tokens=litellm_usage_chunk.prompt_tokens or 0, - output_tokens=litellm_usage_chunk.completion_tokens or 0, - ) - else: - usage_delta = UsageDelta(input_tokens=0, output_tokens=0) - return MessageBlockDelta( - type="message_delta", delta=delta, usage=usage_delta - ) - ( - type_of_content, - content_block_delta, - ) = self._translate_streaming_openai_chunk_to_anthropic( - choices=response.choices # type: ignore - ) - return ContentBlockDelta( - type="content_block_delta", - index=response.choices[0].index, - delta=content_block_delta, - ) diff --git a/litellm/llms/azure/assistants.py b/litellm/llms/azure/assistants.py index 2f67b5506f..1328eb1fea 100644 --- a/litellm/llms/azure/assistants.py +++ b/litellm/llms/azure/assistants.py @@ -1,4 +1,4 @@ -from typing import Coroutine, Iterable, Literal, Optional, Union +from typing import Any, Coroutine, Dict, Iterable, Literal, Optional, Union import httpx from openai import AsyncAzureOpenAI, AzureOpenAI @@ -18,10 +18,10 @@ from ...types.llms.openai import ( SyncCursorPage, Thread, ) -from ..base import BaseLLM +from .common_utils import BaseAzureLLM -class AzureAssistantsAPI(BaseLLM): +class AzureAssistantsAPI(BaseAzureLLM): def __init__(self) -> None: super().__init__() @@ -34,18 +34,17 @@ class AzureAssistantsAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AzureOpenAI] = None, + litellm_params: Optional[dict] = None, ) -> AzureOpenAI: - received_args = locals() if client is None: - data = {} - for k, v in received_args.items(): - if k == "self" or k == "client": - pass - elif k == "api_base" and v is not None: - data["azure_endpoint"] = v - elif v is not None: - data[k] = v - azure_openai_client = AzureOpenAI(**data) # type: ignore + azure_client_params = self.initialize_azure_sdk_client( + 
+                litellm_params=litellm_params or {},
+                api_key=api_key,
+                api_base=api_base,
+                model_name="",
+                api_version=api_version,
+            )
+            azure_openai_client = AzureOpenAI(**azure_client_params)  # type: ignore
         else:
             azure_openai_client = client
@@ -60,18 +59,18 @@ class AzureAssistantsAPI(BaseLLM):
         timeout: Union[float, httpx.Timeout],
         max_retries: Optional[int],
         client: Optional[AsyncAzureOpenAI] = None,
+        litellm_params: Optional[dict] = None,
     ) -> AsyncAzureOpenAI:
-        received_args = locals()
         if client is None:
-            data = {}
-            for k, v in received_args.items():
-                if k == "self" or k == "client":
-                    pass
-                elif k == "api_base" and v is not None:
-                    data["azure_endpoint"] = v
-                elif v is not None:
-                    data[k] = v
-            azure_openai_client = AsyncAzureOpenAI(**data)
+            azure_client_params = self.initialize_azure_sdk_client(
+                litellm_params=litellm_params or {},
+                api_key=api_key,
+                api_base=api_base,
+                model_name="",
+                api_version=api_version,
+            )
+
+            azure_openai_client = AsyncAzureOpenAI(**azure_client_params)
             # azure_openai_client = AsyncAzureOpenAI(**data)  # type: ignore
         else:
             azure_openai_client = client
@@ -89,6 +88,7 @@ class AzureAssistantsAPI(BaseLLM):
         timeout: Union[float, httpx.Timeout],
         max_retries: Optional[int],
         client: Optional[AsyncAzureOpenAI],
+        litellm_params: Optional[dict] = None,
     ) -> AsyncCursorPage[Assistant]:
         azure_openai_client = self.async_get_azure_client(
             api_key=api_key,
@@ -98,6 +98,7 @@ class AzureAssistantsAPI(BaseLLM):
             timeout=timeout,
             max_retries=max_retries,
             client=client,
+            litellm_params=litellm_params,
         )

         response = await azure_openai_client.beta.assistants.list()
@@ -146,6 +147,7 @@ class AzureAssistantsAPI(BaseLLM):
         max_retries: Optional[int],
         client=None,
         aget_assistants=None,
+        litellm_params: Optional[dict] = None,
     ):
         if aget_assistants is not None and aget_assistants is True:
             return self.async_get_assistants(
@@ -156,6 +158,7 @@ class AzureAssistantsAPI(BaseLLM):
                 timeout=timeout,
                 max_retries=max_retries,
                 client=client,
+                litellm_params=litellm_params,
             )
         azure_openai_client = self.get_azure_client(
             api_key=api_key,
@@ -165,6 +168,7 @@ class AzureAssistantsAPI(BaseLLM):
             max_retries=max_retries,
             client=client,
             api_version=api_version,
+            litellm_params=litellm_params,
         )

         response = azure_openai_client.beta.assistants.list()
@@ -184,6 +188,7 @@ class AzureAssistantsAPI(BaseLLM):
         timeout: Union[float, httpx.Timeout],
         max_retries: Optional[int],
         client: Optional[AsyncAzureOpenAI] = None,
+        litellm_params: Optional[dict] = None,
     ) -> OpenAIMessage:
         openai_client = self.async_get_azure_client(
             api_key=api_key,
@@ -193,6 +198,7 @@ class AzureAssistantsAPI(BaseLLM):
             timeout=timeout,
             max_retries=max_retries,
             client=client,
+            litellm_params=litellm_params,
         )

         thread_message: OpenAIMessage = await openai_client.beta.threads.messages.create(  # type: ignore
@@ -222,6 +228,7 @@ class AzureAssistantsAPI(BaseLLM):
         max_retries: Optional[int],
         client: Optional[AsyncAzureOpenAI],
         a_add_message: Literal[True],
+        litellm_params: Optional[dict] = None,
     ) -> Coroutine[None, None, OpenAIMessage]:
         ...
@@ -238,6 +245,7 @@ class AzureAssistantsAPI(BaseLLM):
         max_retries: Optional[int],
         client: Optional[AzureOpenAI],
         a_add_message: Optional[Literal[False]],
+        litellm_params: Optional[dict] = None,
     ) -> OpenAIMessage:
         ...
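Note for reviewers: the `a_add_message` (and later `aget_messages` / `acreate_thread`) flags rely on `typing.overload` with `Literal` types so a type checker can tell whether a given call returns a value or a coroutine, while a single runtime implementation dispatches on the flag. A minimal, self-contained sketch of the pattern, with hypothetical names that are not part of this patch:

    from typing import Any, Coroutine, Literal, Optional, Union, overload

    class DualModeAPI:
        @overload
        def add_message(self, text: str, a_add_message: Literal[True]) -> Coroutine[Any, Any, str]: ...

        @overload
        def add_message(self, text: str, a_add_message: Optional[Literal[False]] = None) -> str: ...

        def add_message(
            self, text: str, a_add_message: Optional[bool] = None
        ) -> Union[str, Coroutine[Any, Any, str]]:
            # Runtime dispatch: async callers receive a coroutine to await,
            # sync callers receive the value directly.
            if a_add_message is True:
                return self._a_add_message(text)
            return f"added: {text}"

        async def _a_add_message(self, text: str) -> str:
            return f"added: {text}"

This is why every overload in the hunks below also gains the `litellm_params` keyword: the stubs must stay signature-compatible with the implementation that actually forwards it.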
@@ -255,6 +263,7 @@ class AzureAssistantsAPI(BaseLLM):
         max_retries: Optional[int],
         client=None,
         a_add_message: Optional[bool] = None,
+        litellm_params: Optional[dict] = None,
     ):
         if a_add_message is not None and a_add_message is True:
             return self.a_add_message(
@@ -267,6 +276,7 @@ class AzureAssistantsAPI(BaseLLM):
                 timeout=timeout,
                 max_retries=max_retries,
                 client=client,
+                litellm_params=litellm_params,
             )
         openai_client = self.get_azure_client(
             api_key=api_key,
@@ -300,6 +310,7 @@ class AzureAssistantsAPI(BaseLLM):
         timeout: Union[float, httpx.Timeout],
         max_retries: Optional[int],
         client: Optional[AsyncAzureOpenAI] = None,
+        litellm_params: Optional[dict] = None,
     ) -> AsyncCursorPage[OpenAIMessage]:
         openai_client = self.async_get_azure_client(
             api_key=api_key,
@@ -309,6 +320,7 @@ class AzureAssistantsAPI(BaseLLM):
             timeout=timeout,
             max_retries=max_retries,
             client=client,
+            litellm_params=litellm_params,
         )

         response = await openai_client.beta.threads.messages.list(thread_id=thread_id)
@@ -329,6 +341,7 @@ class AzureAssistantsAPI(BaseLLM):
         max_retries: Optional[int],
         client: Optional[AsyncAzureOpenAI],
         aget_messages: Literal[True],
+        litellm_params: Optional[dict] = None,
     ) -> Coroutine[None, None, AsyncCursorPage[OpenAIMessage]]:
         ...
@@ -344,6 +357,7 @@ class AzureAssistantsAPI(BaseLLM):
         max_retries: Optional[int],
         client: Optional[AzureOpenAI],
         aget_messages: Optional[Literal[False]],
+        litellm_params: Optional[dict] = None,
     ) -> SyncCursorPage[OpenAIMessage]:
         ...
@@ -360,6 +374,7 @@ class AzureAssistantsAPI(BaseLLM):
         max_retries: Optional[int],
         client=None,
         aget_messages=None,
+        litellm_params: Optional[dict] = None,
     ):
         if aget_messages is not None and aget_messages is True:
             return self.async_get_messages(
@@ -371,6 +386,7 @@ class AzureAssistantsAPI(BaseLLM):
                 timeout=timeout,
                 max_retries=max_retries,
                 client=client,
+                litellm_params=litellm_params,
             )
         openai_client = self.get_azure_client(
             api_key=api_key,
@@ -380,6 +396,7 @@ class AzureAssistantsAPI(BaseLLM):
             timeout=timeout,
             max_retries=max_retries,
             client=client,
+            litellm_params=litellm_params,
         )

         response = openai_client.beta.threads.messages.list(thread_id=thread_id)
@@ -399,6 +416,7 @@ class AzureAssistantsAPI(BaseLLM):
         max_retries: Optional[int],
         client: Optional[AsyncAzureOpenAI],
         messages: Optional[Iterable[OpenAICreateThreadParamsMessage]],
+        litellm_params: Optional[dict] = None,
     ) -> Thread:
         openai_client = self.async_get_azure_client(
             api_key=api_key,
@@ -408,6 +426,7 @@ class AzureAssistantsAPI(BaseLLM):
             timeout=timeout,
             max_retries=max_retries,
             client=client,
+            litellm_params=litellm_params,
         )

         data = {}
@@ -435,6 +454,7 @@ class AzureAssistantsAPI(BaseLLM):
         messages: Optional[Iterable[OpenAICreateThreadParamsMessage]],
         client: Optional[AsyncAzureOpenAI],
         acreate_thread: Literal[True],
+        litellm_params: Optional[dict] = None,
     ) -> Coroutine[None, None, Thread]:
         ...
@@ -451,6 +471,7 @@ class AzureAssistantsAPI(BaseLLM):
         messages: Optional[Iterable[OpenAICreateThreadParamsMessage]],
         client: Optional[AzureOpenAI],
         acreate_thread: Optional[Literal[False]],
+        litellm_params: Optional[dict] = None,
     ) -> Thread:
         ...
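Note for reviewers: every method in this file now accepts `litellm_params` and forwards it to the client factory, because `BaseAzureLLM.initialize_azure_sdk_client` (added to litellm/llms/azure/common_utils.py further down in this patch) derives auth from that one dict (azure_ad_token, tenant_id/client_id/client_secret, azure_username/azure_password, max_retries, timeout) instead of each call site rebuilding kwargs by hand. A rough usage sketch under the signatures this patch adds; the endpoint, key, and deployment values are placeholders:

    from openai import AzureOpenAI
    from litellm.llms.azure.common_utils import BaseAzureLLM

    # Build the SDK kwargs once; api_key takes precedence, otherwise the helper
    # falls back to the Azure AD credentials carried in litellm_params.
    azure_client_params = BaseAzureLLM().initialize_azure_sdk_client(
        litellm_params={"timeout": 600.0, "max_retries": 2},
        api_key="azure-api-key-placeholder",
        api_base="https://my-resource.openai.azure.com",  # placeholder endpoint
        model_name="my-gpt-4o-deployment",                # placeholder, used for debug logging
        api_version="2024-06-01",
    )
    # The returned dict bundles api_key / azure_endpoint / api_version /
    # azure_ad_token(_provider) / http_client, ready to splat into the SDK client:
    client = AzureOpenAI(**azure_client_params)

The same dict feeds both `AzureOpenAI` and `AsyncAzureOpenAI`, which is what lets the hunks above thread a single `litellm_params` through sync and async paths alike.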
@@ -468,6 +489,7 @@ class AzureAssistantsAPI(BaseLLM): messages: Optional[Iterable[OpenAICreateThreadParamsMessage]], client=None, acreate_thread=None, + litellm_params: Optional[dict] = None, ): """ Here's an example: @@ -490,6 +512,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries=max_retries, client=client, messages=messages, + litellm_params=litellm_params, ) azure_openai_client = self.get_azure_client( api_key=api_key, @@ -499,6 +522,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) data = {} @@ -521,6 +545,7 @@ class AzureAssistantsAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], + litellm_params: Optional[dict] = None, ) -> Thread: openai_client = self.async_get_azure_client( api_key=api_key, @@ -530,6 +555,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = await openai_client.beta.threads.retrieve(thread_id=thread_id) @@ -550,6 +576,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], aget_thread: Literal[True], + litellm_params: Optional[dict] = None, ) -> Coroutine[None, None, Thread]: ... @@ -565,6 +592,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AzureOpenAI], aget_thread: Optional[Literal[False]], + litellm_params: Optional[dict] = None, ) -> Thread: ... @@ -581,6 +609,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client=None, aget_thread=None, + litellm_params: Optional[dict] = None, ): if aget_thread is not None and aget_thread is True: return self.async_get_thread( @@ -592,6 +621,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) openai_client = self.get_azure_client( api_key=api_key, @@ -601,6 +631,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = openai_client.beta.threads.retrieve(thread_id=thread_id) @@ -618,7 +649,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -629,6 +660,7 @@ class AzureAssistantsAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], + litellm_params: Optional[dict] = None, ) -> Run: openai_client = self.async_get_azure_client( api_key=api_key, @@ -638,6 +670,7 @@ class AzureAssistantsAPI(BaseLLM): api_version=api_version, azure_ad_token=azure_ad_token, client=client, + litellm_params=litellm_params, ) response = await openai_client.beta.threads.runs.create_and_poll( # type: ignore @@ -645,7 +678,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id=assistant_id, additional_instructions=additional_instructions, instructions=instructions, - metadata=metadata, + metadata=metadata, # type: ignore model=model, tools=tools, ) @@ -659,12 +692,13 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], tools: Optional[Iterable[AssistantToolParam]], event_handler: Optional[AssistantEventHandler], + 
litellm_params: Optional[dict] = None, ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]: - data = { + data: Dict[str, Any] = { "thread_id": thread_id, "assistant_id": assistant_id, "additional_instructions": additional_instructions, @@ -684,12 +718,13 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], tools: Optional[Iterable[AssistantToolParam]], event_handler: Optional[AssistantEventHandler], + litellm_params: Optional[dict] = None, ) -> AssistantStreamManager[AssistantEventHandler]: - data = { + data: Dict[str, Any] = { "thread_id": thread_id, "assistant_id": assistant_id, "additional_instructions": additional_instructions, @@ -711,7 +746,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -733,7 +768,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -756,7 +791,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -769,6 +804,7 @@ class AzureAssistantsAPI(BaseLLM): client=None, arun_thread=None, event_handler: Optional[AssistantEventHandler] = None, + litellm_params: Optional[dict] = None, ): if arun_thread is not None and arun_thread is True: if stream is not None and stream is True: @@ -780,6 +816,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) return self.async_run_thread_stream( client=azure_client, @@ -791,13 +828,14 @@ class AzureAssistantsAPI(BaseLLM): model=model, tools=tools, event_handler=event_handler, + litellm_params=litellm_params, ) return self.arun_thread( thread_id=thread_id, assistant_id=assistant_id, additional_instructions=additional_instructions, instructions=instructions, - metadata=metadata, + metadata=metadata, # type: ignore model=model, stream=stream, tools=tools, @@ -808,6 +846,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) openai_client = self.get_azure_client( api_key=api_key, @@ -817,6 +856,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) if stream is not None and stream is True: @@ -830,6 +870,7 @@ class AzureAssistantsAPI(BaseLLM): model=model, tools=tools, event_handler=event_handler, + litellm_params=litellm_params, ) response = openai_client.beta.threads.runs.create_and_poll( # type: ignore @@ -837,7 +878,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id=assistant_id, additional_instructions=additional_instructions, instructions=instructions, - metadata=metadata, + metadata=metadata, # type: ignore model=model, tools=tools, ) @@ -855,6 +896,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], 
create_assistant_data: dict, + litellm_params: Optional[dict] = None, ) -> Assistant: azure_openai_client = self.async_get_azure_client( api_key=api_key, @@ -864,6 +906,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = await azure_openai_client.beta.assistants.create( @@ -882,6 +925,7 @@ class AzureAssistantsAPI(BaseLLM): create_assistant_data: dict, client=None, async_create_assistants=None, + litellm_params: Optional[dict] = None, ): if async_create_assistants is not None and async_create_assistants is True: return self.async_create_assistants( @@ -893,6 +937,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries=max_retries, client=client, create_assistant_data=create_assistant_data, + litellm_params=litellm_params, ) azure_openai_client = self.get_azure_client( api_key=api_key, @@ -902,6 +947,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = azure_openai_client.beta.assistants.create(**create_assistant_data) @@ -918,6 +964,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries: Optional[int], client: Optional[AsyncAzureOpenAI], assistant_id: str, + litellm_params: Optional[dict] = None, ): azure_openai_client = self.async_get_azure_client( api_key=api_key, @@ -927,6 +974,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = await azure_openai_client.beta.assistants.delete( @@ -945,6 +993,7 @@ class AzureAssistantsAPI(BaseLLM): assistant_id: str, async_delete_assistants: Optional[bool] = None, client=None, + litellm_params: Optional[dict] = None, ): if async_delete_assistants is not None and async_delete_assistants is True: return self.async_delete_assistant( @@ -956,6 +1005,7 @@ class AzureAssistantsAPI(BaseLLM): max_retries=max_retries, client=client, assistant_id=assistant_id, + litellm_params=litellm_params, ) azure_openai_client = self.get_azure_client( api_key=api_key, @@ -965,6 +1015,7 @@ class AzureAssistantsAPI(BaseLLM): timeout=timeout, max_retries=max_retries, client=client, + litellm_params=litellm_params, ) response = azure_openai_client.beta.assistants.delete(assistant_id=assistant_id) diff --git a/litellm/llms/azure/audio_transcriptions.py b/litellm/llms/azure/audio_transcriptions.py index 94793295ca..52a3d780fb 100644 --- a/litellm/llms/azure/audio_transcriptions.py +++ b/litellm/llms/azure/audio_transcriptions.py @@ -7,14 +7,14 @@ from pydantic import BaseModel import litellm from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_name from litellm.types.utils import FileTypes -from litellm.utils import TranscriptionResponse, convert_to_model_response_object - -from .azure import ( - AzureChatCompletion, - get_azure_ad_token_from_oidc, - select_azure_base_url_or_endpoint, +from litellm.utils import ( + TranscriptionResponse, + convert_to_model_response_object, + extract_duration_from_srt_or_vtt, ) +from .azure import AzureChatCompletion + class AzureAudioTranscription(AzureChatCompletion): def audio_transcriptions( @@ -32,29 +32,18 @@ class AzureAudioTranscription(AzureChatCompletion): client=None, azure_ad_token: Optional[str] = None, atranscription: bool = False, + litellm_params: Optional[dict] = None, ) -> TranscriptionResponse: data = {"model": model, "file": audio_file, **optional_params} # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - 
"azure_endpoint": api_base, - "azure_deployment": model, - "timeout": timeout, - } - - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params + azure_client_params = self.initialize_azure_sdk_client( + litellm_params=litellm_params or {}, + api_key=api_key, + model_name=model, + api_version=api_version, + api_base=api_base, ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - - if max_retries is not None: - azure_client_params["max_retries"] = max_retries if atranscription is True: return self.async_audio_transcriptions( # type: ignore @@ -124,7 +113,6 @@ class AzureAudioTranscription(AzureChatCompletion): if client is None: async_azure_client = AsyncAzureOpenAI( **azure_client_params, - http_client=litellm.aclient_session, ) else: async_azure_client = client @@ -156,6 +144,8 @@ class AzureAudioTranscription(AzureChatCompletion): stringified_response = response.model_dump() else: stringified_response = TranscriptionResponse(text=response).model_dump() + duration = extract_duration_from_srt_or_vtt(response) + stringified_response["duration"] = duration ## LOGGING logging_obj.post_call( diff --git a/litellm/llms/azure/azure.py b/litellm/llms/azure/azure.py index 5294bd7141..e0c1079a2a 100644 --- a/litellm/llms/azure/azure.py +++ b/litellm/llms/azure/azure.py @@ -1,6 +1,5 @@ import asyncio import json -import os import time from typing import Any, Callable, Dict, List, Literal, Optional, Union @@ -8,7 +7,6 @@ import httpx # type: ignore from openai import APITimeoutError, AsyncAzureOpenAI, AzureOpenAI import litellm -from litellm.caching.caching import DualCache from litellm.constants import DEFAULT_MAX_RETRIES from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj from litellm.llms.custom_httpx.http_handler import ( @@ -25,15 +23,18 @@ from litellm.types.utils import ( from litellm.utils import ( CustomStreamWrapper, convert_to_model_response_object, - get_secret, modify_url, ) from ...types.llms.openai import HttpxBinaryResponseContent from ..base import BaseLLM -from .common_utils import AzureOpenAIError, process_azure_headers - -azure_ad_cache = DualCache() +from .common_utils import ( + AzureOpenAIError, + BaseAzureLLM, + get_azure_ad_token_from_oidc, + process_azure_headers, + select_azure_base_url_or_endpoint, +) class AzureOpenAIAssistantsAPIConfig: @@ -98,93 +99,6 @@ class AzureOpenAIAssistantsAPIConfig: return optional_params -def select_azure_base_url_or_endpoint(azure_client_params: dict): - azure_endpoint = azure_client_params.get("azure_endpoint", None) - if azure_endpoint is not None: - # see : https://github.com/openai/openai-python/blob/3d61ed42aba652b547029095a7eb269ad4e1e957/src/openai/lib/azure.py#L192 - if "/openai/deployments" in azure_endpoint: - # this is base_url, not an azure_endpoint - azure_client_params["base_url"] = azure_endpoint - azure_client_params.pop("azure_endpoint") - - return azure_client_params - - -def get_azure_ad_token_from_oidc(azure_ad_token: str): - azure_client_id = os.getenv("AZURE_CLIENT_ID", None) - azure_tenant_id = os.getenv("AZURE_TENANT_ID", None) - azure_authority_host = os.getenv( - "AZURE_AUTHORITY_HOST", "https://login.microsoftonline.com" - ) - - if azure_client_id is None or azure_tenant_id is None: - raise AzureOpenAIError( - status_code=422, - 
message="AZURE_CLIENT_ID and AZURE_TENANT_ID must be set", - ) - - oidc_token = get_secret(azure_ad_token) - - if oidc_token is None: - raise AzureOpenAIError( - status_code=401, - message="OIDC token could not be retrieved from secret manager.", - ) - - azure_ad_token_cache_key = json.dumps( - { - "azure_client_id": azure_client_id, - "azure_tenant_id": azure_tenant_id, - "azure_authority_host": azure_authority_host, - "oidc_token": oidc_token, - } - ) - - azure_ad_token_access_token = azure_ad_cache.get_cache(azure_ad_token_cache_key) - if azure_ad_token_access_token is not None: - return azure_ad_token_access_token - - client = litellm.module_level_client - req_token = client.post( - f"{azure_authority_host}/{azure_tenant_id}/oauth2/v2.0/token", - data={ - "client_id": azure_client_id, - "grant_type": "client_credentials", - "scope": "https://cognitiveservices.azure.com/.default", - "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", - "client_assertion": oidc_token, - }, - ) - - if req_token.status_code != 200: - raise AzureOpenAIError( - status_code=req_token.status_code, - message=req_token.text, - ) - - azure_ad_token_json = req_token.json() - azure_ad_token_access_token = azure_ad_token_json.get("access_token", None) - azure_ad_token_expires_in = azure_ad_token_json.get("expires_in", None) - - if azure_ad_token_access_token is None: - raise AzureOpenAIError( - status_code=422, message="Azure AD Token access_token not returned" - ) - - if azure_ad_token_expires_in is None: - raise AzureOpenAIError( - status_code=422, message="Azure AD Token expires_in not returned" - ) - - azure_ad_cache.set_cache( - key=azure_ad_token_cache_key, - value=azure_ad_token_access_token, - ttl=azure_ad_token_expires_in, - ) - - return azure_ad_token_access_token - - def _check_dynamic_azure_params( azure_client_params: dict, azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]], @@ -206,7 +120,7 @@ def _check_dynamic_azure_params( return False -class AzureChatCompletion(BaseLLM): +class AzureChatCompletion(BaseAzureLLM, BaseLLM): def __init__(self) -> None: super().__init__() @@ -238,27 +152,16 @@ class AzureChatCompletion(BaseLLM): timeout: Union[float, httpx.Timeout], client: Optional[Any], client_type: Literal["sync", "async"], + litellm_params: Optional[dict] = None, ): # init AzureOpenAI Client - azure_client_params: Dict[str, Any] = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params + azure_client_params: Dict[str, Any] = self.initialize_azure_sdk_client( + litellm_params=litellm_params or {}, + api_key=api_key, + model_name=model, + api_version=api_version, + api_base=api_base, ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider if client is None: if client_type == "sync": azure_client = AzureOpenAI(**azure_client_params) # type: ignore @@ -357,6 +260,13 @@ class AzureChatCompletion(BaseLLM): max_retries = DEFAULT_MAX_RETRIES json_mode: Optional[bool] = optional_params.pop("json_mode", False) + 
azure_client_params = self.initialize_azure_sdk_client( + litellm_params=litellm_params or {}, + api_key=api_key, + api_base=api_base, + model_name=model, + api_version=api_version, + ) ### CHECK IF CLOUDFLARE AI GATEWAY ### ### if so - set the model as part of the base url if "gateway.ai.cloudflare.com" in api_base: @@ -417,6 +327,7 @@ class AzureChatCompletion(BaseLLM): timeout=timeout, client=client, max_retries=max_retries, + azure_client_params=azure_client_params, ) else: return self.acompletion( @@ -434,6 +345,7 @@ class AzureChatCompletion(BaseLLM): logging_obj=logging_obj, max_retries=max_retries, convert_tool_call_to_json_mode=json_mode, + azure_client_params=azure_client_params, ) elif "stream" in optional_params and optional_params["stream"] is True: return self.streaming( @@ -470,28 +382,6 @@ class AzureChatCompletion(BaseLLM): status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = ( - azure_ad_token_provider - ) - if ( client is None or not isinstance(client, AzureOpenAI) @@ -540,10 +430,14 @@ class AzureChatCompletion(BaseLLM): status_code = getattr(e, "status_code", 500) error_headers = getattr(e, "headers", None) error_response = getattr(e, "response", None) + error_body = getattr(e, "body", None) if error_headers is None and error_response: error_headers = getattr(error_response, "headers", None) raise AzureOpenAIError( - status_code=status_code, message=str(e), headers=error_headers + status_code=status_code, + message=str(e), + headers=error_headers, + body=error_body, ) async def acompletion( @@ -562,30 +456,10 @@ class AzureChatCompletion(BaseLLM): azure_ad_token_provider: Optional[Callable] = None, convert_tool_call_to_json_mode: Optional[bool] = None, client=None, # this is the AsyncAzureOpenAI + azure_client_params: dict = {}, ): response = None try: - # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.aclient_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider - # setting Azure client if client is None or dynamic_params: azure_client = AsyncAzureOpenAI(**azure_client_params) @@ -649,6 +523,7 @@ class AzureChatCompletion(BaseLLM): raise AzureOpenAIError(status_code=500, message=str(e)) except Exception as e: message = getattr(e, "message", str(e)) + body = getattr(e, 
"body", None) ## LOGGING logging_obj.post_call( input=data["messages"], @@ -659,7 +534,7 @@ class AzureChatCompletion(BaseLLM): if hasattr(e, "status_code"): raise e else: - raise AzureOpenAIError(status_code=500, message=message) + raise AzureOpenAIError(status_code=500, message=message, body=body) def streaming( self, @@ -742,28 +617,9 @@ class AzureChatCompletion(BaseLLM): azure_ad_token: Optional[str] = None, azure_ad_token_provider: Optional[Callable] = None, client=None, + azure_client_params: dict = {}, ): try: - # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.aclient_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider if client is None or dynamic_params: azure_client = AsyncAzureOpenAI(**azure_client_params) else: @@ -805,10 +661,14 @@ class AzureChatCompletion(BaseLLM): error_headers = getattr(e, "headers", None) error_response = getattr(e, "response", None) message = getattr(e, "message", str(e)) + error_body = getattr(e, "body", None) if error_headers is None and error_response: error_headers = getattr(error_response, "headers", None) raise AzureOpenAIError( - status_code=status_code, message=message, headers=error_headers + status_code=status_code, + message=message, + headers=error_headers, + body=error_body, ) async def aembedding( @@ -824,6 +684,7 @@ class AzureChatCompletion(BaseLLM): ): response = None try: + if client is None: openai_aclient = AsyncAzureOpenAI(**azure_client_params) else: @@ -875,6 +736,7 @@ class AzureChatCompletion(BaseLLM): client=None, aembedding=None, headers: Optional[dict] = None, + litellm_params: Optional[dict] = None, ) -> EmbeddingResponse: if headers: optional_params["extra_headers"] = headers @@ -890,29 +752,14 @@ class AzureChatCompletion(BaseLLM): ) # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if aembedding: - azure_client_params["http_client"] = litellm.aclient_session - else: - azure_client_params["http_client"] = litellm.client_session - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider + azure_client_params = self.initialize_azure_sdk_client( + litellm_params=litellm_params or {}, + api_key=api_key, + model_name=model, + api_version=api_version, + api_base=api_base, + ) ## LOGGING logging_obj.pre_call( input=input, @@ -1272,6 +1119,7 @@ class AzureChatCompletion(BaseLLM): azure_ad_token_provider: Optional[Callable] = None, client=None, 
aimg_generation=None, + litellm_params: Optional[dict] = None, ) -> ImageResponse: try: if model and len(model) > 0: @@ -1296,25 +1144,13 @@ class AzureChatCompletion(BaseLLM): ) # init AzureOpenAI Client - azure_client_params: Dict[str, Any] = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params + azure_client_params: Dict[str, Any] = self.initialize_azure_sdk_client( + litellm_params=litellm_params or {}, + api_key=api_key, + model_name=model or "", + api_version=api_version, + api_base=api_base, ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - if azure_ad_token.startswith("oidc/"): - azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) - azure_client_params["azure_ad_token"] = azure_ad_token - elif azure_ad_token_provider is not None: - azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider - if aimg_generation is True: return self.aimage_generation(data=data, input=input, logging_obj=logging_obj, model_response=model_response, api_key=api_key, client=client, azure_client_params=azure_client_params, timeout=timeout, headers=headers) # type: ignore @@ -1377,6 +1213,7 @@ class AzureChatCompletion(BaseLLM): azure_ad_token_provider: Optional[Callable] = None, aspeech: Optional[bool] = None, client=None, + litellm_params: Optional[dict] = None, ) -> HttpxBinaryResponseContent: max_retries = optional_params.pop("max_retries", 2) @@ -1395,6 +1232,7 @@ class AzureChatCompletion(BaseLLM): max_retries=max_retries, timeout=timeout, client=client, + litellm_params=litellm_params, ) # type: ignore azure_client: AzureOpenAI = self._get_sync_azure_client( @@ -1408,6 +1246,7 @@ class AzureChatCompletion(BaseLLM): timeout=timeout, client=client, client_type="sync", + litellm_params=litellm_params, ) # type: ignore response = azure_client.audio.speech.create( @@ -1432,6 +1271,7 @@ class AzureChatCompletion(BaseLLM): max_retries: int, timeout: Union[float, httpx.Timeout], client=None, + litellm_params: Optional[dict] = None, ) -> HttpxBinaryResponseContent: azure_client: AsyncAzureOpenAI = self._get_sync_azure_client( @@ -1445,6 +1285,7 @@ class AzureChatCompletion(BaseLLM): timeout=timeout, client=client, client_type="async", + litellm_params=litellm_params, ) # type: ignore azure_response = await azure_client.audio.speech.create( diff --git a/litellm/llms/azure/batches/handler.py b/litellm/llms/azure/batches/handler.py index 5fae527670..1b93c526d5 100644 --- a/litellm/llms/azure/batches/handler.py +++ b/litellm/llms/azure/batches/handler.py @@ -2,11 +2,10 @@ Azure Batches API Handler """ -from typing import Any, Coroutine, Optional, Union +from typing import Any, Coroutine, Optional, Union, cast import httpx -import litellm from litellm.llms.azure.azure import AsyncAzureOpenAI, AzureOpenAI from litellm.types.llms.openai import ( Batch, @@ -14,9 +13,12 @@ from litellm.types.llms.openai import ( CreateBatchRequest, RetrieveBatchRequest, ) +from litellm.types.utils import LiteLLMBatch + +from ..common_utils import BaseAzureLLM -class AzureBatchesAPI: +class AzureBatchesAPI(BaseAzureLLM): """ Azure methods to support for batches - create_batch() @@ -28,45 +30,13 @@ class AzureBatchesAPI: def __init__(self) -> None: super().__init__() - def get_azure_openai_client( - self, - api_key: Optional[str], - api_base: Optional[str], - 
timeout: Union[float, httpx.Timeout], - max_retries: Optional[int], - api_version: Optional[str] = None, - client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, - _is_async: bool = False, - ) -> Optional[Union[AzureOpenAI, AsyncAzureOpenAI]]: - received_args = locals() - openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None - if client is None: - data = {} - for k, v in received_args.items(): - if k == "self" or k == "client" or k == "_is_async": - pass - elif k == "api_base" and v is not None: - data["azure_endpoint"] = v - elif v is not None: - data[k] = v - if "api_version" not in data: - data["api_version"] = litellm.AZURE_DEFAULT_API_VERSION - if _is_async is True: - openai_client = AsyncAzureOpenAI(**data) - else: - openai_client = AzureOpenAI(**data) # type: ignore - else: - openai_client = client - - return openai_client - async def acreate_batch( self, create_batch_data: CreateBatchRequest, azure_client: AsyncAzureOpenAI, - ) -> Batch: + ) -> LiteLLMBatch: response = await azure_client.batches.create(**create_batch_data) - return response + return LiteLLMBatch(**response.model_dump()) def create_batch( self, @@ -78,16 +48,16 @@ class AzureBatchesAPI: timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, - ) -> Union[Batch, Coroutine[Any, Any, Batch]]: + litellm_params: Optional[dict] = None, + ) -> Union[LiteLLMBatch, Coroutine[Any, Any, LiteLLMBatch]]: azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( self.get_azure_openai_client( api_key=api_key, api_base=api_base, - timeout=timeout, api_version=api_version, - max_retries=max_retries, client=client, _is_async=_is_async, + litellm_params=litellm_params or {}, ) ) if azure_client is None: @@ -103,16 +73,16 @@ class AzureBatchesAPI: return self.acreate_batch( # type: ignore create_batch_data=create_batch_data, azure_client=azure_client ) - response = azure_client.batches.create(**create_batch_data) - return response + response = cast(AzureOpenAI, azure_client).batches.create(**create_batch_data) + return LiteLLMBatch(**response.model_dump()) async def aretrieve_batch( self, retrieve_batch_data: RetrieveBatchRequest, client: AsyncAzureOpenAI, - ) -> Batch: + ) -> LiteLLMBatch: response = await client.batches.retrieve(**retrieve_batch_data) - return response + return LiteLLMBatch(**response.model_dump()) def retrieve_batch( self, @@ -124,16 +94,16 @@ class AzureBatchesAPI: timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AzureOpenAI] = None, + litellm_params: Optional[dict] = None, ): azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( self.get_azure_openai_client( api_key=api_key, api_base=api_base, api_version=api_version, - timeout=timeout, - max_retries=max_retries, client=client, _is_async=_is_async, + litellm_params=litellm_params or {}, ) ) if azure_client is None: @@ -149,8 +119,10 @@ class AzureBatchesAPI: return self.aretrieve_batch( # type: ignore retrieve_batch_data=retrieve_batch_data, client=azure_client ) - response = azure_client.batches.retrieve(**retrieve_batch_data) - return response + response = cast(AzureOpenAI, azure_client).batches.retrieve( + **retrieve_batch_data + ) + return LiteLLMBatch(**response.model_dump()) async def acancel_batch( self, @@ -170,16 +142,16 @@ class AzureBatchesAPI: timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[AzureOpenAI] = None, + litellm_params: Optional[dict] = None, ): azure_client: 
Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( self.get_azure_openai_client( api_key=api_key, api_base=api_base, api_version=api_version, - timeout=timeout, - max_retries=max_retries, client=client, _is_async=_is_async, + litellm_params=litellm_params or {}, ) ) if azure_client is None: @@ -209,16 +181,16 @@ class AzureBatchesAPI: after: Optional[str] = None, limit: Optional[int] = None, client: Optional[AzureOpenAI] = None, + litellm_params: Optional[dict] = None, ): azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( self.get_azure_openai_client( api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, api_version=api_version, client=client, _is_async=_is_async, + litellm_params=litellm_params or {}, ) ) if azure_client is None: diff --git a/litellm/llms/azure/chat/o_series_handler.py b/litellm/llms/azure/chat/o_series_handler.py index a2042b3e2a..2f3e9e6399 100644 --- a/litellm/llms/azure/chat/o_series_handler.py +++ b/litellm/llms/azure/chat/o_series_handler.py @@ -4,50 +4,69 @@ Handler file for calls to Azure OpenAI's o1/o3 family of models Written separately to handle faking streaming for o1 and o3 models. """ -from typing import Optional, Union +from typing import Any, Callable, Optional, Union import httpx -from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI + +from litellm.types.utils import ModelResponse from ...openai.openai import OpenAIChatCompletion -from ..common_utils import get_azure_openai_client +from ..common_utils import BaseAzureLLM -class AzureOpenAIO1ChatCompletion(OpenAIChatCompletion): - def _get_openai_client( +class AzureOpenAIO1ChatCompletion(BaseAzureLLM, OpenAIChatCompletion): + def completion( self, - is_async: bool, + model_response: ModelResponse, + timeout: Union[float, httpx.Timeout], + optional_params: dict, + litellm_params: dict, + logging_obj: Any, + model: Optional[str] = None, + messages: Optional[list] = None, + print_verbose: Optional[Callable] = None, api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, - timeout: Union[float, httpx.Timeout] = httpx.Timeout(None), - max_retries: Optional[int] = 2, + dynamic_params: Optional[bool] = None, + azure_ad_token: Optional[str] = None, + acompletion: bool = False, + logger_fn=None, + headers: Optional[dict] = None, + custom_prompt_dict: dict = {}, + client=None, organization: Optional[str] = None, - client: Optional[ - Union[OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI] - ] = None, - ) -> Optional[ - Union[ - OpenAI, - AsyncOpenAI, - AzureOpenAI, - AsyncAzureOpenAI, - ] - ]: - - # Override to use Azure-specific client initialization - if not isinstance(client, AzureOpenAI) and not isinstance( - client, AsyncAzureOpenAI - ): - client = None - - return get_azure_openai_client( + custom_llm_provider: Optional[str] = None, + drop_params: Optional[bool] = None, + ): + client = self.get_azure_openai_client( + litellm_params=litellm_params, api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, - organization=organization, api_version=api_version, client=client, - _is_async=is_async, + _is_async=acompletion, + ) + return super().completion( + model_response=model_response, + timeout=timeout, + optional_params=optional_params, + litellm_params=litellm_params, + logging_obj=logging_obj, + model=model, + messages=messages, + print_verbose=print_verbose, + api_key=api_key, + api_base=api_base, + api_version=api_version, + dynamic_params=dynamic_params, + 
azure_ad_token=azure_ad_token, + acompletion=acompletion, + logger_fn=logger_fn, + headers=headers, + custom_prompt_dict=custom_prompt_dict, + client=client, + organization=organization, + custom_llm_provider=custom_llm_provider, + drop_params=drop_params, ) diff --git a/litellm/llms/azure/common_utils.py b/litellm/llms/azure/common_utils.py index 2a96f5c39c..909fcd88a5 100644 --- a/litellm/llms/azure/common_utils.py +++ b/litellm/llms/azure/common_utils.py @@ -1,3 +1,5 @@ +import json +import os from typing import Callable, Optional, Union import httpx @@ -5,9 +7,15 @@ from openai import AsyncAzureOpenAI, AzureOpenAI import litellm from litellm._logging import verbose_logger +from litellm.caching.caching import DualCache from litellm.llms.base_llm.chat.transformation import BaseLLMException +from litellm.secret_managers.get_azure_ad_token_provider import ( + get_azure_ad_token_provider, +) from litellm.secret_managers.main import get_secret_str +azure_ad_cache = DualCache() + class AzureOpenAIError(BaseLLMException): def __init__( @@ -17,6 +25,7 @@ class AzureOpenAIError(BaseLLMException): request: Optional[httpx.Request] = None, response: Optional[httpx.Response] = None, headers: Optional[Union[httpx.Headers, dict]] = None, + body: Optional[dict] = None, ): super().__init__( status_code=status_code, @@ -24,42 +33,10 @@ class AzureOpenAIError(BaseLLMException): request=request, response=response, headers=headers, + body=body, ) -def get_azure_openai_client( - api_key: Optional[str], - api_base: Optional[str], - timeout: Union[float, httpx.Timeout], - max_retries: Optional[int], - api_version: Optional[str] = None, - organization: Optional[str] = None, - client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, - _is_async: bool = False, -) -> Optional[Union[AzureOpenAI, AsyncAzureOpenAI]]: - received_args = locals() - openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None - if client is None: - data = {} - for k, v in received_args.items(): - if k == "self" or k == "client" or k == "_is_async": - pass - elif k == "api_base" and v is not None: - data["azure_endpoint"] = v - elif v is not None: - data[k] = v - if "api_version" not in data: - data["api_version"] = litellm.AZURE_DEFAULT_API_VERSION - if _is_async is True: - openai_client = AsyncAzureOpenAI(**data) - else: - openai_client = AzureOpenAI(**data) # type: ignore - else: - openai_client = client - - return openai_client - - def process_azure_headers(headers: Union[httpx.Headers, dict]) -> dict: openai_headers = {} if "x-ratelimit-limit-requests" in headers: @@ -178,3 +155,199 @@ def get_azure_ad_token_from_username_password( verbose_logger.debug("token_provider %s", token_provider) return token_provider + + +def get_azure_ad_token_from_oidc(azure_ad_token: str): + azure_client_id = os.getenv("AZURE_CLIENT_ID", None) + azure_tenant_id = os.getenv("AZURE_TENANT_ID", None) + azure_authority_host = os.getenv( + "AZURE_AUTHORITY_HOST", "https://login.microsoftonline.com" + ) + + if azure_client_id is None or azure_tenant_id is None: + raise AzureOpenAIError( + status_code=422, + message="AZURE_CLIENT_ID and AZURE_TENANT_ID must be set", + ) + + oidc_token = get_secret_str(azure_ad_token) + + if oidc_token is None: + raise AzureOpenAIError( + status_code=401, + message="OIDC token could not be retrieved from secret manager.", + ) + + azure_ad_token_cache_key = json.dumps( + { + "azure_client_id": azure_client_id, + "azure_tenant_id": azure_tenant_id, + "azure_authority_host": azure_authority_host, + "oidc_token": 
oidc_token, + } + ) + + azure_ad_token_access_token = azure_ad_cache.get_cache(azure_ad_token_cache_key) + if azure_ad_token_access_token is not None: + return azure_ad_token_access_token + + client = litellm.module_level_client + req_token = client.post( + f"{azure_authority_host}/{azure_tenant_id}/oauth2/v2.0/token", + data={ + "client_id": azure_client_id, + "grant_type": "client_credentials", + "scope": "https://cognitiveservices.azure.com/.default", + "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", + "client_assertion": oidc_token, + }, + ) + + if req_token.status_code != 200: + raise AzureOpenAIError( + status_code=req_token.status_code, + message=req_token.text, + ) + + azure_ad_token_json = req_token.json() + azure_ad_token_access_token = azure_ad_token_json.get("access_token", None) + azure_ad_token_expires_in = azure_ad_token_json.get("expires_in", None) + + if azure_ad_token_access_token is None: + raise AzureOpenAIError( + status_code=422, message="Azure AD Token access_token not returned" + ) + + if azure_ad_token_expires_in is None: + raise AzureOpenAIError( + status_code=422, message="Azure AD Token expires_in not returned" + ) + + azure_ad_cache.set_cache( + key=azure_ad_token_cache_key, + value=azure_ad_token_access_token, + ttl=azure_ad_token_expires_in, + ) + + return azure_ad_token_access_token + + +def select_azure_base_url_or_endpoint(azure_client_params: dict): + azure_endpoint = azure_client_params.get("azure_endpoint", None) + if azure_endpoint is not None: + # see : https://github.com/openai/openai-python/blob/3d61ed42aba652b547029095a7eb269ad4e1e957/src/openai/lib/azure.py#L192 + if "/openai/deployments" in azure_endpoint: + # this is base_url, not an azure_endpoint + azure_client_params["base_url"] = azure_endpoint + azure_client_params.pop("azure_endpoint") + + return azure_client_params + + +class BaseAzureLLM: + def get_azure_openai_client( + self, + litellm_params: dict, + api_key: Optional[str], + api_base: Optional[str], + api_version: Optional[str] = None, + client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + _is_async: bool = False, + ) -> Optional[Union[AzureOpenAI, AsyncAzureOpenAI]]: + openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None + if client is None: + azure_client_params = self.initialize_azure_sdk_client( + litellm_params=litellm_params, + api_key=api_key, + api_base=api_base, + model_name="", + api_version=api_version, + ) + if _is_async is True: + openai_client = AsyncAzureOpenAI(**azure_client_params) + else: + openai_client = AzureOpenAI(**azure_client_params) # type: ignore + else: + openai_client = client + + return openai_client + + def initialize_azure_sdk_client( + self, + litellm_params: dict, + api_key: Optional[str], + api_base: Optional[str], + model_name: str, + api_version: Optional[str], + ) -> dict: + + azure_ad_token_provider: Optional[Callable[[], str]] = None + # If we have api_key, then we have higher priority + azure_ad_token = litellm_params.get("azure_ad_token") + tenant_id = litellm_params.get("tenant_id") + client_id = litellm_params.get("client_id") + client_secret = litellm_params.get("client_secret") + azure_username = litellm_params.get("azure_username") + azure_password = litellm_params.get("azure_password") + max_retries = litellm_params.get("max_retries") + timeout = litellm_params.get("timeout") + if not api_key and tenant_id and client_id and client_secret: + verbose_logger.debug("Using Azure AD Token Provider for Azure Auth") + 
azure_ad_token_provider = get_azure_ad_token_from_entrata_id( + tenant_id=tenant_id, + client_id=client_id, + client_secret=client_secret, + ) + if azure_username and azure_password and client_id: + azure_ad_token_provider = get_azure_ad_token_from_username_password( + azure_username=azure_username, + azure_password=azure_password, + client_id=client_id, + ) + + if azure_ad_token is not None and azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) + elif ( + not api_key + and azure_ad_token_provider is None + and litellm.enable_azure_ad_token_refresh is True + ): + try: + azure_ad_token_provider = get_azure_ad_token_provider() + except ValueError: + verbose_logger.debug("Azure AD Token Provider could not be used.") + if api_version is None: + api_version = os.getenv( + "AZURE_API_VERSION", litellm.AZURE_DEFAULT_API_VERSION + ) + + _api_key = api_key + if _api_key is not None and isinstance(_api_key, str): + # only show first 5 chars of api_key + _api_key = _api_key[:8] + "*" * 15 + verbose_logger.debug( + f"Initializing Azure OpenAI Client for {model_name}, Api Base: {str(api_base)}, Api Key:{_api_key}" + ) + azure_client_params = { + "api_key": api_key, + "azure_endpoint": api_base, + "api_version": api_version, + "azure_ad_token": azure_ad_token, + "azure_ad_token_provider": azure_ad_token_provider, + "http_client": litellm.client_session, + } + if max_retries is not None: + azure_client_params["max_retries"] = max_retries + if timeout is not None: + azure_client_params["timeout"] = timeout + + if azure_ad_token_provider is not None: + azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider + # this decides if we should set azure_endpoint or base_url on Azure OpenAI Client + # required to support GPT-4 vision enhancements, since base_url needs to be set on Azure OpenAI Client + + azure_client_params = select_azure_base_url_or_endpoint( + azure_client_params=azure_client_params + ) + + return azure_client_params diff --git a/litellm/llms/azure/completion/handler.py b/litellm/llms/azure/completion/handler.py index fafa5665bb..4ec5c435da 100644 --- a/litellm/llms/azure/completion/handler.py +++ b/litellm/llms/azure/completion/handler.py @@ -6,9 +6,8 @@ import litellm from litellm.litellm_core_utils.prompt_templates.factory import prompt_factory from litellm.utils import CustomStreamWrapper, ModelResponse, TextCompletionResponse -from ...base import BaseLLM from ...openai.completion.transformation import OpenAITextCompletionConfig -from ..common_utils import AzureOpenAIError +from ..common_utils import AzureOpenAIError, BaseAzureLLM openai_text_completion_config = OpenAITextCompletionConfig() @@ -25,7 +24,7 @@ def select_azure_base_url_or_endpoint(azure_client_params: dict): return azure_client_params -class AzureTextCompletion(BaseLLM): +class AzureTextCompletion(BaseAzureLLM): def __init__(self) -> None: super().__init__() @@ -60,7 +59,6 @@ class AzureTextCompletion(BaseLLM): headers: Optional[dict] = None, client=None, ): - super().completion() try: if model is None or messages is None: raise AzureOpenAIError( @@ -72,6 +70,14 @@ class AzureTextCompletion(BaseLLM): messages=messages, model=model, custom_llm_provider="azure_text" ) + azure_client_params = self.initialize_azure_sdk_client( + litellm_params=litellm_params or {}, + api_key=api_key, + model_name=model, + api_version=api_version, + api_base=api_base, + ) + ### CHECK IF CLOUDFLARE AI GATEWAY ### ### if so - set the model as part of the base url if 
"gateway.ai.cloudflare.com" in api_base: @@ -118,6 +124,7 @@ class AzureTextCompletion(BaseLLM): azure_ad_token=azure_ad_token, timeout=timeout, client=client, + azure_client_params=azure_client_params, ) else: return self.acompletion( @@ -132,6 +139,7 @@ class AzureTextCompletion(BaseLLM): client=client, logging_obj=logging_obj, max_retries=max_retries, + azure_client_params=azure_client_params, ) elif "stream" in optional_params and optional_params["stream"] is True: return self.streaming( @@ -144,6 +152,7 @@ class AzureTextCompletion(BaseLLM): azure_ad_token=azure_ad_token, timeout=timeout, client=client, + azure_client_params=azure_client_params, ) else: ## LOGGING @@ -165,22 +174,6 @@ class AzureTextCompletion(BaseLLM): status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": max_retries, - "timeout": timeout, - "azure_ad_token_provider": azure_ad_token_provider, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AzureOpenAI(**azure_client_params) else: @@ -240,26 +233,11 @@ class AzureTextCompletion(BaseLLM): max_retries: int, azure_ad_token: Optional[str] = None, client=None, # this is the AsyncAzureOpenAI + azure_client_params: dict = {}, ): response = None try: # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - azure_client_params["azure_ad_token"] = azure_ad_token - # setting Azure client if client is None: azure_client = AsyncAzureOpenAI(**azure_client_params) @@ -312,6 +290,7 @@ class AzureTextCompletion(BaseLLM): timeout: Any, azure_ad_token: Optional[str] = None, client=None, + azure_client_params: dict = {}, ): max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): @@ -319,21 +298,6 @@ class AzureTextCompletion(BaseLLM): status_code=422, message="max retries must be an int" ) # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": max_retries, - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AzureOpenAI(**azure_client_params) else: @@ -375,24 +339,10 @@ class AzureTextCompletion(BaseLLM): timeout: Any, azure_ad_token: Optional[str] = None, client=None, + azure_client_params: dict = {}, ): try: # init AzureOpenAI Client - azure_client_params = { - "api_version": api_version, - "azure_endpoint": api_base, - "azure_deployment": model, - "http_client": litellm.client_session, - "max_retries": 
data.pop("max_retries", 2), - "timeout": timeout, - } - azure_client_params = select_azure_base_url_or_endpoint( - azure_client_params=azure_client_params - ) - if api_key is not None: - azure_client_params["api_key"] = api_key - elif azure_ad_token is not None: - azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AsyncAzureOpenAI(**azure_client_params) else: diff --git a/litellm/llms/azure/files/handler.py b/litellm/llms/azure/files/handler.py index f442af855e..d45ac9a315 100644 --- a/litellm/llms/azure/files/handler.py +++ b/litellm/llms/azure/files/handler.py @@ -5,13 +5,12 @@ from openai import AsyncAzureOpenAI, AzureOpenAI from openai.types.file_deleted import FileDeleted from litellm._logging import verbose_logger -from litellm.llms.base import BaseLLM from litellm.types.llms.openai import * -from ..common_utils import get_azure_openai_client +from ..common_utils import BaseAzureLLM -class AzureOpenAIFilesAPI(BaseLLM): +class AzureOpenAIFilesAPI(BaseAzureLLM): """ AzureOpenAI methods to support for batches - create_file() @@ -45,14 +44,15 @@ class AzureOpenAIFilesAPI(BaseLLM): timeout: Union[float, httpx.Timeout], max_retries: Optional[int], client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: Optional[dict] = None, ) -> Union[FileObject, Coroutine[Any, Any, FileObject]]: + openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( - get_azure_openai_client( + self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, api_version=api_version, - timeout=timeout, - max_retries=max_retries, client=client, _is_async=_is_async, ) @@ -91,17 +91,16 @@ class AzureOpenAIFilesAPI(BaseLLM): max_retries: Optional[int], api_version: Optional[str] = None, client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: Optional[dict] = None, ) -> Union[ HttpxBinaryResponseContent, Coroutine[Any, Any, HttpxBinaryResponseContent] ]: openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( - get_azure_openai_client( + self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, - timeout=timeout, api_version=api_version, - max_retries=max_retries, - organization=None, client=client, _is_async=_is_async, ) @@ -144,14 +143,13 @@ class AzureOpenAIFilesAPI(BaseLLM): max_retries: Optional[int], api_version: Optional[str] = None, client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: Optional[dict] = None, ): openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( - get_azure_openai_client( + self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, - organization=None, api_version=api_version, client=client, _is_async=_is_async, @@ -197,14 +195,13 @@ class AzureOpenAIFilesAPI(BaseLLM): organization: Optional[str] = None, api_version: Optional[str] = None, client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: Optional[dict] = None, ): openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( - get_azure_openai_client( + self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, - organization=organization, api_version=api_version, client=client, _is_async=_is_async, @@ -252,14 +249,13 @@ class AzureOpenAIFilesAPI(BaseLLM): purpose: Optional[str] = None, api_version: 
Optional[str] = None, client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None, + litellm_params: Optional[dict] = None, ): openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = ( - get_azure_openai_client( + self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, - organization=None, # openai param api_version=api_version, client=client, _is_async=_is_async, diff --git a/litellm/llms/azure/fine_tuning/handler.py b/litellm/llms/azure/fine_tuning/handler.py index c34b181eff..3d7cc336fb 100644 --- a/litellm/llms/azure/fine_tuning/handler.py +++ b/litellm/llms/azure/fine_tuning/handler.py @@ -3,11 +3,11 @@ from typing import Optional, Union import httpx from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI -from litellm.llms.azure.files.handler import get_azure_openai_client +from litellm.llms.azure.common_utils import BaseAzureLLM from litellm.llms.openai.fine_tuning.handler import OpenAIFineTuningAPI -class AzureOpenAIFineTuningAPI(OpenAIFineTuningAPI): +class AzureOpenAIFineTuningAPI(OpenAIFineTuningAPI, BaseAzureLLM): """ AzureOpenAI methods to support fine tuning, inherits from OpenAIFineTuningAPI. """ @@ -24,6 +24,7 @@ class AzureOpenAIFineTuningAPI(OpenAIFineTuningAPI): ] = None, _is_async: bool = False, api_version: Optional[str] = None, + litellm_params: Optional[dict] = None, ) -> Optional[ Union[ OpenAI, @@ -36,12 +37,10 @@ class AzureOpenAIFineTuningAPI(OpenAIFineTuningAPI): if isinstance(client, OpenAI) or isinstance(client, AsyncOpenAI): client = None - return get_azure_openai_client( + return self.get_azure_openai_client( + litellm_params=litellm_params or {}, api_key=api_key, api_base=api_base, - timeout=timeout, - max_retries=max_retries, - organization=organization, api_version=api_version, client=client, _is_async=_is_async, diff --git a/litellm/llms/azure_ai/chat/transformation.py b/litellm/llms/azure_ai/chat/transformation.py index 46a1a6bf9c..154f345537 100644 --- a/litellm/llms/azure_ai/chat/transformation.py +++ b/litellm/llms/azure_ai/chat/transformation.py @@ -16,10 +16,23 @@ from litellm.llms.openai.openai import OpenAIConfig from litellm.secret_managers.main import get_secret_str from litellm.types.llms.openai import AllMessageValues from litellm.types.utils import ModelResponse, ProviderField -from litellm.utils import _add_path_to_api_base +from litellm.utils import _add_path_to_api_base, supports_tool_choice class AzureAIStudioConfig(OpenAIConfig): + def get_supported_openai_params(self, model: str) -> List: + model_supports_tool_choice = True # azure ai supports this by default + if not supports_tool_choice(model=f"azure_ai/{model}"): + model_supports_tool_choice = False + supported_params = super().get_supported_openai_params(model) + if not model_supports_tool_choice: + filtered_supported_params = [] + for param in supported_params: + if param != "tool_choice": + filtered_supported_params.append(param) + return filtered_supported_params + return supported_params + def validate_environment( self, headers: dict, @@ -54,6 +67,7 @@ class AzureAIStudioConfig(OpenAIConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ @@ -79,12 +93,14 @@ class AzureAIStudioConfig(OpenAIConfig): original_url = httpx.URL(api_base) # Extract api_version or use default - api_version = cast(Optional[str], optional_params.get("api_version")) + api_version = cast(Optional[str], 
litellm_params.get("api_version")) - # Check if 'api-version' is already present - if "api-version" not in original_url.params and api_version: - # Add api_version to optional_params - original_url.params["api-version"] = api_version + # Create a new dictionary with existing params + query_params = dict(original_url.params) + + # Add api_version if needed + if "api-version" not in query_params and api_version: + query_params["api-version"] = api_version # Add the path to the base URL if "services.ai.azure.com" in api_base: @@ -96,8 +112,7 @@ class AzureAIStudioConfig(OpenAIConfig): api_base=api_base, ending_path="/chat/completions" ) - # Convert optional_params to query parameters - query_params = original_url.params + # Use the new query_params dictionary final_url = httpx.URL(new_url).copy_with(params=query_params) return str(final_url) diff --git a/litellm/llms/base_llm/anthropic_messages/transformation.py b/litellm/llms/base_llm/anthropic_messages/transformation.py new file mode 100644 index 0000000000..7619ffbbf6 --- /dev/null +++ b/litellm/llms/base_llm/anthropic_messages/transformation.py @@ -0,0 +1,35 @@ +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any, Optional + +if TYPE_CHECKING: + from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj + + LiteLLMLoggingObj = _LiteLLMLoggingObj +else: + LiteLLMLoggingObj = Any + + +class BaseAnthropicMessagesConfig(ABC): + @abstractmethod + def validate_environment( + self, + headers: dict, + model: str, + api_key: Optional[str] = None, + ) -> dict: + pass + + @abstractmethod + def get_complete_url(self, api_base: Optional[str], model: str) -> str: + """ + OPTIONAL + + Get the complete url for the request + + Some providers need `model` in `api_base` + """ + return api_base or "" + + @abstractmethod + def get_supported_anthropic_messages_params(self, model: str) -> list: + pass diff --git a/litellm/llms/base_llm/audio_transcription/transformation.py b/litellm/llms/base_llm/audio_transcription/transformation.py index 66140455d9..e550c574e2 100644 --- a/litellm/llms/base_llm/audio_transcription/transformation.py +++ b/litellm/llms/base_llm/audio_transcription/transformation.py @@ -30,6 +30,7 @@ class BaseAudioTranscriptionConfig(BaseConfig, ABC): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/base_llm/chat/transformation.py b/litellm/llms/base_llm/chat/transformation.py index d05b79dd7f..1b5a6bc58e 100644 --- a/litellm/llms/base_llm/chat/transformation.py +++ b/litellm/llms/base_llm/chat/transformation.py @@ -51,6 +51,7 @@ class BaseLLMException(Exception): headers: Optional[Union[dict, httpx.Headers]] = None, request: Optional[httpx.Request] = None, response: Optional[httpx.Response] = None, + body: Optional[dict] = None, ): self.status_code = status_code self.message: str = message @@ -67,6 +68,7 @@ class BaseLLMException(Exception): self.response = httpx.Response( status_code=status_code, request=self.request ) + self.body = body super().__init__( self.message ) # Call the base class constructor with the parameters it needs @@ -268,6 +270,7 @@ class BaseConfig(ABC): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/base_llm/completion/transformation.py b/litellm/llms/base_llm/completion/transformation.py index ca258c2562..9432f02da1 100644 --- 
a/litellm/llms/base_llm/completion/transformation.py +++ b/litellm/llms/base_llm/completion/transformation.py @@ -31,6 +31,7 @@ class BaseTextCompletionConfig(BaseConfig, ABC): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/base_llm/embedding/transformation.py b/litellm/llms/base_llm/embedding/transformation.py index 940c6bf225..68c0a7c05a 100644 --- a/litellm/llms/base_llm/embedding/transformation.py +++ b/litellm/llms/base_llm/embedding/transformation.py @@ -45,6 +45,7 @@ class BaseEmbeddingConfig(BaseConfig, ABC): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/base_llm/image_variations/transformation.py b/litellm/llms/base_llm/image_variations/transformation.py index dcb53bea94..4d1cd6eebb 100644 --- a/litellm/llms/base_llm/image_variations/transformation.py +++ b/litellm/llms/base_llm/image_variations/transformation.py @@ -36,6 +36,7 @@ class BaseImageVariationConfig(BaseConfig, ABC): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/base_llm/responses/transformation.py b/litellm/llms/base_llm/responses/transformation.py new file mode 100644 index 0000000000..c41d63842b --- /dev/null +++ b/litellm/llms/base_llm/responses/transformation.py @@ -0,0 +1,133 @@ +import types +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any, Dict, Optional, Union + +import httpx + +from litellm.types.llms.openai import ( + ResponseInputParam, + ResponsesAPIOptionalRequestParams, + ResponsesAPIRequestParams, + ResponsesAPIResponse, + ResponsesAPIStreamingResponse, +) +from litellm.types.router import GenericLiteLLMParams + +if TYPE_CHECKING: + from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj + + from ..chat.transformation import BaseLLMException as _BaseLLMException + + LiteLLMLoggingObj = _LiteLLMLoggingObj + BaseLLMException = _BaseLLMException +else: + LiteLLMLoggingObj = Any + BaseLLMException = Any + + +class BaseResponsesAPIConfig(ABC): + def __init__(self): + pass + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not k.startswith("_abc") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + + @abstractmethod + def get_supported_openai_params(self, model: str) -> list: + pass + + @abstractmethod + def map_openai_params( + self, + response_api_optional_params: ResponsesAPIOptionalRequestParams, + model: str, + drop_params: bool, + ) -> Dict: + + pass + + @abstractmethod + def validate_environment( + self, + headers: dict, + model: str, + api_key: Optional[str] = None, + ) -> dict: + return {} + + @abstractmethod + def get_complete_url( + self, + api_base: Optional[str], + model: str, + stream: Optional[bool] = None, + ) -> str: + """ + OPTIONAL + + Get the complete url for the request + + Some providers need `model` in `api_base` + """ + if api_base is None: + raise ValueError("api_base is required") + return api_base + + @abstractmethod + def transform_responses_api_request( + self, + model: str, + input: Union[str, ResponseInputParam], + response_api_optional_request_params: Dict, + litellm_params: GenericLiteLLMParams, + headers: dict, + ) -> 
ResponsesAPIRequestParams: + pass + + @abstractmethod + def transform_response_api_response( + self, + model: str, + raw_response: httpx.Response, + logging_obj: LiteLLMLoggingObj, + ) -> ResponsesAPIResponse: + pass + + @abstractmethod + def transform_streaming_response( + self, + model: str, + parsed_chunk: dict, + logging_obj: LiteLLMLoggingObj, + ) -> ResponsesAPIStreamingResponse: + """ + Transform a parsed streaming response chunk into a ResponsesAPIStreamingResponse + """ + pass + + def get_error_class( + self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers] + ) -> BaseLLMException: + from ..chat.transformation import BaseLLMException + + raise BaseLLMException( + status_code=status_code, + message=error_message, + headers=headers, + ) diff --git a/litellm/llms/bedrock/base_aws_llm.py b/litellm/llms/bedrock/base_aws_llm.py index bf9a070f26..86b47675d4 100644 --- a/litellm/llms/bedrock/base_aws_llm.py +++ b/litellm/llms/bedrock/base_aws_llm.py @@ -554,6 +554,7 @@ class BaseAWSLLM: aws_access_key_id = optional_params.pop("aws_access_key_id", None) aws_session_token = optional_params.pop("aws_session_token", None) aws_region_name = self._get_aws_region_name(optional_params, model) + optional_params.pop("aws_region_name", None) aws_role_name = optional_params.pop("aws_role_name", None) aws_session_name = optional_params.pop("aws_session_name", None) aws_profile_name = optional_params.pop("aws_profile_name", None) diff --git a/litellm/llms/bedrock/chat/converse_transformation.py b/litellm/llms/bedrock/chat/converse_transformation.py index 18ec425e44..0b0d55f23d 100644 --- a/litellm/llms/bedrock/chat/converse_transformation.py +++ b/litellm/llms/bedrock/chat/converse_transformation.py @@ -272,7 +272,7 @@ class AmazonConverseConfig(BaseConfig): optional_params["temperature"] = value if param == "top_p": optional_params["topP"] = value - if param == "tools": + if param == "tools" and isinstance(value, list): optional_params = self._add_tools_to_optional_params( optional_params=optional_params, tools=value ) @@ -598,7 +598,7 @@ class AmazonConverseConfig(BaseConfig): if _text is not None: _thinking_block["thinking"] = _text if _signature is not None: - _thinking_block["signature_delta"] = _signature + _thinking_block["signature"] = _signature thinking_blocks_list.append(_thinking_block) return thinking_blocks_list diff --git a/litellm/llms/bedrock/chat/invoke_handler.py b/litellm/llms/bedrock/chat/invoke_handler.py index 56cf891e76..44e5b40380 100644 --- a/litellm/llms/bedrock/chat/invoke_handler.py +++ b/litellm/llms/bedrock/chat/invoke_handler.py @@ -1231,7 +1231,9 @@ class AWSEventStreamDecoder: if len(self.content_blocks) == 0: return False - if "text" in self.content_blocks[0]: + if ( + "toolUse" not in self.content_blocks[0] + ): # be explicit - only do this if tool use block, as this is to prevent json decoding errors return False for block in self.content_blocks: @@ -1260,6 +1262,9 @@ class AWSEventStreamDecoder: _thinking_block = ChatCompletionThinkingBlock(type="thinking") if "text" in thinking_block: _thinking_block["thinking"] = thinking_block["text"] + elif "signature" in thinking_block: + _thinking_block["signature"] = thinking_block["signature"] + _thinking_block["thinking"] = "" # consistent with anthropic response thinking_blocks_list.append(_thinking_block) return thinking_blocks_list @@ -1322,6 +1327,12 @@ class AWSEventStreamDecoder: thinking_blocks = self.translate_thinking_blocks( delta_obj["reasoningContent"] ) + if ( + thinking_blocks + and 
len(thinking_blocks) > 0 + and reasoning_content is None + ): + reasoning_content = "" # set to an empty string (not None) for consistency with Anthropic elif ( "contentBlockIndex" in chunk_data ): # stop block, no 'start' or 'delta' object diff --git a/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py b/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py index a316eb7ea6..133eb659df 100644 --- a/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py +++ b/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py @@ -76,6 +76,7 @@ class AmazonInvokeConfig(BaseConfig, BaseAWSLLM): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ @@ -129,7 +130,6 @@ class AmazonInvokeConfig(BaseConfig, BaseAWSLLM): ## CREDENTIALS ## # pop aws_secret_access_key, aws_access_key_id, aws_session_token, aws_region_name from kwargs, since completion calls fail with them - extra_headers = optional_params.get("extra_headers", None) aws_secret_access_key = optional_params.get("aws_secret_access_key", None) aws_access_key_id = optional_params.get("aws_access_key_id", None) aws_session_token = optional_params.get("aws_session_token", None) @@ -155,9 +155,10 @@ class AmazonInvokeConfig(BaseConfig, BaseAWSLLM): ) sigv4 = SigV4Auth(credentials, "bedrock", aws_region_name) - headers = {"Content-Type": "application/json"} - if extra_headers is not None: - headers = {"Content-Type": "application/json", **extra_headers} + if headers is not None: + headers = {"Content-Type": "application/json", **headers} + else: + headers = {"Content-Type": "application/json"} request = AWSRequest( method="POST", @@ -166,12 +167,13 @@ class AmazonInvokeConfig(BaseConfig, BaseAWSLLM): headers=headers, ) sigv4.add_auth(request) - if ( - extra_headers is not None and "Authorization" in extra_headers - ): # prevent sigv4 from overwriting the auth header - request.headers["Authorization"] = extra_headers["Authorization"] - return dict(request.headers) + request_headers_dict = dict(request.headers) + if ( + headers is not None and "Authorization" in headers + ): # prevent sigv4 from overwriting the auth header + request_headers_dict["Authorization"] = headers["Authorization"] + return request_headers_dict def transform_request( self, @@ -443,7 +445,7 @@ class AmazonInvokeConfig(BaseConfig, BaseAWSLLM): api_key: Optional[str] = None, api_base: Optional[str] = None, ) -> dict: - return {} + return headers def get_error_class( self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers] diff --git a/litellm/llms/bedrock/image/amazon_nova_canvas_transformation.py b/litellm/llms/bedrock/image/amazon_nova_canvas_transformation.py new file mode 100644 index 0000000000..de46edb923 --- /dev/null +++ b/litellm/llms/bedrock/image/amazon_nova_canvas_transformation.py @@ -0,0 +1,106 @@ +import types +from typing import List, Optional + +from openai.types.image import Image + +from litellm.types.llms.bedrock import ( + AmazonNovaCanvasTextToImageRequest, AmazonNovaCanvasTextToImageResponse, + AmazonNovaCanvasTextToImageParams, AmazonNovaCanvasRequestBase, +) +from litellm.types.utils import ImageResponse + + +class AmazonNovaCanvasConfig: + """ + Reference: https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/model-catalog/serverless/amazon.nova-canvas-v1:0 + + """ + + @classmethod + def get_config(cls): + return { + k: v + for k, v in 
cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + + @classmethod + def get_supported_openai_params(cls, model: Optional[str] = None) -> List: + """Returns the OpenAI image-generation params supported by Nova Canvas.""" + return ["n", "size", "quality"] + + @classmethod + def _is_nova_model(cls, model: Optional[str] = None) -> bool: + """ + Returns True if the model is a Nova Canvas model + + Nova Canvas models follow this pattern: amazon.nova-canvas-* + + """ + if model: + if "amazon.nova-canvas" in model: + return True + return False + + @classmethod + def transform_request_body( + cls, text: str, optional_params: dict + ) -> AmazonNovaCanvasRequestBase: + """ + Transform the request body for Amazon Nova Canvas model + """ + task_type = optional_params.pop("taskType", "TEXT_IMAGE") + image_generation_config = optional_params.pop("imageGenerationConfig", {}) + image_generation_config = {**image_generation_config, **optional_params} + if task_type == "TEXT_IMAGE": + text_to_image_params = image_generation_config.pop("textToImageParams", {}) + text_to_image_params = {"text": text, **text_to_image_params} + text_to_image_params = AmazonNovaCanvasTextToImageParams(**text_to_image_params) + return AmazonNovaCanvasTextToImageRequest(textToImageParams=text_to_image_params, taskType=task_type, + imageGenerationConfig=image_generation_config) + raise NotImplementedError(f"Task type {task_type} is not supported") + + @classmethod + def map_openai_params(cls, non_default_params: dict, optional_params: dict) -> dict: + """ + Map the OpenAI params to the Bedrock params + """ + _size = non_default_params.get("size") + if _size is not None: + width, height = _size.split("x") + optional_params["width"], optional_params["height"] = int(width), int(height) + if non_default_params.get("n") is not None: + optional_params["numberOfImages"] = non_default_params.get("n") + if non_default_params.get("quality") is not None: + if non_default_params.get("quality") in ("hd", "premium"): + optional_params["quality"] = "premium" + if non_default_params.get("quality") == "standard": + optional_params["quality"] = "standard" + return optional_params + + @classmethod + def transform_response_dict_to_openai_response( + cls, model_response: ImageResponse, response_dict: dict + ) -> ImageResponse: + """ + Transform the response dict to the OpenAI response + """ + + nova_response = AmazonNovaCanvasTextToImageResponse(**response_dict) + openai_images: List[Image] = [] + for _img in nova_response.get("images", []): + openai_images.append(Image(b64_json=_img)) + + model_response.data = openai_images + return model_response diff --git a/litellm/llms/bedrock/image/image_handler.py b/litellm/llms/bedrock/image/image_handler.py index 4bd63fd21b..8f7762e547 100644 --- a/litellm/llms/bedrock/image/image_handler.py +++ b/litellm/llms/bedrock/image/image_handler.py @@ -10,6 +10,8 @@ import litellm from litellm._logging import verbose_logger from litellm.litellm_core_utils.litellm_logging import Logging as LitellmLogging from litellm.llms.custom_httpx.http_handler import ( + AsyncHTTPHandler, + HTTPHandler, _get_httpx_client, get_async_httpx_client, ) @@ -51,6 +53,7 @@ class BedrockImageGeneration(BaseAWSLLM): aimg_generation: bool = False, api_base: Optional[str] = None, extra_headers: Optional[dict] = None, + client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, ): prepared_request = self._prepare_request( model=model, @@ -69,9 +72,15 @@ class
BedrockImageGeneration(BaseAWSLLM): logging_obj=logging_obj, prompt=prompt, model_response=model_response, + client=( + client + if client is not None and isinstance(client, AsyncHTTPHandler) + else None + ), ) - client = _get_httpx_client() + if client is None or not isinstance(client, HTTPHandler): + client = _get_httpx_client() try: response = client.post(url=prepared_request.endpoint_url, headers=prepared_request.prepped.headers, data=prepared_request.body) # type: ignore response.raise_for_status() @@ -99,13 +108,14 @@ class BedrockImageGeneration(BaseAWSLLM): logging_obj: LitellmLogging, prompt: str, model_response: ImageResponse, + client: Optional[AsyncHTTPHandler] = None, ) -> ImageResponse: """ Asynchronous handler for bedrock image generation Awaits the response from the bedrock image generation endpoint """ - async_client = get_async_httpx_client( + async_client = client or get_async_httpx_client( llm_provider=litellm.LlmProviders.BEDROCK, params={"timeout": timeout}, ) @@ -256,6 +266,8 @@ class BedrockImageGeneration(BaseAWSLLM): "text_prompts": [{"text": prompt, "weight": 1}], **inference_params, } + elif provider == "amazon": + return dict(litellm.AmazonNovaCanvasConfig.transform_request_body(text=prompt, optional_params=optional_params)) else: raise BedrockError( status_code=422, message=f"Unsupported model={model}, passed in" @@ -291,6 +303,7 @@ class BedrockImageGeneration(BaseAWSLLM): config_class = ( litellm.AmazonStability3Config if litellm.AmazonStability3Config._is_stability_3_model(model=model) + else litellm.AmazonNovaCanvasConfig if litellm.AmazonNovaCanvasConfig._is_nova_model(model=model) else litellm.AmazonStabilityConfig ) config_class.transform_response_dict_to_openai_response( diff --git a/litellm/llms/cloudflare/chat/transformation.py b/litellm/llms/cloudflare/chat/transformation.py index 555e3c21f4..83c7483df9 100644 --- a/litellm/llms/cloudflare/chat/transformation.py +++ b/litellm/llms/cloudflare/chat/transformation.py @@ -79,6 +79,7 @@ class CloudflareChatConfig(BaseConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: if api_base is None: diff --git a/litellm/llms/codestral/completion/transformation.py b/litellm/llms/codestral/completion/transformation.py index 84551cd553..5955e91deb 100644 --- a/litellm/llms/codestral/completion/transformation.py +++ b/litellm/llms/codestral/completion/transformation.py @@ -84,7 +84,9 @@ class CodestralTextCompletionConfig(OpenAITextCompletionConfig): finish_reason = None logprobs = None - chunk_data = chunk_data.replace("data:", "") + chunk_data = ( + litellm.CustomStreamWrapper._strip_sse_data_from_chunk(chunk_data) or "" + ) chunk_data = chunk_data.strip() if len(chunk_data) == 0 or chunk_data == "[DONE]": return { diff --git a/litellm/llms/custom_httpx/aiohttp_handler.py b/litellm/llms/custom_httpx/aiohttp_handler.py index 4a9e07016f..c865fee17e 100644 --- a/litellm/llms/custom_httpx/aiohttp_handler.py +++ b/litellm/llms/custom_httpx/aiohttp_handler.py @@ -234,6 +234,7 @@ class BaseLLMAIOHTTPHandler: api_base=api_base, model=model, optional_params=optional_params, + litellm_params=litellm_params, stream=stream, ) @@ -483,6 +484,7 @@ class BaseLLMAIOHTTPHandler: api_base=api_base, model=model, optional_params=optional_params, + litellm_params=litellm_params, stream=False, ) diff --git a/litellm/llms/custom_httpx/llm_http_handler.py b/litellm/llms/custom_httpx/llm_http_handler.py index 991e4aeaec..01fe36acda 100644 --- 
a/litellm/llms/custom_httpx/llm_http_handler.py +++ b/litellm/llms/custom_httpx/llm_http_handler.py @@ -1,6 +1,6 @@ import io import json -from typing import TYPE_CHECKING, Any, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Coroutine, Dict, Optional, Tuple, Union import httpx # type: ignore @@ -11,13 +11,21 @@ import litellm.types.utils from litellm.llms.base_llm.chat.transformation import BaseConfig from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig from litellm.llms.base_llm.rerank.transformation import BaseRerankConfig +from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig from litellm.llms.custom_httpx.http_handler import ( AsyncHTTPHandler, HTTPHandler, _get_httpx_client, get_async_httpx_client, ) +from litellm.responses.streaming_iterator import ( + BaseResponsesAPIStreamingIterator, + ResponsesAPIStreamingIterator, + SyncResponsesAPIStreamingIterator, +) +from litellm.types.llms.openai import ResponseInputParam, ResponsesAPIResponse from litellm.types.rerank import OptionalRerankParams, RerankResponse +from litellm.types.router import GenericLiteLLMParams from litellm.types.utils import EmbeddingResponse, FileTypes, TranscriptionResponse from litellm.utils import CustomStreamWrapper, ModelResponse, ProviderConfigManager @@ -234,6 +242,7 @@ class BaseLLMHTTPHandler: model=model, optional_params=optional_params, stream=stream, + litellm_params=litellm_params, ) data = provider_config.transform_request( @@ -604,6 +613,7 @@ class BaseLLMHTTPHandler: api_base=api_base, model=model, optional_params=optional_params, + litellm_params=litellm_params, ) data = provider_config.transform_embedding_request( @@ -873,7 +883,9 @@ class BaseLLMHTTPHandler: elif isinstance(audio_file, bytes): # Assume it's already binary data binary_data = audio_file - elif isinstance(audio_file, io.BufferedReader): + elif isinstance(audio_file, io.BufferedReader) or isinstance( + audio_file, io.BytesIO + ): # Handle file-like objects binary_data = audio_file.read() @@ -897,6 +909,7 @@ class BaseLLMHTTPHandler: client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, atranscription: bool = False, headers: dict = {}, + litellm_params: dict = {}, ) -> TranscriptionResponse: provider_config = ProviderConfigManager.get_provider_audio_transcription_config( model=model, provider=litellm.LlmProviders(custom_llm_provider) @@ -920,6 +933,7 @@ class BaseLLMHTTPHandler: api_base=api_base, model=model, optional_params=optional_params, + litellm_params=litellm_params, ) # Handle the audio file based on type @@ -950,8 +964,235 @@ class BaseLLMHTTPHandler: return returned_response return model_response + def response_api_handler( + self, + model: str, + input: Union[str, ResponseInputParam], + responses_api_provider_config: BaseResponsesAPIConfig, + response_api_optional_request_params: Dict, + custom_llm_provider: str, + litellm_params: GenericLiteLLMParams, + logging_obj: LiteLLMLoggingObj, + extra_headers: Optional[Dict[str, Any]] = None, + extra_body: Optional[Dict[str, Any]] = None, + timeout: Optional[Union[float, httpx.Timeout]] = None, + client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, + _is_async: bool = False, + ) -> Union[ + ResponsesAPIResponse, + BaseResponsesAPIStreamingIterator, + Coroutine[ + Any, Any, Union[ResponsesAPIResponse, BaseResponsesAPIStreamingIterator] + ], + ]: + """ + Handles responses API requests. + When _is_async=True, returns a coroutine instead of making the call directly. 
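A minimal usage sketch (illustrative only, not part of this patch): the request values below are assumptions, and MagicMock stands in for a real litellm logging object.

    from unittest.mock import MagicMock

    from litellm.llms.custom_httpx.llm_http_handler import BaseLLMHTTPHandler
    from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig
    from litellm.types.router import GenericLiteLLMParams

    handler = BaseLLMHTTPHandler()
    request_kwargs = dict(
        model="gpt-4o",
        input="What is the capital of France?",
        responses_api_provider_config=OpenAIResponsesAPIConfig(),
        response_api_optional_request_params={"temperature": 0.0},
        custom_llm_provider="openai",
        litellm_params=GenericLiteLLMParams(api_key="sk-..."),  # hypothetical key
        logging_obj=MagicMock(),  # stand-in for litellm's Logging object
    )
    response = handler.response_api_handler(**request_kwargs)  # blocking call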
+ """ + if _is_async: + # Return the async coroutine if called with _is_async=True + return self.async_response_api_handler( + model=model, + input=input, + responses_api_provider_config=responses_api_provider_config, + response_api_optional_request_params=response_api_optional_request_params, + custom_llm_provider=custom_llm_provider, + litellm_params=litellm_params, + logging_obj=logging_obj, + extra_headers=extra_headers, + extra_body=extra_body, + timeout=timeout, + client=client if isinstance(client, AsyncHTTPHandler) else None, + ) + + if client is None or not isinstance(client, HTTPHandler): + sync_httpx_client = _get_httpx_client( + params={"ssl_verify": litellm_params.get("ssl_verify", None)} + ) + else: + sync_httpx_client = client + + headers = responses_api_provider_config.validate_environment( + api_key=litellm_params.api_key, + headers=response_api_optional_request_params.get("extra_headers", {}) or {}, + model=model, + ) + + if extra_headers: + headers.update(extra_headers) + + api_base = responses_api_provider_config.get_complete_url( + api_base=litellm_params.api_base, + model=model, + ) + + data = responses_api_provider_config.transform_responses_api_request( + model=model, + input=input, + response_api_optional_request_params=response_api_optional_request_params, + litellm_params=litellm_params, + headers=headers, + ) + + ## LOGGING + logging_obj.pre_call( + input=input, + api_key="", + additional_args={ + "complete_input_dict": data, + "api_base": api_base, + "headers": headers, + }, + ) + + # Check if streaming is requested + stream = response_api_optional_request_params.get("stream", False) + + try: + if stream: + # For streaming, use stream=True in the request + response = sync_httpx_client.post( + url=api_base, + headers=headers, + data=json.dumps(data), + timeout=timeout + or response_api_optional_request_params.get("timeout"), + stream=True, + ) + + return SyncResponsesAPIStreamingIterator( + response=response, + model=model, + logging_obj=logging_obj, + responses_api_provider_config=responses_api_provider_config, + ) + else: + # For non-streaming requests + response = sync_httpx_client.post( + url=api_base, + headers=headers, + data=json.dumps(data), + timeout=timeout + or response_api_optional_request_params.get("timeout"), + ) + except Exception as e: + raise self._handle_error( + e=e, + provider_config=responses_api_provider_config, + ) + + return responses_api_provider_config.transform_response_api_response( + model=model, + raw_response=response, + logging_obj=logging_obj, + ) + + async def async_response_api_handler( + self, + model: str, + input: Union[str, ResponseInputParam], + responses_api_provider_config: BaseResponsesAPIConfig, + response_api_optional_request_params: Dict, + custom_llm_provider: str, + litellm_params: GenericLiteLLMParams, + logging_obj: LiteLLMLoggingObj, + extra_headers: Optional[Dict[str, Any]] = None, + extra_body: Optional[Dict[str, Any]] = None, + timeout: Optional[Union[float, httpx.Timeout]] = None, + client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, + ) -> Union[ResponsesAPIResponse, BaseResponsesAPIStreamingIterator]: + """ + Async version of the responses API handler. + Uses async HTTP client to make requests. 
+ """ + if client is None or not isinstance(client, AsyncHTTPHandler): + async_httpx_client = get_async_httpx_client( + llm_provider=litellm.LlmProviders(custom_llm_provider), + params={"ssl_verify": litellm_params.get("ssl_verify", None)}, + ) + else: + async_httpx_client = client + + headers = responses_api_provider_config.validate_environment( + api_key=litellm_params.api_key, + headers=response_api_optional_request_params.get("extra_headers", {}) or {}, + model=model, + ) + + if extra_headers: + headers.update(extra_headers) + + api_base = responses_api_provider_config.get_complete_url( + api_base=litellm_params.api_base, + model=model, + ) + + data = responses_api_provider_config.transform_responses_api_request( + model=model, + input=input, + response_api_optional_request_params=response_api_optional_request_params, + litellm_params=litellm_params, + headers=headers, + ) + + ## LOGGING + logging_obj.pre_call( + input=input, + api_key="", + additional_args={ + "complete_input_dict": data, + "api_base": api_base, + "headers": headers, + }, + ) + + # Check if streaming is requested + stream = response_api_optional_request_params.get("stream", False) + + try: + if stream: + # For streaming, we need to use stream=True in the request + response = await async_httpx_client.post( + url=api_base, + headers=headers, + data=json.dumps(data), + timeout=timeout + or response_api_optional_request_params.get("timeout"), + stream=True, + ) + + # Return the streaming iterator + return ResponsesAPIStreamingIterator( + response=response, + model=model, + logging_obj=logging_obj, + responses_api_provider_config=responses_api_provider_config, + ) + else: + # For non-streaming, proceed as before + response = await async_httpx_client.post( + url=api_base, + headers=headers, + data=json.dumps(data), + timeout=timeout + or response_api_optional_request_params.get("timeout"), + ) + except Exception as e: + raise self._handle_error( + e=e, + provider_config=responses_api_provider_config, + ) + + return responses_api_provider_config.transform_response_api_response( + model=model, + raw_response=response, + logging_obj=logging_obj, + ) + def _handle_error( - self, e: Exception, provider_config: Union[BaseConfig, BaseRerankConfig] + self, + e: Exception, + provider_config: Union[BaseConfig, BaseRerankConfig, BaseResponsesAPIConfig], ): status_code = getattr(e, "status_code", 500) error_headers = getattr(e, "headers", None) diff --git a/litellm/llms/databricks/streaming_utils.py b/litellm/llms/databricks/streaming_utils.py index 0deaa06988..2db53df908 100644 --- a/litellm/llms/databricks/streaming_utils.py +++ b/litellm/llms/databricks/streaming_utils.py @@ -89,7 +89,7 @@ class ModelResponseIterator: raise RuntimeError(f"Error receiving chunk from stream: {e}") try: - chunk = chunk.replace("data:", "") + chunk = litellm.CustomStreamWrapper._strip_sse_data_from_chunk(chunk) or "" chunk = chunk.strip() if len(chunk) > 0: json_chunk = json.loads(chunk) @@ -134,7 +134,7 @@ class ModelResponseIterator: raise RuntimeError(f"Error receiving chunk from stream: {e}") try: - chunk = chunk.replace("data:", "") + chunk = litellm.CustomStreamWrapper._strip_sse_data_from_chunk(chunk) or "" chunk = chunk.strip() if chunk == "[DONE]": raise StopAsyncIteration diff --git a/litellm/llms/deepgram/audio_transcription/transformation.py b/litellm/llms/deepgram/audio_transcription/transformation.py index c8dbd688cc..06296736ea 100644 --- a/litellm/llms/deepgram/audio_transcription/transformation.py +++ 
b/litellm/llms/deepgram/audio_transcription/transformation.py @@ -103,6 +103,7 @@ class DeepgramAudioTranscriptionConfig(BaseAudioTranscriptionConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: if api_base is None: diff --git a/litellm/llms/deepseek/chat/transformation.py b/litellm/llms/deepseek/chat/transformation.py index 747129ddd8..180cf7dc69 100644 --- a/litellm/llms/deepseek/chat/transformation.py +++ b/litellm/llms/deepseek/chat/transformation.py @@ -40,6 +40,7 @@ class DeepSeekChatConfig(OpenAIGPTConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/gemini/chat/transformation.py b/litellm/llms/gemini/chat/transformation.py index 6aa4cf5b52..fbc1916dcc 100644 --- a/litellm/llms/gemini/chat/transformation.py +++ b/litellm/llms/gemini/chat/transformation.py @@ -114,12 +114,16 @@ class GoogleAIStudioGeminiConfig(VertexGeminiConfig): if element.get("type") == "image_url": img_element = element _image_url: Optional[str] = None + format: Optional[str] = None if isinstance(img_element.get("image_url"), dict): _image_url = img_element["image_url"].get("url") # type: ignore + format = img_element["image_url"].get("format") # type: ignore else: _image_url = img_element.get("image_url") # type: ignore if _image_url and "https://" in _image_url: - image_obj = convert_to_anthropic_image_obj(_image_url) + image_obj = convert_to_anthropic_image_obj( + _image_url, format=format + ) img_element["image_url"] = ( # type: ignore convert_generic_image_chunk_to_openai_image_obj( image_obj diff --git a/litellm/llms/ollama/completion/transformation.py b/litellm/llms/ollama/completion/transformation.py index 283b2a2437..4a7a3556ae 100644 --- a/litellm/llms/ollama/completion/transformation.py +++ b/litellm/llms/ollama/completion/transformation.py @@ -356,6 +356,7 @@ class OllamaConfig(BaseConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py index aea1f38951..6f421680b4 100644 --- a/litellm/llms/ollama_chat.py +++ b/litellm/llms/ollama_chat.py @@ -1,7 +1,7 @@ import json import time import uuid -from typing import Any, List, Optional +from typing import Any, List, Optional, Union import aiohttp import httpx @@ -9,7 +9,11 @@ from pydantic import BaseModel import litellm from litellm import verbose_logger -from litellm.llms.custom_httpx.http_handler import get_async_httpx_client +from litellm.llms.custom_httpx.http_handler import ( + AsyncHTTPHandler, + HTTPHandler, + get_async_httpx_client, +) from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig from litellm.types.llms.ollama import OllamaToolCall, OllamaToolCallFunction from litellm.types.llms.openai import ChatCompletionAssistantToolCall @@ -205,6 +209,7 @@ def get_ollama_response( # noqa: PLR0915 api_key: Optional[str] = None, acompletion: bool = False, encoding=None, + client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, ): if api_base.endswith("/api/chat"): url = api_base @@ -301,7 +306,11 @@ def get_ollama_response( # noqa: PLR0915 headers: Optional[dict] = None if api_key is not None: headers = {"Authorization": "Bearer {}".format(api_key)} - response = litellm.module_level_client.post( + + sync_client = litellm.module_level_client + if client is not None and isinstance(client, 
HTTPHandler): + sync_client = client + response = sync_client.post( url=url, json=data, headers=headers, diff --git a/litellm/llms/openai/chat/gpt_transformation.py b/litellm/llms/openai/chat/gpt_transformation.py index 13b591e715..8974a2a074 100644 --- a/litellm/llms/openai/chat/gpt_transformation.py +++ b/litellm/llms/openai/chat/gpt_transformation.py @@ -20,7 +20,11 @@ from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator from litellm.llms.base_llm.base_utils import BaseLLMModelInfo from litellm.llms.base_llm.chat.transformation import BaseConfig, BaseLLMException from litellm.secret_managers.main import get_secret_str -from litellm.types.llms.openai import AllMessageValues, ChatCompletionImageObject +from litellm.types.llms.openai import ( + AllMessageValues, + ChatCompletionImageObject, + ChatCompletionImageUrlObject, +) from litellm.types.utils import ModelResponse, ModelResponseStream from litellm.utils import convert_to_model_response_object @@ -189,6 +193,16 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig): content_item["image_url"] = { "url": content_item["image_url"], } + elif isinstance(content_item["image_url"], dict): + litellm_specific_params = {"format"} + new_image_url_obj = ChatCompletionImageUrlObject( + **{ # type: ignore + k: v + for k, v in content_item["image_url"].items() + if k not in litellm_specific_params + } + ) + content_item["image_url"] = new_image_url_obj return messages def transform_request( @@ -277,6 +291,7 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: """ diff --git a/litellm/llms/openai/common_utils.py b/litellm/llms/openai/common_utils.py index 98a55b4bd3..a8412f867b 100644 --- a/litellm/llms/openai/common_utils.py +++ b/litellm/llms/openai/common_utils.py @@ -19,6 +19,7 @@ class OpenAIError(BaseLLMException): request: Optional[httpx.Request] = None, response: Optional[httpx.Response] = None, headers: Optional[Union[dict, httpx.Headers]] = None, + body: Optional[dict] = None, ): self.status_code = status_code self.message = message @@ -39,6 +40,7 @@ class OpenAIError(BaseLLMException): headers=self.headers, request=self.request, response=self.response, + body=body, ) diff --git a/litellm/llms/openai/fine_tuning/handler.py b/litellm/llms/openai/fine_tuning/handler.py index b7eab8e5fd..97b237c757 100644 --- a/litellm/llms/openai/fine_tuning/handler.py +++ b/litellm/llms/openai/fine_tuning/handler.py @@ -27,6 +27,7 @@ class OpenAIFineTuningAPI: ] = None, _is_async: bool = False, api_version: Optional[str] = None, + litellm_params: Optional[dict] = None, ) -> Optional[ Union[ OpenAI, diff --git a/litellm/llms/openai/openai.py b/litellm/llms/openai/openai.py index 5465a24945..880a043d08 100644 --- a/litellm/llms/openai/openai.py +++ b/litellm/llms/openai/openai.py @@ -37,6 +37,7 @@ from litellm.llms.custom_httpx.http_handler import _DEFAULT_TTL_FOR_HTTPX_CLIENT from litellm.types.utils import ( EmbeddingResponse, ImageResponse, + LiteLLMBatch, ModelResponse, ModelResponseStream, ) @@ -731,10 +732,14 @@ class OpenAIChatCompletion(BaseLLM): error_headers = getattr(e, "headers", None) error_text = getattr(e, "text", str(e)) error_response = getattr(e, "response", None) + error_body = getattr(e, "body", None) if error_headers is None and error_response: error_headers = getattr(error_response, "headers", None) raise OpenAIError( - status_code=status_code, message=error_text, headers=error_headers 
+ status_code=status_code, + message=error_text, + headers=error_headers, + body=error_body, ) async def acompletion( @@ -827,13 +832,17 @@ class OpenAIChatCompletion(BaseLLM): except Exception as e: exception_response = getattr(e, "response", None) status_code = getattr(e, "status_code", 500) + exception_body = getattr(e, "body", None) error_headers = getattr(e, "headers", None) if error_headers is None and exception_response: error_headers = getattr(exception_response, "headers", None) message = getattr(e, "message", str(e)) raise OpenAIError( - status_code=status_code, message=message, headers=error_headers + status_code=status_code, + message=message, + headers=error_headers, + body=exception_body, ) def streaming( @@ -972,6 +981,7 @@ class OpenAIChatCompletion(BaseLLM): error_headers = getattr(e, "headers", None) status_code = getattr(e, "status_code", 500) error_response = getattr(e, "response", None) + exception_body = getattr(e, "body", None) if error_headers is None and error_response: error_headers = getattr(error_response, "headers", None) if response is not None and hasattr(response, "text"): @@ -979,6 +989,7 @@ class OpenAIChatCompletion(BaseLLM): status_code=status_code, message=f"{str(e)}\n\nOriginal Response: {response.text}", # type: ignore headers=error_headers, + body=exception_body, ) else: if type(e).__name__ == "ReadTimeout": @@ -986,16 +997,21 @@ class OpenAIChatCompletion(BaseLLM): status_code=408, message=f"{type(e).__name__}", headers=error_headers, + body=exception_body, ) elif hasattr(e, "status_code"): raise OpenAIError( status_code=getattr(e, "status_code", 500), message=str(e), headers=error_headers, + body=exception_body, ) else: raise OpenAIError( - status_code=500, message=f"{str(e)}", headers=error_headers + status_code=500, + message=f"{str(e)}", + headers=error_headers, + body=exception_body, ) def get_stream_options( @@ -1755,9 +1771,9 @@ class OpenAIBatchesAPI(BaseLLM): self, create_batch_data: CreateBatchRequest, openai_client: AsyncOpenAI, - ) -> Batch: + ) -> LiteLLMBatch: response = await openai_client.batches.create(**create_batch_data) - return response + return LiteLLMBatch(**response.model_dump()) def create_batch( self, @@ -1769,7 +1785,7 @@ class OpenAIBatchesAPI(BaseLLM): max_retries: Optional[int], organization: Optional[str], client: Optional[Union[OpenAI, AsyncOpenAI]] = None, - ) -> Union[Batch, Coroutine[Any, Any, Batch]]: + ) -> Union[LiteLLMBatch, Coroutine[Any, Any, LiteLLMBatch]]: openai_client: Optional[Union[OpenAI, AsyncOpenAI]] = self.get_openai_client( api_key=api_key, api_base=api_base, @@ -1792,17 +1808,18 @@ class OpenAIBatchesAPI(BaseLLM): return self.acreate_batch( # type: ignore create_batch_data=create_batch_data, openai_client=openai_client ) - response = openai_client.batches.create(**create_batch_data) - return response + response = cast(OpenAI, openai_client).batches.create(**create_batch_data) + + return LiteLLMBatch(**response.model_dump()) async def aretrieve_batch( self, retrieve_batch_data: RetrieveBatchRequest, openai_client: AsyncOpenAI, - ) -> Batch: + ) -> LiteLLMBatch: verbose_logger.debug("retrieving batch, args= %s", retrieve_batch_data) response = await openai_client.batches.retrieve(**retrieve_batch_data) - return response + return LiteLLMBatch(**response.model_dump()) def retrieve_batch( self, @@ -1837,8 +1854,8 @@ class OpenAIBatchesAPI(BaseLLM): return self.aretrieve_batch( # type: ignore retrieve_batch_data=retrieve_batch_data, openai_client=openai_client ) - response = 
openai_client.batches.retrieve(**retrieve_batch_data) - return response + response = cast(OpenAI, openai_client).batches.retrieve(**retrieve_batch_data) + return LiteLLMBatch(**response.model_dump()) async def acancel_batch( self, @@ -2633,7 +2650,7 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -2672,12 +2689,12 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], tools: Optional[Iterable[AssistantToolParam]], event_handler: Optional[AssistantEventHandler], ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]: - data = { + data: Dict[str, Any] = { "thread_id": thread_id, "assistant_id": assistant_id, "additional_instructions": additional_instructions, @@ -2697,12 +2714,12 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], tools: Optional[Iterable[AssistantToolParam]], event_handler: Optional[AssistantEventHandler], ) -> AssistantStreamManager[AssistantEventHandler]: - data = { + data: Dict[str, Any] = { "thread_id": thread_id, "assistant_id": assistant_id, "additional_instructions": additional_instructions, @@ -2724,7 +2741,7 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -2746,7 +2763,7 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], @@ -2769,7 +2786,7 @@ class OpenAIAssistantsAPI(BaseLLM): assistant_id: str, additional_instructions: Optional[str], instructions: Optional[str], - metadata: Optional[object], + metadata: Optional[Dict], model: Optional[str], stream: Optional[bool], tools: Optional[Iterable[AssistantToolParam]], diff --git a/litellm/llms/openai/responses/transformation.py b/litellm/llms/openai/responses/transformation.py new file mode 100644 index 0000000000..ce4052dc19 --- /dev/null +++ b/litellm/llms/openai/responses/transformation.py @@ -0,0 +1,190 @@ +from typing import TYPE_CHECKING, Any, Dict, Optional, Union, cast + +import httpx + +import litellm +from litellm._logging import verbose_logger +from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig +from litellm.secret_managers.main import get_secret_str +from litellm.types.llms.openai import * +from litellm.types.router import GenericLiteLLMParams + +from ..common_utils import OpenAIError + +if TYPE_CHECKING: + from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj + + LiteLLMLoggingObj = _LiteLLMLoggingObj +else: + LiteLLMLoggingObj = Any + + +class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig): + def get_supported_openai_params(self, model: str) -> list: + """ + All OpenAI Responses API params are supported + """ + return [ + "input", + "model", + "include", + 
"instructions", + "max_output_tokens", + "metadata", + "parallel_tool_calls", + "previous_response_id", + "reasoning", + "store", + "stream", + "temperature", + "text", + "tool_choice", + "tools", + "top_p", + "truncation", + "user", + "extra_headers", + "extra_query", + "extra_body", + "timeout", + ] + + def map_openai_params( + self, + response_api_optional_params: ResponsesAPIOptionalRequestParams, + model: str, + drop_params: bool, + ) -> Dict: + """No mapping applied since inputs are in OpenAI spec already""" + return dict(response_api_optional_params) + + def transform_responses_api_request( + self, + model: str, + input: Union[str, ResponseInputParam], + response_api_optional_request_params: Dict, + litellm_params: GenericLiteLLMParams, + headers: dict, + ) -> ResponsesAPIRequestParams: + """No transform applied since inputs are in OpenAI spec already""" + return ResponsesAPIRequestParams( + model=model, input=input, **response_api_optional_request_params + ) + + def transform_response_api_response( + self, + model: str, + raw_response: httpx.Response, + logging_obj: LiteLLMLoggingObj, + ) -> ResponsesAPIResponse: + """No transform applied since outputs are in OpenAI spec already""" + try: + raw_response_json = raw_response.json() + except Exception: + raise OpenAIError( + message=raw_response.text, status_code=raw_response.status_code + ) + return ResponsesAPIResponse(**raw_response_json) + + def validate_environment( + self, + headers: dict, + model: str, + api_key: Optional[str] = None, + ) -> dict: + api_key = ( + api_key + or litellm.api_key + or litellm.openai_key + or get_secret_str("OPENAI_API_KEY") + ) + headers.update( + { + "Authorization": f"Bearer {api_key}", + } + ) + return headers + + def get_complete_url( + self, + api_base: Optional[str], + model: str, + stream: Optional[bool] = None, + ) -> str: + """ + Get the endpoint for OpenAI responses API + """ + api_base = ( + api_base + or litellm.api_base + or get_secret_str("OPENAI_API_BASE") + or "https://api.openai.com/v1" + ) + + # Remove trailing slashes + api_base = api_base.rstrip("/") + + return f"{api_base}/responses" + + def transform_streaming_response( + self, + model: str, + parsed_chunk: dict, + logging_obj: LiteLLMLoggingObj, + ) -> ResponsesAPIStreamingResponse: + """ + Transform a parsed streaming response chunk into a ResponsesAPIStreamingResponse + """ + # Convert the dictionary to a properly typed ResponsesAPIStreamingResponse + verbose_logger.debug("Raw OpenAI Chunk=%s", parsed_chunk) + event_type = str(parsed_chunk.get("type")) + event_pydantic_model = OpenAIResponsesAPIConfig.get_event_model_class( + event_type=event_type + ) + return event_pydantic_model(**parsed_chunk) + + @staticmethod + def get_event_model_class(event_type: str) -> Any: + """ + Returns the appropriate event model class based on the event type. 
+ + Args: + event_type (str): The type of event from the response chunk + + Returns: + Any: The corresponding event model class + + Raises: + ValueError: If the event type is unknown + """ + event_models = { + ResponsesAPIStreamEvents.RESPONSE_CREATED: ResponseCreatedEvent, + ResponsesAPIStreamEvents.RESPONSE_IN_PROGRESS: ResponseInProgressEvent, + ResponsesAPIStreamEvents.RESPONSE_COMPLETED: ResponseCompletedEvent, + ResponsesAPIStreamEvents.RESPONSE_FAILED: ResponseFailedEvent, + ResponsesAPIStreamEvents.RESPONSE_INCOMPLETE: ResponseIncompleteEvent, + ResponsesAPIStreamEvents.OUTPUT_ITEM_ADDED: OutputItemAddedEvent, + ResponsesAPIStreamEvents.OUTPUT_ITEM_DONE: OutputItemDoneEvent, + ResponsesAPIStreamEvents.CONTENT_PART_ADDED: ContentPartAddedEvent, + ResponsesAPIStreamEvents.CONTENT_PART_DONE: ContentPartDoneEvent, + ResponsesAPIStreamEvents.OUTPUT_TEXT_DELTA: OutputTextDeltaEvent, + ResponsesAPIStreamEvents.OUTPUT_TEXT_ANNOTATION_ADDED: OutputTextAnnotationAddedEvent, + ResponsesAPIStreamEvents.OUTPUT_TEXT_DONE: OutputTextDoneEvent, + ResponsesAPIStreamEvents.REFUSAL_DELTA: RefusalDeltaEvent, + ResponsesAPIStreamEvents.REFUSAL_DONE: RefusalDoneEvent, + ResponsesAPIStreamEvents.FUNCTION_CALL_ARGUMENTS_DELTA: FunctionCallArgumentsDeltaEvent, + ResponsesAPIStreamEvents.FUNCTION_CALL_ARGUMENTS_DONE: FunctionCallArgumentsDoneEvent, + ResponsesAPIStreamEvents.FILE_SEARCH_CALL_IN_PROGRESS: FileSearchCallInProgressEvent, + ResponsesAPIStreamEvents.FILE_SEARCH_CALL_SEARCHING: FileSearchCallSearchingEvent, + ResponsesAPIStreamEvents.FILE_SEARCH_CALL_COMPLETED: FileSearchCallCompletedEvent, + ResponsesAPIStreamEvents.WEB_SEARCH_CALL_IN_PROGRESS: WebSearchCallInProgressEvent, + ResponsesAPIStreamEvents.WEB_SEARCH_CALL_SEARCHING: WebSearchCallSearchingEvent, + ResponsesAPIStreamEvents.WEB_SEARCH_CALL_COMPLETED: WebSearchCallCompletedEvent, + ResponsesAPIStreamEvents.ERROR: ErrorEvent, + } + + model_class = event_models.get(cast(ResponsesAPIStreamEvents, event_type)) + if not model_class: + raise ValueError(f"Unknown event type: {event_type}") + + return model_class diff --git a/litellm/llms/openai_like/chat/handler.py b/litellm/llms/openai_like/chat/handler.py index ac886e915c..821fc9b7f1 100644 --- a/litellm/llms/openai_like/chat/handler.py +++ b/litellm/llms/openai_like/chat/handler.py @@ -230,7 +230,7 @@ class OpenAILikeChatHandler(OpenAILikeBase): logging_obj, optional_params: dict, acompletion=None, - litellm_params=None, + litellm_params: dict = {}, logger_fn=None, headers: Optional[dict] = None, timeout: Optional[Union[float, httpx.Timeout]] = None, @@ -337,7 +337,7 @@ class OpenAILikeChatHandler(OpenAILikeBase): timeout=timeout, base_model=base_model, client=client, - json_mode=json_mode + json_mode=json_mode, ) else: ## COMPLETION CALL diff --git a/litellm/llms/openrouter/chat/transformation.py b/litellm/llms/openrouter/chat/transformation.py index 5a4c2ff209..4b95ec87cf 100644 --- a/litellm/llms/openrouter/chat/transformation.py +++ b/litellm/llms/openrouter/chat/transformation.py @@ -6,7 +6,16 @@ Calls done in OpenAI/openai.py as OpenRouter is openai-compatible. 
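The streaming handler added below copies OpenRouter's provider-specific `reasoning` delta field into litellm's OpenAI-style `reasoning_content`; a rough sketch of that translation on a single parsed chunk (the chunk dict itself is illustrative):

    # Hypothetical parsed SSE chunk from OpenRouter:
    chunk = {
        "id": "gen-123",
        "created": 1700000000,
        "model": "deepseek/deepseek-r1",
        "choices": [{"index": 0, "delta": {"content": "", "reasoning": "Thinking..."}}],
    }
    for choice in chunk["choices"]:
        # Mirror OpenRouterChatCompletionStreamingHandler.chunk_parser:
        choice["delta"]["reasoning_content"] = choice["delta"].get("reasoning")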
Docs: https://openrouter.ai/docs/parameters """ +from typing import Any, AsyncIterator, Iterator, Optional, Union + +import httpx + +from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator +from litellm.llms.base_llm.chat.transformation import BaseLLMException +from litellm.types.utils import ModelResponse, ModelResponseStream + from ...openai.chat.gpt_transformation import OpenAIGPTConfig +from ..common_utils import OpenRouterException class OpenrouterConfig(OpenAIGPTConfig): @@ -37,3 +46,43 @@ class OpenrouterConfig(OpenAIGPTConfig): extra_body # openai client supports `extra_body` param ) return mapped_openai_params + + def get_error_class( + self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers] + ) -> BaseLLMException: + return OpenRouterException( + message=error_message, + status_code=status_code, + headers=headers, + ) + + def get_model_response_iterator( + self, + streaming_response: Union[Iterator[str], AsyncIterator[str], ModelResponse], + sync_stream: bool, + json_mode: Optional[bool] = False, + ) -> Any: + return OpenRouterChatCompletionStreamingHandler( + streaming_response=streaming_response, + sync_stream=sync_stream, + json_mode=json_mode, + ) + + +class OpenRouterChatCompletionStreamingHandler(BaseModelResponseIterator): + + def chunk_parser(self, chunk: dict) -> ModelResponseStream: + try: + new_choices = [] + for choice in chunk["choices"]: + choice["delta"]["reasoning_content"] = choice["delta"].get("reasoning") + new_choices.append(choice) + return ModelResponseStream( + id=chunk["id"], + object="chat.completion.chunk", + created=chunk["created"], + model=chunk["model"], + choices=new_choices, + ) + except Exception as e: + raise e diff --git a/litellm/llms/openrouter/common_utils.py b/litellm/llms/openrouter/common_utils.py new file mode 100644 index 0000000000..96e53a5aae --- /dev/null +++ b/litellm/llms/openrouter/common_utils.py @@ -0,0 +1,5 @@ +from litellm.llms.base_llm.chat.transformation import BaseLLMException + + +class OpenRouterException(BaseLLMException): + pass diff --git a/litellm/llms/perplexity/chat/transformation.py b/litellm/llms/perplexity/chat/transformation.py index 8f71cc153f..dab64283ec 100644 --- a/litellm/llms/perplexity/chat/transformation.py +++ b/litellm/llms/perplexity/chat/transformation.py @@ -37,6 +37,7 @@ class PerplexityChatConfig(OpenAIGPTConfig): "response_format", "stream", "temperature", - "top_p" "max_retries", + "top_p", + "max_retries", "extra_headers", ] diff --git a/litellm/llms/replicate/chat/handler.py b/litellm/llms/replicate/chat/handler.py index e7d0d383e2..f52eb2ee05 100644 --- a/litellm/llms/replicate/chat/handler.py +++ b/litellm/llms/replicate/chat/handler.py @@ -169,7 +169,10 @@ def completion( ) # for pricing this must remain right before calling api prediction_url = replicate_config.get_complete_url( - api_base=api_base, model=model, optional_params=optional_params + api_base=api_base, + model=model, + optional_params=optional_params, + litellm_params=litellm_params, ) ## COMPLETION CALL @@ -243,7 +246,10 @@ async def async_completion( ) -> Union[ModelResponse, CustomStreamWrapper]: prediction_url = replicate_config.get_complete_url( - api_base=api_base, model=model, optional_params=optional_params + api_base=api_base, + model=model, + optional_params=optional_params, + litellm_params=litellm_params, ) async_handler = get_async_httpx_client( llm_provider=litellm.LlmProviders.REPLICATE, diff --git a/litellm/llms/replicate/chat/transformation.py 
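The OpenRouterChatCompletionStreamingHandler above copies OpenRouter's provider-specific delta["reasoning"] field into delta["reasoning_content"], the key litellm's stream wrapper reads. A standalone illustration with an assumed chunk shape:

# Hypothetical raw OpenRouter SSE chunk (already JSON-decoded).
raw_chunk = {
    "id": "gen-123",
    "created": 1710000000,
    "model": "deepseek/deepseek-r1",
    "choices": [
        {"index": 0, "delta": {"content": "", "reasoning": "Let me think..."}}
    ],
}

# The same field-mirroring chunk_parser performs before building a
# ModelResponseStream.
for choice in raw_chunk["choices"]:
    choice["delta"]["reasoning_content"] = choice["delta"].get("reasoning")

assert raw_chunk["choices"][0]["delta"]["reasoning_content"] == "Let me think..."

Note the design choice: because dict.get is used, chunks without a "reasoning" key simply end up with reasoning_content=None instead of raising.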
b/litellm/llms/replicate/chat/transformation.py index 39aaad6808..75cfe6ced7 100644 --- a/litellm/llms/replicate/chat/transformation.py +++ b/litellm/llms/replicate/chat/transformation.py @@ -141,6 +141,7 @@ class ReplicateConfig(BaseConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: version_id = self.model_to_version_id(model) diff --git a/litellm/llms/sagemaker/common_utils.py b/litellm/llms/sagemaker/common_utils.py index 49e4989ff1..9884f420c3 100644 --- a/litellm/llms/sagemaker/common_utils.py +++ b/litellm/llms/sagemaker/common_utils.py @@ -3,6 +3,7 @@ from typing import AsyncIterator, Iterator, List, Optional, Union import httpx +import litellm from litellm import verbose_logger from litellm.llms.base_llm.chat.transformation import BaseLLMException from litellm.types.utils import GenericStreamingChunk as GChunk @@ -78,7 +79,11 @@ class AWSEventStreamDecoder: message = self._parse_message_from_event(event) if message: # remove data: prefix and "\n\n" at the end - message = message.replace("data:", "").replace("\n\n", "") + message = ( + litellm.CustomStreamWrapper._strip_sse_data_from_chunk(message) + or "" + ) + message = message.replace("\n\n", "") # Accumulate JSON data accumulated_json += message @@ -127,7 +132,11 @@ class AWSEventStreamDecoder: if message: verbose_logger.debug("sagemaker parsed chunk bytes %s", message) # remove data: prefix and "\n\n" at the end - message = message.replace("data:", "").replace("\n\n", "") + message = ( + litellm.CustomStreamWrapper._strip_sse_data_from_chunk(message) + or "" + ) + message = message.replace("\n\n", "") # Accumulate JSON data accumulated_json += message diff --git a/litellm/llms/topaz/image_variations/transformation.py b/litellm/llms/topaz/image_variations/transformation.py index 112c3a8f64..8b95deed04 100644 --- a/litellm/llms/topaz/image_variations/transformation.py +++ b/litellm/llms/topaz/image_variations/transformation.py @@ -55,6 +55,7 @@ class TopazImageVariationConfig(BaseImageVariationConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: api_base = api_base or "https://api.topazlabs.com" diff --git a/litellm/llms/triton/completion/transformation.py b/litellm/llms/triton/completion/transformation.py index 0cd6940063..56151f89ef 100644 --- a/litellm/llms/triton/completion/transformation.py +++ b/litellm/llms/triton/completion/transformation.py @@ -3,7 +3,7 @@ Translates from OpenAI's `/v1/chat/completions` endpoint to Triton's `/generate` """ import json -from typing import Any, Dict, List, Literal, Optional, Union +from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional, Union from httpx import Headers, Response @@ -67,6 +67,21 @@ class TritonConfig(BaseConfig): optional_params[param] = value return optional_params + def get_complete_url( + self, + api_base: Optional[str], + model: str, + optional_params: dict, + litellm_params: dict, + stream: Optional[bool] = None, + ) -> str: + if api_base is None: + raise ValueError("api_base is required") + llm_type = self._get_triton_llm_type(api_base) + if llm_type == "generate" and stream: + return api_base + "_stream" + return api_base + def transform_response( self, model: str, @@ -149,6 +164,18 @@ class TritonConfig(BaseConfig): else: raise ValueError(f"Invalid Triton API base: {api_base}") + def get_model_response_iterator( + self, + streaming_response: Union[Iterator[str], AsyncIterator[str], 
ModelResponse], + sync_stream: bool, + json_mode: Optional[bool] = False, + ) -> Any: + return TritonResponseIterator( + streaming_response=streaming_response, + sync_stream=sync_stream, + json_mode=json_mode, + ) + class TritonGenerateConfig(TritonConfig): """ @@ -204,7 +231,7 @@ class TritonGenerateConfig(TritonConfig): return model_response -class TritonInferConfig(TritonGenerateConfig): +class TritonInferConfig(TritonConfig): """ Transformations for triton /infer endpoint (this is an infer endpoint with a custom model on triton) """ diff --git a/litellm/llms/vertex_ai/batches/handler.py b/litellm/llms/vertex_ai/batches/handler.py index 3d723d8ecf..b82268bef6 100644 --- a/litellm/llms/vertex_ai/batches/handler.py +++ b/litellm/llms/vertex_ai/batches/handler.py @@ -9,11 +9,12 @@ from litellm.llms.custom_httpx.http_handler import ( get_async_httpx_client, ) from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import VertexLLM -from litellm.types.llms.openai import Batch, CreateBatchRequest +from litellm.types.llms.openai import CreateBatchRequest from litellm.types.llms.vertex_ai import ( VERTEX_CREDENTIALS_TYPES, VertexAIBatchPredictionJob, ) +from litellm.types.utils import LiteLLMBatch from .transformation import VertexAIBatchTransformation @@ -33,7 +34,7 @@ class VertexAIBatchPrediction(VertexLLM): vertex_location: Optional[str], timeout: Union[float, httpx.Timeout], max_retries: Optional[int], - ) -> Union[Batch, Coroutine[Any, Any, Batch]]: + ) -> Union[LiteLLMBatch, Coroutine[Any, Any, LiteLLMBatch]]: sync_handler = _get_httpx_client() @@ -101,7 +102,7 @@ class VertexAIBatchPrediction(VertexLLM): vertex_batch_request: VertexAIBatchPredictionJob, api_base: str, headers: Dict[str, str], - ) -> Batch: + ) -> LiteLLMBatch: client = get_async_httpx_client( llm_provider=litellm.LlmProviders.VERTEX_AI, ) @@ -138,7 +139,7 @@ class VertexAIBatchPrediction(VertexLLM): vertex_location: Optional[str], timeout: Union[float, httpx.Timeout], max_retries: Optional[int], - ) -> Union[Batch, Coroutine[Any, Any, Batch]]: + ) -> Union[LiteLLMBatch, Coroutine[Any, Any, LiteLLMBatch]]: sync_handler = _get_httpx_client() access_token, project_id = self._ensure_access_token( @@ -199,7 +200,7 @@ class VertexAIBatchPrediction(VertexLLM): self, api_base: str, headers: Dict[str, str], - ) -> Batch: + ) -> LiteLLMBatch: client = get_async_httpx_client( llm_provider=litellm.LlmProviders.VERTEX_AI, ) diff --git a/litellm/llms/vertex_ai/batches/transformation.py b/litellm/llms/vertex_ai/batches/transformation.py index 32cabdcf56..a97f312d48 100644 --- a/litellm/llms/vertex_ai/batches/transformation.py +++ b/litellm/llms/vertex_ai/batches/transformation.py @@ -4,8 +4,9 @@ from typing import Dict from litellm.llms.vertex_ai.common_utils import ( _convert_vertex_datetime_to_openai_datetime, ) -from litellm.types.llms.openai import Batch, BatchJobStatus, CreateBatchRequest +from litellm.types.llms.openai import BatchJobStatus, CreateBatchRequest from litellm.types.llms.vertex_ai import * +from litellm.types.utils import LiteLLMBatch class VertexAIBatchTransformation: @@ -47,8 +48,8 @@ class VertexAIBatchTransformation: @classmethod def transform_vertex_ai_batch_response_to_openai_batch_response( cls, response: VertexBatchPredictionResponse - ) -> Batch: - return Batch( + ) -> LiteLLMBatch: + return LiteLLMBatch( id=cls._get_batch_id_from_vertex_ai_batch_response(response), completion_window="24hrs", created_at=_convert_vertex_datetime_to_openai_datetime(
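The Triton get_complete_url override earlier in this diff routes streaming calls to a dedicated endpoint by suffixing the base URL. A sketch of that routing, under the (hypothetical) assumption that _get_triton_llm_type classifies the URL by its trailing path segment:

from typing import Optional

def triton_complete_url(api_base: Optional[str], stream: Optional[bool]) -> str:
    if api_base is None:
        raise ValueError("api_base is required")
    # Assumed classification: .../generate endpoints vs .../infer endpoints.
    llm_type = "generate" if api_base.endswith("/generate") else "infer"
    if llm_type == "generate" and stream:
        return api_base + "_stream"  # .../generate -> .../generate_stream
    return api_base

assert (
    triton_complete_url("http://localhost:8000/v2/models/m/generate", True)
    == "http://localhost:8000/v2/models/m/generate_stream"
)

diff --git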
a/litellm/llms/vertex_ai/common_utils.py b/litellm/llms/vertex_ai/common_utils.py index a412a1f0db..f7149c349a 100644 --- a/litellm/llms/vertex_ai/common_utils.py +++ b/litellm/llms/vertex_ai/common_utils.py @@ -170,6 +170,9 @@ def _build_vertex_schema(parameters: dict): strip_field( parameters, field_name="$schema" ) # 5. Remove $schema - json schema value, not supported by OpenAPI - causes vertex errors. + strip_field( + parameters, field_name="$id" + ) # 6. Remove id - json schema value, not supported by OpenAPI - causes vertex errors. return parameters diff --git a/litellm/llms/vertex_ai/gemini/transformation.py b/litellm/llms/vertex_ai/gemini/transformation.py index 8109c8bf61..d6bafc7c60 100644 --- a/litellm/llms/vertex_ai/gemini/transformation.py +++ b/litellm/llms/vertex_ai/gemini/transformation.py @@ -55,10 +55,11 @@ else: LiteLLMLoggingObj = Any -def _process_gemini_image(image_url: str) -> PartType: +def _process_gemini_image(image_url: str, format: Optional[str] = None) -> PartType: """ Given an image URL, return the appropriate PartType for Gemini """ + try: # GCS URIs if "gs://" in image_url: @@ -66,25 +67,30 @@ def _process_gemini_image(image_url: str) -> PartType: extension_with_dot = os.path.splitext(image_url)[-1] # Ex: ".png" extension = extension_with_dot[1:] # Ex: "png" - file_type = get_file_type_from_extension(extension) + if not format: + file_type = get_file_type_from_extension(extension) - # Validate the file type is supported by Gemini - if not is_gemini_1_5_accepted_file_type(file_type): - raise Exception(f"File type not supported by gemini - {file_type}") + # Validate the file type is supported by Gemini + if not is_gemini_1_5_accepted_file_type(file_type): + raise Exception(f"File type not supported by gemini - {file_type}") - mime_type = get_file_mime_type_for_file_type(file_type) + mime_type = get_file_mime_type_for_file_type(file_type) + else: + mime_type = format file_data = FileDataType(mime_type=mime_type, file_uri=image_url) return PartType(file_data=file_data) elif ( "https://" in image_url - and (image_type := _get_image_mime_type_from_url(image_url)) is not None + and (image_type := format or _get_image_mime_type_from_url(image_url)) + is not None ): + file_data = FileDataType(file_uri=image_url, mime_type=image_type) return PartType(file_data=file_data) elif "http://" in image_url or "https://" in image_url or "base64" in image_url: # https links for unsupported mime types and base64 images - image = convert_to_anthropic_image_obj(image_url) + image = convert_to_anthropic_image_obj(image_url, format=format) _blob = BlobType(data=image["data"], mime_type=image["media_type"]) return PartType(inline_data=_blob) raise Exception("Invalid image received - {}".format(image_url)) @@ -159,11 +165,15 @@ def _gemini_convert_messages_with_history( # noqa: PLR0915 elif element["type"] == "image_url": element = cast(ChatCompletionImageObject, element) img_element = element + format: Optional[str] = None if isinstance(img_element["image_url"], dict): image_url = img_element["image_url"]["url"] + format = img_element["image_url"].get("format") else: image_url = img_element["image_url"] - _part = _process_gemini_image(image_url=image_url) + _part = _process_gemini_image( + image_url=image_url, format=format + ) _parts.append(_part) user_content.extend(_parts) elif ( diff --git a/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py b/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py index a87b5f3a2a..9ac1b1ffc4 100644 --- 
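_process_gemini_image above now takes an optional format, letting callers pin the mime type explicitly instead of relying on file-extension checks or URL probing. A hypothetical OpenAI-style message exercising the new key (the URL and mime type are made up for illustration):

message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "Summarize this document"},
        {
            "type": "image_url",
            "image_url": {
                "url": "gs://my-bucket/report.blob",  # hypothetical GCS object, no useful extension
                "format": "application/pdf",  # explicit mime type; skips extension sniffing
            },
        },
    ],
}

_gemini_convert_messages_with_history reads image_url["format"] when image_url is a dict and forwards it to _process_gemini_image, so the file is sent as file_data with the given mime type.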
a/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py +++ b/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py @@ -846,7 +846,7 @@ async def make_call( message=VertexGeminiConfig().translate_exception_str(exception_string), headers=e.response.headers, ) - if response.status_code != 200: + if response.status_code != 200 and response.status_code != 201: raise VertexAIError( status_code=response.status_code, message=response.text, @@ -884,7 +884,7 @@ def make_sync_call( response = client.post(api_base, headers=headers, data=data, stream=True) - if response.status_code != 200: + if response.status_code != 200 and response.status_code != 201: raise VertexAIError( status_code=response.status_code, message=str(response.read()), @@ -1023,7 +1023,6 @@ class VertexLLM(VertexBase): gemini_api_key: Optional[str] = None, extra_headers: Optional[dict] = None, ) -> Union[ModelResponse, CustomStreamWrapper]: - should_use_v1beta1_features = self.is_using_v1beta1_features( optional_params=optional_params ) @@ -1409,7 +1408,8 @@ class ModelResponseIterator: return self.chunk_parser(chunk=json_chunk) def handle_accumulated_json_chunk(self, chunk: str) -> GenericStreamingChunk: - message = chunk.replace("data:", "").replace("\n\n", "") + chunk = litellm.CustomStreamWrapper._strip_sse_data_from_chunk(chunk) or "" + message = chunk.replace("\n\n", "") # Accumulate JSON data self.accumulated_json += message @@ -1432,7 +1432,7 @@ class ModelResponseIterator: def _common_chunk_parsing_logic(self, chunk: str) -> GenericStreamingChunk: try: - chunk = chunk.replace("data:", "") + chunk = litellm.CustomStreamWrapper._strip_sse_data_from_chunk(chunk) or "" if len(chunk) > 0: """ Check if initial chunk valid json diff --git a/litellm/llms/voyage/embedding/transformation.py b/litellm/llms/voyage/embedding/transformation.py index 623dfe73af..51abc9e43a 100644 --- a/litellm/llms/voyage/embedding/transformation.py +++ b/litellm/llms/voyage/embedding/transformation.py @@ -43,6 +43,7 @@ class VoyageEmbeddingConfig(BaseEmbeddingConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: if api_base: diff --git a/litellm/llms/watsonx/chat/handler.py b/litellm/llms/watsonx/chat/handler.py index fd195214db..8ea19d413e 100644 --- a/litellm/llms/watsonx/chat/handler.py +++ b/litellm/llms/watsonx/chat/handler.py @@ -31,7 +31,7 @@ class WatsonXChatHandler(OpenAILikeChatHandler): logging_obj, optional_params: dict, acompletion=None, - litellm_params=None, + litellm_params: dict = {}, headers: Optional[dict] = None, logger_fn=None, timeout: Optional[Union[float, httpx.Timeout]] = None, @@ -63,6 +63,7 @@ class WatsonXChatHandler(OpenAILikeChatHandler): api_base=api_base, model=model, optional_params=optional_params, + litellm_params=litellm_params, stream=optional_params.get("stream", False), ) diff --git a/litellm/llms/watsonx/chat/transformation.py b/litellm/llms/watsonx/chat/transformation.py index d5e0ed6544..f253da6f5b 100644 --- a/litellm/llms/watsonx/chat/transformation.py +++ b/litellm/llms/watsonx/chat/transformation.py @@ -83,6 +83,7 @@ class IBMWatsonXChatConfig(IBMWatsonXMixin, OpenAIGPTConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: url = self._get_base_url(api_base=api_base) diff --git a/litellm/llms/watsonx/completion/transformation.py b/litellm/llms/watsonx/completion/transformation.py index 7a4df23944..f414354e2a 100644 --- 
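Both the SageMaker decoder and the Vertex ModelResponseIterator above swap a global str.replace("data:", "") for CustomStreamWrapper._strip_sse_data_from_chunk. The distinction matters whenever the JSON payload itself contains the substring "data:"; a minimal sketch of the safer behavior (illustrative, not litellm's exact implementation):

def strip_sse_data_prefix(chunk: str) -> str:
    # Strip only a leading SSE "data:" field name, never occurrences
    # inside the payload.
    chunk = chunk.strip()
    if chunk.startswith("data:"):
        return chunk[len("data:"):].strip()
    return chunk

# A global replace would corrupt this payload; prefix stripping does not.
assert strip_sse_data_prefix('data: {"note": "data: inside"}') == '{"note": "data: inside"}'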
a/litellm/llms/watsonx/completion/transformation.py +++ b/litellm/llms/watsonx/completion/transformation.py @@ -318,6 +318,7 @@ class IBMWatsonXAIConfig(IBMWatsonXMixin, BaseConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: url = self._get_base_url(api_base=api_base) diff --git a/litellm/llms/watsonx/embed/transformation.py b/litellm/llms/watsonx/embed/transformation.py index 69c1f8fffa..359137ee5e 100644 --- a/litellm/llms/watsonx/embed/transformation.py +++ b/litellm/llms/watsonx/embed/transformation.py @@ -54,6 +54,7 @@ class IBMWatsonXEmbeddingConfig(IBMWatsonXMixin, BaseEmbeddingConfig): api_base: Optional[str], model: str, optional_params: dict, + litellm_params: dict, stream: Optional[bool] = None, ) -> str: url = self._get_base_url(api_base=api_base) diff --git a/litellm/main.py b/litellm/main.py index 9244e47d49..6ae2df517d 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -1159,6 +1159,18 @@ def completion( # type: ignore # noqa: PLR0915 prompt_id=prompt_id, prompt_variables=prompt_variables, ssl_verify=ssl_verify, + merge_reasoning_content_in_choices=kwargs.get( + "merge_reasoning_content_in_choices", None + ), + api_version=api_version, + azure_ad_token=kwargs.get("azure_ad_token"), + tenant_id=kwargs.get("tenant_id"), + client_id=kwargs.get("client_id"), + client_secret=kwargs.get("client_secret"), + azure_username=kwargs.get("azure_username"), + azure_password=kwargs.get("azure_password"), + max_retries=max_retries, + timeout=timeout, ) logging.update_environment_variables( model=model, @@ -2271,23 +2283,22 @@ data = {"model": model, "messages": messages, **optional_params} ## COMPLETION CALL - response = openai_like_chat_completion.completion( + response = base_llm_http_handler.completion( model=model, + stream=stream, messages=messages, - headers=headers, - api_key=api_key, + acompletion=acompletion, api_base=api_base, model_response=model_response, - print_verbose=print_verbose, optional_params=optional_params, litellm_params=litellm_params, - logger_fn=logger_fn, - logging_obj=logging, - acompletion=acompletion, - timeout=timeout, # type: ignore custom_llm_provider="openrouter", - custom_prompt_dict=custom_prompt_dict, + timeout=timeout, + headers=headers, encoding=encoding, + api_key=api_key, + logging_obj=logging, # model call logging done inside the class as we may need to modify I/O to fit the provider's requirements + client=client, ) ## LOGGING logging.post_call( @@ -2853,6 +2864,7 @@ acompletion=acompletion, model_response=model_response, encoding=encoding, + client=client, ) if acompletion is True or optional_params.get("stream", False) is True: return generator @@ -3380,6 +3392,7 @@ } } ) + litellm_params_dict = get_litellm_params(**kwargs) logging: Logging = litellm_logging_obj # type: ignore @@ -3441,6 +3454,7 @@ aembedding=aembedding, max_retries=max_retries, headers=headers or extra_headers, + litellm_params=litellm_params_dict, ) elif ( model in litellm.open_ai_embedding_models @@ -3930,42 +3944,19 @@ async def atext_completion( ctx = contextvars.copy_context() func_with_context = partial(ctx.run, func) - _, custom_llm_provider, _, _ = get_llm_provider( - model=model, api_base=kwargs.get("api_base", None) - ) - - if ( - custom_llm_provider == "openai" - or custom_llm_provider == "azure" - or custom_llm_provider ==
"azure_text" - or custom_llm_provider == "custom_openai" - or custom_llm_provider == "anyscale" - or custom_llm_provider == "mistral" - or custom_llm_provider == "openrouter" - or custom_llm_provider == "deepinfra" - or custom_llm_provider == "perplexity" - or custom_llm_provider == "groq" - or custom_llm_provider == "nvidia_nim" - or custom_llm_provider == "cerebras" - or custom_llm_provider == "sambanova" - or custom_llm_provider == "ai21_chat" - or custom_llm_provider == "ai21" - or custom_llm_provider == "volcengine" - or custom_llm_provider == "text-completion-codestral" - or custom_llm_provider == "deepseek" - or custom_llm_provider == "text-completion-openai" - or custom_llm_provider == "huggingface" - or custom_llm_provider == "ollama" - or custom_llm_provider == "vertex_ai" - or custom_llm_provider in litellm.openai_compatible_providers - ): # currently implemented aiohttp calls for just azure and openai, soon all. - # Await normally - response = await loop.run_in_executor(None, func_with_context) - if asyncio.iscoroutine(response): - response = await response + init_response = await loop.run_in_executor(None, func_with_context) + if isinstance(init_response, dict) or isinstance( + init_response, TextCompletionResponse + ): ## CACHING SCENARIO + if isinstance(init_response, dict): + response = TextCompletionResponse(**init_response) + else: + response = init_response + elif asyncio.iscoroutine(init_response): + response = await init_response else: - # Call the synchronous function using run_in_executor - response = await loop.run_in_executor(None, func_with_context) + response = init_response # type: ignore + if ( kwargs.get("stream", False) is True or isinstance(response, TextCompletionStreamWrapper) @@ -4554,6 +4545,7 @@ def image_generation( # noqa: PLR0915 non_default_params = { k: v for k, v in kwargs.items() if k not in default_params } # model-specific params - pass them straight to the model/provider + optional_params = get_optional_params_image_gen( model=model, n=n, @@ -4565,6 +4557,9 @@ def image_generation( # noqa: PLR0915 custom_llm_provider=custom_llm_provider, **non_default_params, ) + + litellm_params_dict = get_litellm_params(**kwargs) + logging: Logging = litellm_logging_obj logging.update_environment_variables( model=model, @@ -4635,6 +4630,7 @@ def image_generation( # noqa: PLR0915 aimg_generation=aimg_generation, client=client, headers=headers, + litellm_params=litellm_params_dict, ) elif ( custom_llm_provider == "openai" @@ -4663,6 +4659,7 @@ def image_generation( # noqa: PLR0915 optional_params=optional_params, model_response=model_response, aimg_generation=aimg_generation, + client=client, ) elif custom_llm_provider == "vertex_ai": vertex_ai_project = ( @@ -5029,6 +5026,7 @@ def transcription( custom_llm_provider=custom_llm_provider, drop_params=drop_params, ) + litellm_params_dict = get_litellm_params(**kwargs) litellm_logging_obj.update_environment_variables( model=model, @@ -5082,6 +5080,7 @@ def transcription( api_version=api_version, azure_ad_token=azure_ad_token, max_retries=max_retries, + litellm_params=litellm_params_dict, ) elif ( custom_llm_provider == "openai" @@ -5184,7 +5183,7 @@ async def aspeech(*args, **kwargs) -> HttpxBinaryResponseContent: @client -def speech( +def speech( # noqa: PLR0915 model: str, input: str, voice: Optional[Union[str, dict]] = None, @@ -5225,7 +5224,7 @@ def speech( if max_retries is None: max_retries = litellm.num_retries or openai.DEFAULT_MAX_RETRIES - + litellm_params_dict = get_litellm_params(**kwargs) logging_obj 
= kwargs.get("litellm_logging_obj", None) logging_obj.update_environment_variables( model=model, @@ -5342,6 +5341,7 @@ def speech( timeout=timeout, client=client, # pass AsyncOpenAI, OpenAI client aspeech=aspeech, + litellm_params=litellm_params_dict, ) elif custom_llm_provider == "vertex_ai" or custom_llm_provider == "vertex_ai_beta": diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 140fa36478..fa9c7ffbd5 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -6,7 +6,7 @@ "input_cost_per_token": 0.0000, "output_cost_per_token": 0.000, "litellm_provider": "one of https://docs.litellm.ai/docs/providers", - "mode": "one of chat, embedding, completion, image_generation, audio_transcription, audio_speech", + "mode": "one of: chat, embedding, completion, image_generation, audio_transcription, audio_speech, moderation, rerank", "supports_function_calling": true, "supports_parallel_function_calling": true, "supports_vision": true, @@ -931,7 +931,7 @@ "input_cost_per_token": 0.000000, "output_cost_per_token": 0.000000, "litellm_provider": "openai", - "mode": "moderations" + "mode": "moderation" }, "text-moderation-007": { "max_tokens": 32768, @@ -940,7 +940,7 @@ "input_cost_per_token": 0.000000, "output_cost_per_token": 0.000000, "litellm_provider": "openai", - "mode": "moderations" + "mode": "moderation" }, "text-moderation-latest": { "max_tokens": 32768, @@ -949,7 +949,7 @@ "input_cost_per_token": 0.000000, "output_cost_per_token": 0.000000, "litellm_provider": "openai", - "mode": "moderations" + "mode": "moderation" }, "256-x-256/dall-e-2": { "mode": "image_generation", @@ -1021,6 +1021,120 @@ "input_cost_per_character": 0.000030, "litellm_provider": "openai" }, + "azure/gpt-4o-mini-realtime-preview-2024-12-17": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000006, + "input_cost_per_audio_token": 0.00001, + "cache_read_input_token_cost": 0.0000003, + "cache_creation_input_audio_token_cost": 0.0000003, + "output_cost_per_token": 0.0000024, + "output_cost_per_audio_token": 0.00002, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/eu/gpt-4o-mini-realtime-preview-2024-12-17": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000066, + "input_cost_per_audio_token": 0.000011, + "cache_read_input_token_cost": 0.00000033, + "cache_creation_input_audio_token_cost": 0.00000033, + "output_cost_per_token": 0.00000264, + "output_cost_per_audio_token": 0.000022, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/us/gpt-4o-mini-realtime-preview-2024-12-17": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000066, + "input_cost_per_audio_token": 0.000011, + "cache_read_input_token_cost": 0.00000033, + "cache_creation_input_audio_token_cost": 0.00000033, + "output_cost_per_token": 0.00000264, +
"output_cost_per_audio_token": 0.000022, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/gpt-4o-realtime-preview-2024-10-01": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000005, + "input_cost_per_audio_token": 0.0001, + "cache_read_input_token_cost": 0.0000025, + "cache_creation_input_audio_token_cost": 0.00002, + "output_cost_per_token": 0.00002, + "output_cost_per_audio_token": 0.0002, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/us/gpt-4o-realtime-preview-2024-10-01": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000055, + "input_cost_per_audio_token": 0.00011, + "cache_read_input_token_cost": 0.00000275, + "cache_creation_input_audio_token_cost": 0.000022, + "output_cost_per_token": 0.000022, + "output_cost_per_audio_token": 0.00022, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/eu/gpt-4o-realtime-preview-2024-10-01": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000055, + "input_cost_per_audio_token": 0.00011, + "cache_read_input_token_cost": 0.00000275, + "cache_creation_input_audio_token_cost": 0.000022, + "output_cost_per_token": 0.000022, + "output_cost_per_audio_token": 0.00022, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, "azure/o3-mini-2025-01-31": { "max_tokens": 100000, "max_input_tokens": 200000, @@ -1034,6 +1148,36 @@ "supports_prompt_caching": true, "supports_tool_choice": true }, + "azure/us/o3-mini-2025-01-31": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.00000121, + "input_cost_per_token_batches": 0.000000605, + "output_cost_per_token": 0.00000484, + "output_cost_per_token_batches": 0.00000242, + "cache_read_input_token_cost": 0.000000605, + "litellm_provider": "azure", + "mode": "chat", + "supports_vision": false, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "azure/eu/o3-mini-2025-01-31": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.00000121, + "input_cost_per_token_batches": 0.000000605, + "output_cost_per_token": 0.00000484, + "output_cost_per_token_batches": 0.00000242, + "cache_read_input_token_cost": 0.000000605, + "litellm_provider": "azure", + "mode": "chat", + "supports_vision": false, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, "azure/tts-1": { "mode": "audio_speech", "input_cost_per_character": 0.000015, @@ -1068,9 +1212,9 @@ "max_tokens": 65536, "max_input_tokens": 
128000, "max_output_tokens": 65536, - "input_cost_per_token": 0.000003, - "output_cost_per_token": 0.000012, - "cache_read_input_token_cost": 0.0000015, + "input_cost_per_token": 0.00000121, + "output_cost_per_token": 0.00000484, + "cache_read_input_token_cost": 0.000000605, "litellm_provider": "azure", "mode": "chat", "supports_function_calling": true, @@ -1082,9 +1226,41 @@ "max_tokens": 65536, "max_input_tokens": 128000, "max_output_tokens": 65536, - "input_cost_per_token": 0.000003, - "output_cost_per_token": 0.000012, - "cache_read_input_token_cost": 0.0000015, + "input_cost_per_token": 0.00000121, + "output_cost_per_token": 0.00000484, + "cache_read_input_token_cost": 0.000000605, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false, + "supports_prompt_caching": true + }, + "azure/us/o1-mini-2024-09-12": { + "max_tokens": 65536, + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "input_cost_per_token": 0.00000121, + "input_cost_per_token_batches": 0.000000605, + "output_cost_per_token": 0.00000484, + "output_cost_per_token_batches": 0.00000242, + "cache_read_input_token_cost": 0.000000605, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false, + "supports_prompt_caching": true + }, + "azure/eu/o1-mini-2024-09-12": { + "max_tokens": 65536, + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "input_cost_per_token": 0.00000121, + "input_cost_per_token_batches": 0.000000605, + "output_cost_per_token": 0.00000484, + "output_cost_per_token_batches": 0.00000242, + "cache_read_input_token_cost": 0.000000605, "litellm_provider": "azure", "mode": "chat", "supports_function_calling": true, @@ -1122,6 +1298,36 @@ "supports_prompt_caching": true, "supports_tool_choice": true }, + "azure/us/o1-2024-12-17": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.0000165, + "output_cost_per_token": 0.000066, + "cache_read_input_token_cost": 0.00000825, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "azure/eu/o1-2024-12-17": { + "max_tokens": 100000, + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "input_cost_per_token": 0.0000165, + "output_cost_per_token": 0.000066, + "cache_read_input_token_cost": 0.00000825, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, "azure/o1-preview": { "max_tokens": 32768, "max_input_tokens": 128000, @@ -1150,6 +1356,34 @@ "supports_vision": false, "supports_prompt_caching": true }, + "azure/us/o1-preview-2024-09-12": { + "max_tokens": 32768, + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "input_cost_per_token": 0.0000165, + "output_cost_per_token": 0.000066, + "cache_read_input_token_cost": 0.00000825, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false, + "supports_prompt_caching": true + }, + "azure/eu/o1-preview-2024-09-12": { + "max_tokens": 32768, + "max_input_tokens": 128000, + 
"max_output_tokens": 32768, + "input_cost_per_token": 0.0000165, + "output_cost_per_token": 0.000066, + "cache_read_input_token_cost": 0.00000825, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false, + "supports_prompt_caching": true + }, "azure/gpt-4o": { "max_tokens": 4096, "max_input_tokens": 128000, @@ -1195,6 +1429,36 @@ "supports_vision": true, "supports_tool_choice": true }, + "azure/us/gpt-4o-2024-11-20": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.00000275, + "cache_creation_input_token_cost": 0.00000138, + "output_cost_per_token": 0.000011, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_tool_choice": true + }, + "azure/eu/gpt-4o-2024-11-20": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.00000275, + "cache_creation_input_token_cost": 0.00000138, + "output_cost_per_token": 0.000011, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_tool_choice": true + }, "azure/gpt-4o-2024-05-13": { "max_tokens": 4096, "max_input_tokens": 128000, @@ -1225,6 +1489,38 @@ "supports_prompt_caching": true, "supports_tool_choice": true }, + "azure/us/gpt-4o-2024-08-06": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.00000275, + "output_cost_per_token": 0.000011, + "cache_read_input_token_cost": 0.000001375, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "azure/eu/gpt-4o-2024-08-06": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.00000275, + "output_cost_per_token": 0.000011, + "cache_read_input_token_cost": 0.000001375, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, "azure/global-standard/gpt-4o-2024-11-20": { "max_tokens": 16384, "max_input_tokens": 128000, @@ -1285,6 +1581,38 @@ "supports_prompt_caching": true, "supports_tool_choice": true }, + "azure/us/gpt-4o-mini-2024-07-18": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.000000165, + "output_cost_per_token": 0.00000066, + "cache_read_input_token_cost": 0.000000083, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "azure/eu/gpt-4o-mini-2024-07-18": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.000000165, + "output_cost_per_token": 0.00000066, + "cache_read_input_token_cost": 0.000000083, + 
"litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, "azure/gpt-4-turbo-2024-04-09": { "max_tokens": 4096, "max_input_tokens": 128000, @@ -1625,13 +1953,23 @@ "max_tokens": 8192, "max_input_tokens": 128000, "max_output_tokens": 8192, - "input_cost_per_token": 0.0, - "input_cost_per_token_cache_hit": 0.0, - "output_cost_per_token": 0.0, + "input_cost_per_token": 0.00000135, + "output_cost_per_token": 0.0000054, "litellm_provider": "azure_ai", "mode": "chat", - "supports_prompt_caching": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "source": "https://techcommunity.microsoft.com/blog/machinelearningblog/deepseek-r1-improved-performance-higher-limits-and-transparent-pricing/4386367" + }, + "azure_ai/deepseek-v3": { + "max_tokens": 8192, + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "input_cost_per_token": 0.00000114, + "output_cost_per_token": 0.00000456, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_tool_choice": true, + "source": "https://techcommunity.microsoft.com/blog/machinelearningblog/announcing-deepseek-v3-on-azure-ai-foundry-and-github/4390438" }, "azure_ai/jamba-instruct": { "max_tokens": 4096, @@ -1643,6 +1981,17 @@ "mode": "chat", "supports_tool_choice": true }, + "azure_ai/mistral-nemo": { + "max_tokens": 4096, + "max_input_tokens": 131072, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.00000015, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_function_calling": true, + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/000-000.mistral-nemo-12b-2407?tab=PlansAndPrice" + }, "azure_ai/mistral-large": { "max_tokens": 8191, "max_input_tokens": 32000, @@ -1770,10 +2119,34 @@ "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/metagenai.meta-llama-3-1-405b-instruct-offer?tab=PlansAndPrice", "supports_tool_choice": true }, - "azure_ai/Phi-4": { + "azure_ai/Phi-4-mini-instruct": { "max_tokens": 4096, - "max_input_tokens": 128000, + "max_input_tokens": 131072, "max_output_tokens": 4096, + "input_cost_per_token": 0, + "output_cost_per_token": 0, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_function_calling": true, + "source": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/models-featured#microsoft" + }, + "azure_ai/Phi-4-multimodal-instruct": { + "max_tokens": 4096, + "max_input_tokens": 131072, + "max_output_tokens": 4096, + "input_cost_per_token": 0, + "output_cost_per_token": 0, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_audio_input": true, + "supports_function_calling": true, + "supports_vision": true, + "source": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/models-featured#microsoft" + }, + "azure_ai/Phi-4": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, "input_cost_per_token": 0.000000125, "output_cost_per_token": 0.0000005, "litellm_provider": "azure_ai", @@ -1921,6 +2294,7 @@ "output_cost_per_token": 0.0, "litellm_provider": "azure_ai", "mode": "embedding", + "supports_embedding_image_input": true, "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/cohere.cohere-embed-v3-english-offer?tab=PlansAndPrice" }, "azure_ai/Cohere-embed-v3-multilingual": { @@ -1931,6 +2305,7 @@ 
"output_cost_per_token": 0.0, "litellm_provider": "azure_ai", "mode": "embedding", + "supports_embedding_image_input": true, "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/cohere.cohere-embed-v3-english-offer?tab=PlansAndPrice" }, "babbage-002": { @@ -1994,8 +2369,8 @@ "max_tokens": 8191, "max_input_tokens": 32000, "max_output_tokens": 8191, - "input_cost_per_token": 0.000001, - "output_cost_per_token": 0.000003, + "input_cost_per_token": 0.0000001, + "output_cost_per_token": 0.0000003, "litellm_provider": "mistral", "supports_function_calling": true, "mode": "chat", @@ -2006,8 +2381,8 @@ "max_tokens": 8191, "max_input_tokens": 32000, "max_output_tokens": 8191, - "input_cost_per_token": 0.000001, - "output_cost_per_token": 0.000003, + "input_cost_per_token": 0.0000001, + "output_cost_per_token": 0.0000003, "litellm_provider": "mistral", "supports_function_calling": true, "mode": "chat", @@ -2795,6 +3170,7 @@ "supports_vision": true, "tool_use_system_prompt_tokens": 264, "supports_assistant_prefill": true, + "supports_pdf_input": true, "supports_prompt_caching": true, "supports_response_schema": true, "deprecation_date": "2025-10-01", @@ -2814,6 +3190,7 @@ "supports_vision": true, "tool_use_system_prompt_tokens": 264, "supports_assistant_prefill": true, + "supports_pdf_input": true, "supports_prompt_caching": true, "supports_response_schema": true, "deprecation_date": "2025-10-01", @@ -2888,6 +3265,7 @@ "supports_vision": true, "tool_use_system_prompt_tokens": 159, "supports_assistant_prefill": true, + "supports_pdf_input": true, "supports_prompt_caching": true, "supports_response_schema": true, "deprecation_date": "2025-06-01", @@ -2907,15 +3285,16 @@ "supports_vision": true, "tool_use_system_prompt_tokens": 159, "supports_assistant_prefill": true, + "supports_pdf_input": true, "supports_prompt_caching": true, "supports_response_schema": true, "deprecation_date": "2025-06-01", "supports_tool_choice": true }, "claude-3-7-sonnet-latest": { - "max_tokens": 8192, + "max_tokens": 128000, "max_input_tokens": 200000, - "max_output_tokens": 8192, + "max_output_tokens": 128000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000015, "cache_creation_input_token_cost": 0.00000375, @@ -2926,15 +3305,16 @@ "supports_vision": true, "tool_use_system_prompt_tokens": 159, "supports_assistant_prefill": true, + "supports_pdf_input": true, "supports_prompt_caching": true, "supports_response_schema": true, "deprecation_date": "2025-06-01", "supports_tool_choice": true }, "claude-3-7-sonnet-20250219": { - "max_tokens": 8192, + "max_tokens": 128000, "max_input_tokens": 200000, - "max_output_tokens": 8192, + "max_output_tokens": 128000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000015, "cache_creation_input_token_cost": 0.00000375, @@ -2945,6 +3325,7 @@ "supports_vision": true, "tool_use_system_prompt_tokens": 159, "supports_assistant_prefill": true, + "supports_pdf_input": true, "supports_prompt_caching": true, "supports_response_schema": true, "deprecation_date": "2026-02-01", @@ -3886,31 +4267,6 @@ "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", "supports_tool_choice": true }, - "gemini/gemini-2.0-flash": { - "max_tokens": 8192, - "max_input_tokens": 1048576, - "max_output_tokens": 8192, - "max_images_per_prompt": 3000, - "max_videos_per_prompt": 10, - "max_video_length": 1, - "max_audio_length_hours": 8.4, - "max_audio_per_prompt": 1, - "max_pdf_size_mb": 30, - "input_cost_per_audio_token": 0.0000007, - "input_cost_per_token": 
0.0000001, - "output_cost_per_token": 0.0000004, - "litellm_provider": "gemini", - "mode": "chat", - "rpm": 10000, - "tpm": 10000000, - "supports_system_messages": true, - "supports_function_calling": true, - "supports_vision": true, - "supports_response_schema": true, - "supports_audio_output": true, - "supports_tool_choice": true, - "source": "https://ai.google.dev/pricing#2_0flash" - }, "gemini-2.0-flash-001": { "max_tokens": 8192, "max_input_tokens": 1048576, @@ -4002,6 +4358,69 @@ "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash", "supports_tool_choice": true }, + "gemini/gemini-2.0-pro-exp-02-05": { + "max_tokens": 8192, + "max_input_tokens": 2097152, + "max_output_tokens": 8192, + "max_images_per_prompt": 3000, + "max_videos_per_prompt": 10, + "max_video_length": 1, + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_pdf_size_mb": 30, + "input_cost_per_image": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_audio_per_second": 0, + "input_cost_per_token": 0, + "input_cost_per_character": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_character": 0, + "output_cost_per_token_above_128k_tokens": 0, + "output_cost_per_character_above_128k_tokens": 0, + "litellm_provider": "gemini", + "mode": "chat", + "rpm": 2, + "tpm": 1000000, + "supports_system_messages": true, + "supports_function_calling": true, + "supports_vision": true, + "supports_audio_input": true, + "supports_video_input": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "gemini/gemini-2.0-flash": { + "max_tokens": 8192, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_images_per_prompt": 3000, + "max_videos_per_prompt": 10, + "max_video_length": 1, + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_pdf_size_mb": 30, + "input_cost_per_audio_token": 0.0000007, + "input_cost_per_token": 0.0000001, + "output_cost_per_token": 0.0000004, + "litellm_provider": "gemini", + "mode": "chat", + "rpm": 10000, + "tpm": 10000000, + "supports_system_messages": true, + "supports_function_calling": true, + "supports_vision": true, + "supports_response_schema": true, + "supports_audio_output": true, + "supports_tool_choice": true, + "source": "https://ai.google.dev/pricing#2_0flash" + }, "gemini/gemini-2.0-flash-001": { "max_tokens": 8192, "max_input_tokens": 1048576, @@ -4091,7 +4510,7 @@ "gemini/gemini-2.0-flash-thinking-exp": { "max_tokens": 8192, "max_input_tokens": 1048576, - "max_output_tokens": 8192, + "max_output_tokens": 65536, "max_images_per_prompt": 3000, "max_videos_per_prompt": 10, "max_video_length": 1, @@ -4124,6 +4543,98 @@ "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash", "supports_tool_choice": true }, + "gemini/gemini-2.0-flash-thinking-exp-01-21": { + "max_tokens": 8192, + "max_input_tokens": 1048576, + "max_output_tokens": 65536, + "max_images_per_prompt": 3000, + "max_videos_per_prompt": 10, + "max_video_length": 1, + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_pdf_size_mb": 30, + "input_cost_per_image": 0, + "input_cost_per_video_per_second": 0, + 
"input_cost_per_audio_per_second": 0, + "input_cost_per_token": 0, + "input_cost_per_character": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_character": 0, + "output_cost_per_token_above_128k_tokens": 0, + "output_cost_per_character_above_128k_tokens": 0, + "litellm_provider": "gemini", + "mode": "chat", + "supports_system_messages": true, + "supports_function_calling": true, + "supports_vision": true, + "supports_response_schema": true, + "supports_audio_output": true, + "tpm": 4000000, + "rpm": 10, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash", + "supports_tool_choice": true + }, + "gemini/gemma-3-27b-it": { + "max_tokens": 8192, + "max_input_tokens": 131072, + "max_output_tokens": 8192, + "input_cost_per_image": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_audio_per_second": 0, + "input_cost_per_token": 0, + "input_cost_per_character": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_character": 0, + "output_cost_per_token_above_128k_tokens": 0, + "output_cost_per_character_above_128k_tokens": 0, + "litellm_provider": "gemini", + "mode": "chat", + "supports_system_messages": true, + "supports_function_calling": true, + "supports_vision": true, + "supports_response_schema": true, + "supports_audio_output": false, + "source": "https://aistudio.google.com", + "supports_tool_choice": true + }, + "gemini/learnlm-1.5-pro-experimental": { + "max_tokens": 8192, + "max_input_tokens": 32767, + "max_output_tokens": 8192, + "input_cost_per_image": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_audio_per_second": 0, + "input_cost_per_token": 0, + "input_cost_per_character": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_character": 0, + "output_cost_per_token_above_128k_tokens": 0, + "output_cost_per_character_above_128k_tokens": 0, + "litellm_provider": "gemini", + "mode": "chat", + "supports_system_messages": true, + "supports_function_calling": true, + "supports_vision": true, + "supports_response_schema": true, + "supports_audio_output": false, + "source": "https://aistudio.google.com", + "supports_tool_choice": true + }, "vertex_ai/claude-3-sonnet": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -4159,6 +4670,7 @@ "litellm_provider": "vertex_ai-anthropic_models", "mode": "chat", "supports_function_calling": true, + "supports_pdf_input": true, "supports_vision": true, "supports_assistant_prefill": true, "supports_tool_choice": true @@ -4172,6 +4684,7 @@ "litellm_provider": "vertex_ai-anthropic_models", "mode": "chat", "supports_function_calling": true, + "supports_pdf_input": true, "supports_vision": true, "supports_assistant_prefill": true, "supports_tool_choice": true @@ -4185,6 +4698,7 @@ "litellm_provider": 
"vertex_ai-anthropic_models", "mode": "chat", "supports_function_calling": true, + "supports_pdf_input": true, "supports_vision": true, "supports_assistant_prefill": true, "supports_tool_choice": true @@ -4198,6 +4712,7 @@ "litellm_provider": "vertex_ai-anthropic_models", "mode": "chat", "supports_function_calling": true, + "supports_pdf_input": true, "supports_vision": true, "supports_assistant_prefill": true, "supports_tool_choice": true @@ -4213,6 +4728,7 @@ "litellm_provider": "vertex_ai-anthropic_models", "mode": "chat", "supports_function_calling": true, + "supports_pdf_input": true, "supports_vision": true, "tool_use_system_prompt_tokens": 159, "supports_assistant_prefill": true, @@ -4256,6 +4772,7 @@ "litellm_provider": "vertex_ai-anthropic_models", "mode": "chat", "supports_function_calling": true, + "supports_pdf_input": true, "supports_assistant_prefill": true, "supports_tool_choice": true }, @@ -4268,6 +4785,7 @@ "litellm_provider": "vertex_ai-anthropic_models", "mode": "chat", "supports_function_calling": true, + "supports_pdf_input": true, "supports_assistant_prefill": true, "supports_tool_choice": true }, @@ -4498,6 +5016,12 @@ "mode": "image_generation", "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" }, + "vertex_ai/imagen-3.0-generate-002": { + "output_cost_per_image": 0.04, + "litellm_provider": "vertex_ai-image-models", + "mode": "image_generation", + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, "vertex_ai/imagen-3.0-generate-001": { "output_cost_per_image": 0.04, "litellm_provider": "vertex_ai-image-models", @@ -5278,6 +5802,7 @@ "input_cost_per_token": 0.00000010, "output_cost_per_token": 0.00000, "litellm_provider": "cohere", + "supports_embedding_image_input": true, "mode": "embedding" }, "embed-english-v2.0": { @@ -6064,6 +6589,26 @@ "mode": "chat", "supports_tool_choice": true }, + "jamba-large-1.6": { + "max_tokens": 256000, + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "input_cost_per_token": 0.000002, + "output_cost_per_token": 0.000008, + "litellm_provider": "ai21", + "mode": "chat", + "supports_tool_choice": true + }, + "jamba-mini-1.6": { + "max_tokens": 256000, + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "input_cost_per_token": 0.0000002, + "output_cost_per_token": 0.0000004, + "litellm_provider": "ai21", + "mode": "chat", + "supports_tool_choice": true + }, "j2-mid": { "max_tokens": 8192, "max_input_tokens": 8192, @@ -6421,7 +6966,7 @@ "supports_response_schema": true }, "us.amazon.nova-micro-v1:0": { - "max_tokens": 4096, + "max_tokens": 4096, "max_input_tokens": 300000, "max_output_tokens": 4096, "input_cost_per_token": 0.000000035, @@ -6432,6 +6977,18 @@ "supports_prompt_caching": true, "supports_response_schema": true }, + "eu.amazon.nova-micro-v1:0": { + "max_tokens": 4096, + "max_input_tokens": 300000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000000046, + "output_cost_per_token": 0.000000184, + "litellm_provider": "bedrock_converse", + "mode": "chat", + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true + }, "amazon.nova-lite-v1:0": { "max_tokens": 4096, "max_input_tokens": 128000, @@ -6447,7 +7004,7 @@ "supports_response_schema": true }, "us.amazon.nova-lite-v1:0": { - "max_tokens": 4096, + "max_tokens": 4096, "max_input_tokens": 128000, "max_output_tokens": 4096, "input_cost_per_token": 0.00000006, @@ -6460,6 +7017,20 @@ "supports_prompt_caching": true, "supports_response_schema": true }, + 
"eu.amazon.nova-lite-v1:0": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000000078, + "output_cost_per_token": 0.000000312, + "litellm_provider": "bedrock_converse", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true + }, "amazon.nova-pro-v1:0": { "max_tokens": 4096, "max_input_tokens": 300000, @@ -6475,7 +7046,7 @@ "supports_response_schema": true }, "us.amazon.nova-pro-v1:0": { - "max_tokens": 4096, + "max_tokens": 4096, "max_input_tokens": 300000, "max_output_tokens": 4096, "input_cost_per_token": 0.0000008, @@ -6488,6 +7059,27 @@ "supports_prompt_caching": true, "supports_response_schema": true }, + "1024-x-1024/50-steps/bedrock/amazon.nova-canvas-v1:0": { + "max_input_tokens": 2600, + "output_cost_per_image": 0.06, + "litellm_provider": "bedrock", + "mode": "image_generation" + }, + "eu.amazon.nova-pro-v1:0": { + "max_tokens": 4096, + "max_input_tokens": 300000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000105, + "output_cost_per_token": 0.0000042, + "litellm_provider": "bedrock_converse", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "source": "https://aws.amazon.com/bedrock/pricing/" + }, "anthropic.claude-3-sonnet-20240229-v1:0": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -7392,8 +7984,9 @@ "max_input_tokens": 512, "input_cost_per_token": 0.0000001, "output_cost_per_token": 0.000000, - "litellm_provider": "bedrock", - "mode": "embedding" + "litellm_provider": "bedrock", + "mode": "embedding", + "supports_embedding_image_input": true }, "cohere.embed-multilingual-v3": { "max_tokens": 512, @@ -7401,7 +7994,20 @@ "input_cost_per_token": 0.0000001, "output_cost_per_token": 0.000000, "litellm_provider": "bedrock", - "mode": "embedding" + "mode": "embedding", + "supports_embedding_image_input": true + }, + "us.deepseek.r1-v1:0": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000135, + "output_cost_per_token": 0.0000054, + "litellm_provider": "bedrock_converse", + "mode": "chat", + "supports_function_calling": false, + "supports_tool_choice": false + }, "meta.llama3-3-70b-instruct-v1:0": { "max_tokens": 4096, @@ -7817,22 +8423,22 @@ "mode": "image_generation" }, "stability.sd3-5-large-v1:0": { - "max_tokens": 77, - "max_input_tokens": 77, + "max_tokens": 77, + "max_input_tokens": 77, "output_cost_per_image": 0.08, "litellm_provider": "bedrock", "mode": "image_generation" }, "stability.stable-image-core-v1:0": { - "max_tokens": 77, - "max_input_tokens": 77, + "max_tokens": 77, + "max_input_tokens": 77, "output_cost_per_image": 0.04, "litellm_provider": "bedrock", "mode": "image_generation" }, "stability.stable-image-core-v1:1": { - "max_tokens": 77, - "max_input_tokens": 77, + "max_tokens": 77, + "max_input_tokens": 77, "output_cost_per_image": 0.04, "litellm_provider": "bedrock", "mode": "image_generation" @@ -7845,8 +8451,8 @@ "mode": "image_generation" }, "stability.stable-image-ultra-v1:1": { - "max_tokens": 77, - "max_input_tokens": 77, + "max_tokens": 77, + "max_input_tokens": 77, "output_cost_per_image": 0.14, "litellm_provider": "bedrock", "mode": "image_generation" diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/157-cf7bc8b3ae1b80ba.js 
b/litellm/proxy/_experimental/out/_next/static/chunks/157-cf7bc8b3ae1b80ba.js new file mode 100644 index 0000000000..6a596c25d8 --- /dev/null +++ b/litellm/proxy/_experimental/out/_next/static/chunks/157-cf7bc8b3ae1b80ba.js @@ -0,0 +1,11 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[157],{12660:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M917.7 148.8l-42.4-42.4c-1.6-1.6-3.6-2.3-5.7-2.3s-4.1.8-5.7 2.3l-76.1 76.1a199.27 199.27 0 00-112.1-34.3c-51.2 0-102.4 19.5-141.5 58.6L432.3 308.7a8.03 8.03 0 000 11.3L704 591.7c1.6 1.6 3.6 2.3 5.7 2.3 2 0 4.1-.8 5.7-2.3l101.9-101.9c68.9-69 77-175.7 24.3-253.5l76.1-76.1c3.1-3.2 3.1-8.3 0-11.4zM769.1 441.7l-59.4 59.4-186.8-186.8 59.4-59.4c24.9-24.9 58.1-38.7 93.4-38.7 35.3 0 68.4 13.7 93.4 38.7 24.9 24.9 38.7 58.1 38.7 93.4 0 35.3-13.8 68.4-38.7 93.4zm-190.2 105a8.03 8.03 0 00-11.3 0L501 613.3 410.7 523l66.7-66.7c3.1-3.1 3.1-8.2 0-11.3L441 408.6a8.03 8.03 0 00-11.3 0L363 475.3l-43-43a7.85 7.85 0 00-5.7-2.3c-2 0-4.1.8-5.7 2.3L206.8 534.2c-68.9 69-77 175.7-24.3 253.5l-76.1 76.1a8.03 8.03 0 000 11.3l42.4 42.4c1.6 1.6 3.6 2.3 5.7 2.3s4.1-.8 5.7-2.3l76.1-76.1c33.7 22.9 72.9 34.3 112.1 34.3 51.2 0 102.4-19.5 141.5-58.6l101.9-101.9c3.1-3.1 3.1-8.2 0-11.3l-43-43 66.7-66.7c3.1-3.1 3.1-8.2 0-11.3l-36.6-36.2zM441.7 769.1a131.32 131.32 0 01-93.4 38.7c-35.3 0-68.4-13.7-93.4-38.7a131.32 131.32 0 01-38.7-93.4c0-35.3 13.7-68.4 38.7-93.4l59.4-59.4 186.8 186.8-59.4 59.4z"}}]},name:"api",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},88009:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M464 144H160c-8.8 0-16 7.2-16 16v304c0 8.8 7.2 16 16 16h304c8.8 0 16-7.2 16-16V160c0-8.8-7.2-16-16-16zm-52 268H212V212h200v200zm452-268H560c-8.8 0-16 7.2-16 16v304c0 8.8 7.2 16 16 16h304c8.8 0 16-7.2 16-16V160c0-8.8-7.2-16-16-16zm-52 268H612V212h200v200zM464 544H160c-8.8 0-16 7.2-16 16v304c0 8.8 7.2 16 16 16h304c8.8 0 16-7.2 16-16V560c0-8.8-7.2-16-16-16zm-52 268H212V612h200v200zm452-268H560c-8.8 0-16 7.2-16 16v304c0 8.8 7.2 16 16 16h304c8.8 0 16-7.2 16-16V560c0-8.8-7.2-16-16-16zm-52 268H612V612h200v200z"}}]},name:"appstore",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},37527:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M894 462c30.9 0 43.8-39.7 18.7-58L530.8 126.2a31.81 31.81 0 00-37.6 0L111.3 404c-25.1 18.2-12.2 58 18.8 58H192v374h-72c-4.4 0-8 3.6-8 8v52c0 4.4 3.6 8 8 8h784c4.4 0 8-3.6 8-8v-52c0-4.4-3.6-8-8-8h-72V462h62zM512 196.7l271.1 197.2H240.9L512 196.7zM264 462h117v374H264V462zm189 0h117v374H453V462zm307 374H642V462h118v374z"}}]},name:"bank",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},9775:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M888 792H200V168c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v688c0 4.4 3.6 8 8 8h752c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8zm-600-80h56c4.4 0 8-3.6 8-8V560c0-4.4-3.6-8-8-8h-56c-4.4 0-8 
3.6-8 8v144c0 4.4 3.6 8 8 8zm152 0h56c4.4 0 8-3.6 8-8V384c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v320c0 4.4 3.6 8 8 8zm152 0h56c4.4 0 8-3.6 8-8V462c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v242c0 4.4 3.6 8 8 8zm152 0h56c4.4 0 8-3.6 8-8V304c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v400c0 4.4 3.6 8 8 8z"}}]},name:"bar-chart",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},68208:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M856 376H648V168c0-8.8-7.2-16-16-16H168c-8.8 0-16 7.2-16 16v464c0 8.8 7.2 16 16 16h208v208c0 8.8 7.2 16 16 16h464c8.8 0 16-7.2 16-16V392c0-8.8-7.2-16-16-16zm-480 16v188H220V220h360v156H392c-8.8 0-16 7.2-16 16zm204 52v136H444V444h136zm224 360H444V648h188c8.8 0 16-7.2 16-16V444h156v360z"}}]},name:"block",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},9738:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M912 190h-69.9c-9.8 0-19.1 4.5-25.1 12.2L404.7 724.5 207 474a32 32 0 00-25.1-12.2H112c-6.7 0-10.4 7.7-6.3 12.9l273.9 347c12.8 16.2 37.4 16.2 50.3 0l488.4-618.9c4.1-5.1.4-12.8-6.3-12.8z"}}]},name:"check",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},44625:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M832 64H192c-17.7 0-32 14.3-32 32v832c0 17.7 14.3 32 32 32h640c17.7 0 32-14.3 32-32V96c0-17.7-14.3-32-32-32zm-600 72h560v208H232V136zm560 480H232V408h560v208zm0 272H232V680h560v208zM304 240a40 40 0 1080 0 40 40 0 10-80 0zm0 272a40 40 0 1080 0 40 40 0 10-80 0zm0 272a40 40 0 1080 0 40 40 0 10-80 0z"}}]},name:"database",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},70464:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M884 256h-75c-5.1 0-9.9 2.5-12.9 6.6L512 654.2 227.9 262.6c-3-4.1-7.8-6.6-12.9-6.6h-75c-6.5 0-10.3 7.4-6.5 12.7l352.6 486.1c12.8 17.6 39 17.6 51.7 0l352.6-486.1c3.9-5.3.1-12.7-6.4-12.7z"}}]},name:"down",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},73879:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M505.7 661a8 8 0 0012.6 0l112-141.7c4.1-5.2.4-12.9-6.3-12.9h-74.1V168c0-4.4-3.6-8-8-8h-60c-4.4 0-8 3.6-8 8v338.3H400c-6.7 0-10.4 7.7-6.3 12.9l112 141.8zM878 626h-60c-4.4 0-8 3.6-8 8v154H214V634c0-4.4-3.6-8-8-8h-60c-4.4 0-8 3.6-8 8v198c0 17.7 14.3 32 32 32h684c17.7 0 32-14.3 32-32V634c0-4.4-3.6-8-8-8z"}}]},name:"download",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},39760:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M176 511a56 56 0 10112 0 56 56 
0 10-112 0zm280 0a56 56 0 10112 0 56 56 0 10-112 0zm280 0a56 56 0 10112 0 56 56 0 10-112 0z"}}]},name:"ellipsis",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},41169:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 472a40 40 0 1080 0 40 40 0 10-80 0zm367 352.9L696.3 352V178H768v-68H256v68h71.7v174L145 824.9c-2.8 7.4-4.3 15.2-4.3 23.1 0 35.3 28.7 64 64 64h614.6c7.9 0 15.7-1.5 23.1-4.3 33-12.7 49.4-49.8 36.6-82.8zM395.7 364.7V180h232.6v184.7L719.2 600c-20.7-5.3-42.1-8-63.9-8-61.2 0-119.2 21.5-165.3 60a188.78 188.78 0 01-121.3 43.9c-32.7 0-64.1-8.3-91.8-23.7l118.8-307.5zM210.5 844l41.7-107.8c35.7 18.1 75.4 27.8 116.6 27.8 61.2 0 119.2-21.5 165.3-60 33.9-28.2 76.3-43.9 121.3-43.9 35 0 68.4 9.5 97.6 27.1L813.5 844h-603z"}}]},name:"experiment",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},6520:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M942.2 486.2C847.4 286.5 704.1 186 512 186c-192.2 0-335.4 100.5-430.2 300.3a60.3 60.3 0 000 51.5C176.6 737.5 319.9 838 512 838c192.2 0 335.4-100.5 430.2-300.3 7.7-16.2 7.7-35 0-51.5zM512 766c-161.3 0-279.4-81.8-362.7-254C232.6 339.8 350.7 258 512 258c161.3 0 279.4 81.8 362.7 254C791.5 684.2 673.4 766 512 766zm-4-430c-97.2 0-176 78.8-176 176s78.8 176 176 176 176-78.8 176-176-78.8-176-176-176zm0 288c-61.9 0-112-50.1-112-112s50.1-112 112-112 112 50.1 112 112-50.1 112-112 112z"}}]},name:"eye",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},15424:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm0 820c-205.4 0-372-166.6-372-372s166.6-372 372-372 372 166.6 372 372-166.6 372-372 372z"}},{tag:"path",attrs:{d:"M464 336a48 48 0 1096 0 48 48 0 10-96 0zm72 112h-48c-4.4 0-8 3.6-8 8v272c0 4.4 3.6 8 8 8h48c4.4 0 8-3.6 8-8V456c0-4.4-3.6-8-8-8z"}}]},name:"info-circle",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},92403:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M608 112c-167.9 0-304 136.1-304 304 0 70.3 23.9 135 63.9 186.5l-41.1 41.1-62.3-62.3a8.15 8.15 0 00-11.4 0l-39.8 39.8a8.15 8.15 0 000 11.4l62.3 62.3-44.9 44.9-62.3-62.3a8.15 8.15 0 00-11.4 0l-39.8 39.8a8.15 8.15 0 000 11.4l62.3 62.3-65.3 65.3a8.03 8.03 0 000 11.3l42.3 42.3c3.1 3.1 8.2 3.1 11.3 0l253.6-253.6A304.06 304.06 0 00608 720c167.9 0 304-136.1 304-304S775.9 112 608 112zm161.2 465.2C726.2 620.3 668.9 644 608 644c-60.9 0-118.2-23.7-161.2-66.8-43.1-43-66.8-100.3-66.8-161.2 0-60.9 23.7-118.2 66.8-161.2 43-43.1 100.3-66.8 161.2-66.8 60.9 0 118.2 23.7 161.2 66.8 43.1 43 66.8 100.3 66.8 161.2 0 60.9-23.7 118.2-66.8 161.2z"}}]},name:"key",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},15327:function(e,t,n){"use 
strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M724 218.3V141c0-6.7-7.7-10.4-12.9-6.3L260.3 486.8a31.86 31.86 0 000 50.3l450.8 352.1c5.3 4.1 12.9.4 12.9-6.3v-77.3c0-4.9-2.3-9.6-6.1-12.6l-360-281 360-281.1c3.8-3 6.1-7.7 6.1-12.6z"}}]},name:"left",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},48231:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M888 792H200V168c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v688c0 4.4 3.6 8 8 8h752c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8zM305.8 637.7c3.1 3.1 8.1 3.1 11.3 0l138.3-137.6L583 628.5c3.1 3.1 8.2 3.1 11.3 0l275.4-275.3c3.1-3.1 3.1-8.2 0-11.3l-39.6-39.6a8.03 8.03 0 00-11.3 0l-230 229.9L461.4 404a8.03 8.03 0 00-11.3 0L266.3 586.7a8.03 8.03 0 000 11.3l39.5 39.7z"}}]},name:"line-chart",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},40428:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M868 732h-70.3c-4.8 0-9.3 2.1-12.3 5.8-7 8.5-14.5 16.7-22.4 24.5a353.84 353.84 0 01-112.7 75.9A352.8 352.8 0 01512.4 866c-47.9 0-94.3-9.4-137.9-27.8a353.84 353.84 0 01-112.7-75.9 353.28 353.28 0 01-76-112.5C167.3 606.2 158 559.9 158 512s9.4-94.2 27.8-137.8c17.8-42.1 43.4-80 76-112.5s70.5-58.1 112.7-75.9c43.6-18.4 90-27.8 137.9-27.8 47.9 0 94.3 9.3 137.9 27.8 42.2 17.8 80.1 43.4 112.7 75.9 7.9 7.9 15.3 16.1 22.4 24.5 3 3.7 7.6 5.8 12.3 5.8H868c6.3 0 10.2-7 6.7-12.3C798 160.5 663.8 81.6 511.3 82 271.7 82.6 79.6 277.1 82 516.4 84.4 751.9 276.2 942 512.4 942c152.1 0 285.7-78.8 362.3-197.7 3.4-5.3-.4-12.3-6.7-12.3zm88.9-226.3L815 393.7c-5.3-4.2-13-.4-13 6.3v76H488c-4.4 0-8 3.6-8 8v56c0 4.4 3.6 8 8 8h314v76c0 6.7 7.8 10.5 13 6.3l141.9-112a8 8 0 000-12.6z"}}]},name:"logout",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},45246:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M696 480H328c-4.4 0-8 3.6-8 8v48c0 4.4 3.6 8 8 8h368c4.4 0 8-3.6 8-8v-48c0-4.4-3.6-8-8-8z"}},{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm0 820c-205.4 0-372-166.6-372-372s166.6-372 372-372 372 166.6 372 372-166.6 372-372 372z"}}]},name:"minus-circle",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},28595:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm0 820c-205.4 0-372-166.6-372-372s166.6-372 372-372 372 166.6 372 372-166.6 372-372 372z"}},{tag:"path",attrs:{d:"M719.4 499.1l-296.1-215A15.9 15.9 0 00398 297v430c0 13.1 14.8 20.5 25.3 12.9l296.1-215a15.9 15.9 0 000-25.8zm-257.6 134V390.9L628.5 512 461.8 633.1z"}}]},name:"play-circle",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return 
o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},96473:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M482 152h60q8 0 8 8v704q0 8-8 8h-60q-8 0-8-8V160q0-8 8-8z"}},{tag:"path",attrs:{d:"M192 474h672q8 0 8 8v60q0 8-8 8H160q-8 0-8-8v-60q0-8 8-8z"}}]},name:"plus",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},57400:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"0 0 1024 1024",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64L128 192v384c0 212.1 171.9 384 384 384s384-171.9 384-384V192L512 64zm312 512c0 172.3-139.7 312-312 312S200 748.3 200 576V246l312-110 312 110v330z"}},{tag:"path",attrs:{d:"M378.4 475.1a35.91 35.91 0 00-50.9 0 35.91 35.91 0 000 50.9l129.4 129.4 2.1 2.1a33.98 33.98 0 0048.1 0L730.6 434a33.98 33.98 0 000-48.1l-2.8-2.8a33.98 33.98 0 00-48.1 0L483 579.7 378.4 475.1z"}}]},name:"safety",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},29436:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M909.6 854.5L649.9 594.8C690.2 542.7 712 479 712 412c0-80.2-31.3-155.4-87.9-212.1-56.6-56.7-132-87.9-212.1-87.9s-155.5 31.3-212.1 87.9C143.2 256.5 112 331.8 112 412c0 80.1 31.3 155.5 87.9 212.1C256.5 680.8 331.8 712 412 712c67 0 130.6-21.8 182.7-62l259.7 259.6a8.2 8.2 0 0011.6 0l43.6-43.5a8.2 8.2 0 000-11.6zM570.4 570.4C528 612.7 471.8 636 412 636s-116-23.3-158.4-65.6C211.3 528 188 471.8 188 412s23.3-116.1 65.6-158.4C296 211.3 352.2 188 412 188s116.1 23.2 158.4 65.6S636 352.2 636 412s-23.3 116.1-65.6 158.4z"}}]},name:"search",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},55322:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M924.8 625.7l-65.5-56c3.1-19 4.7-38.4 4.7-57.8s-1.6-38.8-4.7-57.8l65.5-56a32.03 32.03 0 009.3-35.2l-.9-2.6a443.74 443.74 0 00-79.7-137.9l-1.8-2.1a32.12 32.12 0 00-35.1-9.5l-81.3 28.9c-30-24.6-63.5-44-99.7-57.6l-15.7-85a32.05 32.05 0 00-25.8-25.7l-2.7-.5c-52.1-9.4-106.9-9.4-159 0l-2.7.5a32.05 32.05 0 00-25.8 25.7l-15.8 85.4a351.86 351.86 0 00-99 57.4l-81.9-29.1a32 32 0 00-35.1 9.5l-1.8 2.1a446.02 446.02 0 00-79.7 137.9l-.9 2.6c-4.5 12.5-.8 26.5 9.3 35.2l66.3 56.6c-3.1 18.8-4.6 38-4.6 57.1 0 19.2 1.5 38.4 4.6 57.1L99 625.5a32.03 32.03 0 00-9.3 35.2l.9 2.6c18.1 50.4 44.9 96.9 79.7 137.9l1.8 2.1a32.12 32.12 0 0035.1 9.5l81.9-29.1c29.8 24.5 63.1 43.9 99 57.4l15.8 85.4a32.05 32.05 0 0025.8 25.7l2.7.5a449.4 449.4 0 00159 0l2.7-.5a32.05 32.05 0 0025.8-25.7l15.7-85a350 350 0 0099.7-57.6l81.3 28.9a32 32 0 0035.1-9.5l1.8-2.1c34.8-41.1 61.6-87.5 79.7-137.9l.9-2.6c4.5-12.3.8-26.3-9.3-35zM788.3 465.9c2.5 15.1 3.8 30.6 3.8 46.1s-1.3 31-3.8 46.1l-6.6 40.1 74.7 63.9a370.03 370.03 0 01-42.6 73.6L721 702.8l-31.4 25.8c-23.9 19.6-50.5 35-79.3 45.8l-38.1 14.3-17.9 97a377.5 377.5 0 01-85 0l-17.9-97.2-37.8-14.5c-28.5-10.8-55-26.2-78.7-45.7l-31.4-25.9-93.4 33.2c-17-22.9-31.2-47.6-42.6-73.6l75.5-64.5-6.5-40c-2.4-14.9-3.7-30.3-3.7-45.5 0-15.3 1.2-30.6 3.7-45.5l6.5-40-75.5-64.5c11.3-26.1 25.6-50.7 
42.6-73.6l93.4 33.2 31.4-25.9c23.7-19.5 50.2-34.9 78.7-45.7l37.9-14.3 17.9-97.2c28.1-3.2 56.8-3.2 85 0l17.9 97 38.1 14.3c28.7 10.8 55.4 26.2 79.3 45.8l31.4 25.8 92.8-32.9c17 22.9 31.2 47.6 42.6 73.6L781.8 426l6.5 39.9zM512 326c-97.2 0-176 78.8-176 176s78.8 176 176 176 176-78.8 176-176-78.8-176-176-176zm79.2 255.2A111.6 111.6 0 01512 614c-29.9 0-58-11.7-79.2-32.8A111.6 111.6 0 01400 502c0-29.9 11.7-58 32.8-79.2C454 401.6 482.1 390 512 390c29.9 0 58 11.6 79.2 32.8A111.6 111.6 0 01624 502c0 29.9-11.7 58-32.8 79.2z"}}]},name:"setting",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},41361:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M824.2 699.9a301.55 301.55 0 00-86.4-60.4C783.1 602.8 812 546.8 812 484c0-110.8-92.4-201.7-203.2-200-109.1 1.7-197 90.6-197 200 0 62.8 29 118.8 74.2 155.5a300.95 300.95 0 00-86.4 60.4C345 754.6 314 826.8 312 903.8a8 8 0 008 8.2h56c4.3 0 7.9-3.4 8-7.7 1.9-58 25.4-112.3 66.7-153.5A226.62 226.62 0 01612 684c60.9 0 118.2 23.7 161.3 66.8C814.5 792 838 846.3 840 904.3c.1 4.3 3.7 7.7 8 7.7h56a8 8 0 008-8.2c-2-77-33-149.2-87.8-203.9zM612 612c-34.2 0-66.4-13.3-90.5-37.5a126.86 126.86 0 01-37.5-91.8c.3-32.8 13.4-64.5 36.3-88 24-24.6 56.1-38.3 90.4-38.7 33.9-.3 66.8 12.9 91 36.6 24.8 24.3 38.4 56.8 38.4 91.4 0 34.2-13.3 66.3-37.5 90.5A127.3 127.3 0 01612 612zM361.5 510.4c-.9-8.7-1.4-17.5-1.4-26.4 0-15.9 1.5-31.4 4.3-46.5.7-3.6-1.2-7.3-4.5-8.8-13.6-6.1-26.1-14.5-36.9-25.1a127.54 127.54 0 01-38.7-95.4c.9-32.1 13.8-62.6 36.3-85.6 24.7-25.3 57.9-39.1 93.2-38.7 31.9.3 62.7 12.6 86 34.4 7.9 7.4 14.7 15.6 20.4 24.4 2 3.1 5.9 4.4 9.3 3.2 17.6-6.1 36.2-10.4 55.3-12.4 5.6-.6 8.8-6.6 6.3-11.6-32.5-64.3-98.9-108.7-175.7-109.9-110.9-1.7-203.3 89.2-203.3 199.9 0 62.8 28.9 118.8 74.2 155.5-31.8 14.7-61.1 35-86.5 60.4-54.8 54.7-85.8 126.9-87.8 204a8 8 0 008 8.2h56.1c4.3 0 7.9-3.4 8-7.7 1.9-58 25.4-112.3 66.7-153.5 29.4-29.4 65.4-49.8 104.7-59.7 3.9-1 6.5-4.7 6-8.7z"}}]},name:"team",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},19574:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M848 359.3H627.7L825.8 109c4.1-5.3.4-13-6.3-13H436c-2.8 0-5.5 1.5-6.9 4L170 547.5c-3.1 5.3.7 12 6.9 12h174.4l-89.4 357.6c-1.9 7.8 7.5 13.3 13.3 7.7L853.5 373c5.2-4.9 1.7-13.7-5.5-13.7zM378.2 732.5l60.3-241H281.1l189.6-327.4h224.6L487 427.4h211L378.2 732.5z"}}]},name:"thunderbolt",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},3632:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M400 317.7h73.9V656c0 4.4 3.6 8 8 8h60c4.4 0 8-3.6 8-8V317.7H624c6.7 0 10.4-7.7 6.3-12.9L518.3 163a8 8 0 00-12.6 0l-112 141.7c-4.1 5.3-.4 13 6.3 13zM878 626h-60c-4.4 0-8 3.6-8 8v154H214V634c0-4.4-3.6-8-8-8h-60c-4.4 0-8 3.6-8 8v198c0 17.7 14.3 32 32 32h684c17.7 0 32-14.3 32-32V634c0-4.4-3.6-8-8-8z"}}]},name:"upload",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},15883:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var 
r=n(1119),o=n(2265),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M858.5 763.6a374 374 0 00-80.6-119.5 375.63 375.63 0 00-119.5-80.6c-.4-.2-.8-.3-1.2-.5C719.5 518 760 444.7 760 362c0-137-111-248-248-248S264 225 264 362c0 82.7 40.5 156 102.8 201.1-.4.2-.8.3-1.2.5-44.8 18.9-85 46-119.5 80.6a375.63 375.63 0 00-80.6 119.5A371.7 371.7 0 00136 901.8a8 8 0 008 8.2h60c4.4 0 7.9-3.5 8-7.8 2-77.2 33-149.5 87.8-204.3 56.7-56.7 132-87.9 212.2-87.9s155.5 31.2 212.2 87.9C779 752.7 810 825 812 902.2c.1 4.4 3.6 7.8 8 7.8h60a8 8 0 008-8.2c-1-47.8-10.9-94.3-29.5-138.2zM512 534c-45.9 0-89.1-17.9-121.6-50.4S340 407.9 340 362c0-45.9 17.9-89.1 50.4-121.6S466.1 190 512 190s89.1 17.9 121.6 50.4S684 316.1 684 362c0 45.9-17.9 89.1-50.4 121.6S557.9 534 512 534z"}}]},name:"user",theme:"outlined"},a=n(55015),l=o.forwardRef(function(e,t){return o.createElement(a.Z,(0,r.Z)({},e,{ref:t,icon:i}))})},58747:function(e,t,n){"use strict";n.d(t,{Z:function(){return i}});var r=n(5853),o=n(2265);let i=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M11.9999 13.1714L16.9497 8.22168L18.3639 9.63589L11.9999 15.9999L5.63599 9.63589L7.0502 8.22168L11.9999 13.1714Z"}))}},4537:function(e,t,n){"use strict";n.d(t,{Z:function(){return i}});var r=n(5853),o=n(2265);let i=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M12 22C6.47715 22 2 17.5228 2 12C2 6.47715 6.47715 2 12 2C17.5228 2 22 6.47715 22 12C22 17.5228 17.5228 22 12 22ZM12 10.5858L9.17157 7.75736L7.75736 9.17157L10.5858 12L7.75736 14.8284L9.17157 16.2426L12 13.4142L14.8284 16.2426L16.2426 14.8284L13.4142 12L16.2426 9.17157L14.8284 7.75736L12 10.5858Z"}))}},69907:function(e,t,n){"use strict";n.d(t,{Z:function(){return em}});var r=n(5853),o=n(2265),i=n(47625),a=n(93765),l=n(61994),c=n(59221),s=n(86757),u=n.n(s),d=n(95645),f=n.n(d),p=n(77571),h=n.n(p),m=n(82559),g=n.n(m),v=n(21652),y=n.n(v),b=n(57165),x=n(81889),w=n(9841),S=n(58772),k=n(34067),E=n(16630),C=n(85355),O=n(82944),j=["layout","type","stroke","connectNulls","isRange","ref"];function P(e){return(P="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function M(){return(M=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(i,j));return o.createElement(w.m,{clipPath:n?"url(#clipPath-".concat(r,")"):null},o.createElement(b.H,M({},(0,O.L6)(d,!0),{points:e,connectNulls:s,type:l,baseLine:t,layout:a,stroke:"none",className:"recharts-area-area"})),"none"!==c&&o.createElement(b.H,M({},(0,O.L6)(this.props,!1),{className:"recharts-area-curve",layout:a,type:l,connectNulls:s,fill:"none",points:e})),"none"!==c&&u&&o.createElement(b.H,M({},(0,O.L6)(this.props,!1),{className:"recharts-area-curve",layout:a,type:l,connectNulls:s,fill:"none",points:t})))}},{key:"renderAreaWithAnimation",value:function(e,t){var 
n=this,r=this.props,i=r.points,a=r.baseLine,l=r.isAnimationActive,s=r.animationBegin,u=r.animationDuration,d=r.animationEasing,f=r.animationId,p=this.state,m=p.prevPoints,v=p.prevBaseLine;return o.createElement(c.ZP,{begin:s,duration:u,isActive:l,easing:d,from:{t:0},to:{t:1},key:"area-".concat(f),onAnimationEnd:this.handleAnimationEnd,onAnimationStart:this.handleAnimationStart},function(r){var l=r.t;if(m){var c,s=m.length/i.length,u=i.map(function(e,t){var n=Math.floor(t*s);if(m[n]){var r=m[n],o=(0,E.k4)(r.x,e.x),i=(0,E.k4)(r.y,e.y);return I(I({},e),{},{x:o(l),y:i(l)})}return e});return c=(0,E.hj)(a)&&"number"==typeof a?(0,E.k4)(v,a)(l):h()(a)||g()(a)?(0,E.k4)(v,0)(l):a.map(function(e,t){var n=Math.floor(t*s);if(v[n]){var r=v[n],o=(0,E.k4)(r.x,e.x),i=(0,E.k4)(r.y,e.y);return I(I({},e),{},{x:o(l),y:i(l)})}return e}),n.renderAreaStatically(u,c,e,t)}return o.createElement(w.m,null,o.createElement("defs",null,o.createElement("clipPath",{id:"animationClipPath-".concat(t)},n.renderClipRect(l))),o.createElement(w.m,{clipPath:"url(#animationClipPath-".concat(t,")")},n.renderAreaStatically(i,a,e,t)))})}},{key:"renderArea",value:function(e,t){var n=this.props,r=n.points,o=n.baseLine,i=n.isAnimationActive,a=this.state,l=a.prevPoints,c=a.prevBaseLine,s=a.totalLength;return i&&r&&r.length&&(!l&&s>0||!y()(l,r)||!y()(c,o))?this.renderAreaWithAnimation(e,t):this.renderAreaStatically(r,o,e,t)}},{key:"render",value:function(){var e,t=this.props,n=t.hide,r=t.dot,i=t.points,a=t.className,c=t.top,s=t.left,u=t.xAxis,d=t.yAxis,f=t.width,p=t.height,m=t.isAnimationActive,g=t.id;if(n||!i||!i.length)return null;var v=this.state.isAnimationFinished,y=1===i.length,b=(0,l.Z)("recharts-area",a),x=u&&u.allowDataOverflow,k=d&&d.allowDataOverflow,E=x||k,C=h()(g)?this.id:g,j=null!==(e=(0,O.L6)(r,!1))&&void 0!==e?e:{r:3,strokeWidth:2},P=j.r,M=j.strokeWidth,N=((0,O.$k)(r)?r:{}).clipDot,I=void 0===N||N,R=2*(void 0===P?3:P)+(void 0===M?2:M);return o.createElement(w.m,{className:b},x||k?o.createElement("defs",null,o.createElement("clipPath",{id:"clipPath-".concat(C)},o.createElement("rect",{x:x?s:s-f/2,y:k?c:c-p/2,width:x?f:2*f,height:k?p:2*p})),!I&&o.createElement("clipPath",{id:"clipPath-dots-".concat(C)},o.createElement("rect",{x:s-R/2,y:c-R/2,width:f+R,height:p+R}))):null,y?null:this.renderArea(E,C),(r||y)&&this.renderDots(E,I,C),(!m||v)&&S.e.renderCallByParent(this.props,i))}}],r=[{key:"getDerivedStateFromProps",value:function(e,t){return e.animationId!==t.prevAnimationId?{prevAnimationId:e.animationId,curPoints:e.points,curBaseLine:e.baseLine,prevPoints:t.curPoints,prevBaseLine:t.curBaseLine}:e.points!==t.curPoints||e.baseLine!==t.curBaseLine?{curPoints:e.points,curBaseLine:e.baseLine}:null}}],n&&R(a.prototype,n),r&&R(a,r),Object.defineProperty(a,"prototype",{writable:!1}),a}(o.PureComponent);D(L,"displayName","Area"),D(L,"defaultProps",{stroke:"#3182bd",fill:"#3182bd",fillOpacity:.6,xAxisId:0,yAxisId:0,legendType:"line",connectNulls:!1,points:[],dot:!1,activeDot:!0,hide:!1,isAnimationActive:!k.x.isSsr,animationBegin:0,animationDuration:1500,animationEasing:"ease"}),D(L,"getBaseValue",function(e,t,n,r){var o=e.layout,i=e.baseValue,a=t.props.baseValue,l=null!=a?a:i;if((0,E.hj)(l)&&"number"==typeof l)return l;var c="horizontal"===o?r:n,s=c.scale.domain();if("number"===c.type){var u=Math.max(s[0],s[1]),d=Math.min(s[0],s[1]);return"dataMin"===l?d:"dataMax"===l?u:u<0?u:Math.max(Math.min(s[0],s[1]),0)}return"dataMin"===l?s[0]:"dataMax"===l?s[1]:s[0]}),D(L,"getComposedData",function(e){var 
t,n=e.props,r=e.item,o=e.xAxis,i=e.yAxis,a=e.xAxisTicks,l=e.yAxisTicks,c=e.bandSize,s=e.dataKey,u=e.stackedData,d=e.dataStartIndex,f=e.displayedData,p=e.offset,h=n.layout,m=u&&u.length,g=L.getBaseValue(n,r,o,i),v="horizontal"===h,y=!1,b=f.map(function(e,t){m?n=u[d+t]:Array.isArray(n=(0,C.F$)(e,s))?y=!0:n=[g,n];var n,r=null==n[1]||m&&null==(0,C.F$)(e,s);return v?{x:(0,C.Hv)({axis:o,ticks:a,bandSize:c,entry:e,index:t}),y:r?null:i.scale(n[1]),value:n,payload:e}:{x:r?null:o.scale(n[1]),y:(0,C.Hv)({axis:i,ticks:l,bandSize:c,entry:e,index:t}),value:n,payload:e}});return t=m||y?b.map(function(e){var t=Array.isArray(e.value)?e.value[0]:null;return v?{x:e.x,y:null!=t&&null!=e.y?i.scale(t):null}:{x:null!=t?o.scale(t):null,y:e.y}}):v?i.scale(g):o.scale(g),I({points:b,baseLine:t,layout:h,isRange:y},p)}),D(L,"renderDotItem",function(e,t){return o.isValidElement(e)?o.cloneElement(e,t):u()(e)?e(t):o.createElement(x.o,M({},t,{className:"recharts-area-dot"}))});var z=n(97059),B=n(62994),F=n(25311),H=(0,a.z)({chartName:"AreaChart",GraphicalChild:L,axisComponents:[{axisType:"xAxis",AxisComp:z.K},{axisType:"yAxis",AxisComp:B.B}],formatAxisMap:F.t9}),q=n(56940),W=n(8147),K=n(22190),U=n(13137),V=["type","layout","connectNulls","ref"];function G(e){return(G="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function X(){return(X=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=Array(t);ni){c=[].concat(Q(r.slice(0,s)),[i-u]);break}var d=c.length%2==0?[0,l]:[l];return[].concat(Q(a.repeat(r,Math.floor(t/o))),Q(c),d).map(function(e){return"".concat(e,"px")}).join(", ")}),eo(en(e),"id",(0,E.EL)("recharts-line-")),eo(en(e),"pathRef",function(t){e.mainCurve=t}),eo(en(e),"handleAnimationEnd",function(){e.setState({isAnimationFinished:!0}),e.props.onAnimationEnd&&e.props.onAnimationEnd()}),eo(en(e),"handleAnimationStart",function(){e.setState({isAnimationFinished:!1}),e.props.onAnimationStart&&e.props.onAnimationStart()}),e}return n=[{key:"componentDidMount",value:function(){if(this.props.isAnimationActive){var e=this.getTotalLength();this.setState({totalLength:e})}}},{key:"componentDidUpdate",value:function(){if(this.props.isAnimationActive){var e=this.getTotalLength();e!==this.state.totalLength&&this.setState({totalLength:e})}}},{key:"getTotalLength",value:function(){var e=this.mainCurve;try{return e&&e.getTotalLength&&e.getTotalLength()||0}catch(e){return 0}}},{key:"renderErrorBar",value:function(e,t){if(this.props.isAnimationActive&&!this.state.isAnimationFinished)return null;var n=this.props,r=n.points,i=n.xAxis,a=n.yAxis,l=n.layout,c=n.children,s=(0,O.NN)(c,U.W);if(!s)return null;var u=function(e,t){return{x:e.x,y:e.y,value:e.value,errorVal:(0,C.F$)(e.payload,t)}};return o.createElement(w.m,{clipPath:e?"url(#clipPath-".concat(t,")"):null},s.map(function(e){return o.cloneElement(e,{key:"bar-".concat(e.props.dataKey),data:r,xAxis:i,yAxis:a,layout:l,dataPointFormatter:u})}))}},{key:"renderDots",value:function(e,t,n){if(this.props.isAnimationActive&&!this.state.isAnimationFinished)return null;var r=this.props,i=r.dot,l=r.points,c=r.dataKey,s=(0,O.L6)(this.props,!1),u=(0,O.L6)(i,!0),d=l.map(function(e,t){var n=Y(Y(Y({key:"dot-".concat(t),r:3},s),u),{},{value:e.value,dataKey:c,cx:e.x,cy:e.y,index:t,payload:e.payload});return 
a.renderDotItem(i,n)}),f={clipPath:e?"url(#clipPath-".concat(t?"":"dots-").concat(n,")"):null};return o.createElement(w.m,X({className:"recharts-line-dots",key:"dots"},f),d)}},{key:"renderCurveStatically",value:function(e,t,n,r){var i=this.props,a=i.type,l=i.layout,c=i.connectNulls,s=(i.ref,function(e,t){if(null==e)return{};var n,r,o=function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(i,V)),u=Y(Y(Y({},(0,O.L6)(s,!0)),{},{fill:"none",className:"recharts-line-curve",clipPath:t?"url(#clipPath-".concat(n,")"):null,points:e},r),{},{type:a,layout:l,connectNulls:c});return o.createElement(b.H,X({},u,{pathRef:this.pathRef}))}},{key:"renderCurveWithAnimation",value:function(e,t){var n=this,r=this.props,i=r.points,a=r.strokeDasharray,l=r.isAnimationActive,s=r.animationBegin,u=r.animationDuration,d=r.animationEasing,f=r.animationId,p=r.animateNewValues,h=r.width,m=r.height,g=this.state,v=g.prevPoints,y=g.totalLength;return o.createElement(c.ZP,{begin:s,duration:u,isActive:l,easing:d,from:{t:0},to:{t:1},key:"line-".concat(f),onAnimationEnd:this.handleAnimationEnd,onAnimationStart:this.handleAnimationStart},function(r){var o,l=r.t;if(v){var c=v.length/i.length,s=i.map(function(e,t){var n=Math.floor(t*c);if(v[n]){var r=v[n],o=(0,E.k4)(r.x,e.x),i=(0,E.k4)(r.y,e.y);return Y(Y({},e),{},{x:o(l),y:i(l)})}if(p){var a=(0,E.k4)(2*h,e.x),s=(0,E.k4)(m/2,e.y);return Y(Y({},e),{},{x:a(l),y:s(l)})}return Y(Y({},e),{},{x:e.x,y:e.y})});return n.renderCurveStatically(s,e,t)}var u=(0,E.k4)(0,y)(l);if(a){var d="".concat(a).split(/[,\s]+/gim).map(function(e){return parseFloat(e)});o=n.getStrokeDasharray(u,y,d)}else o=n.generateSimpleStrokeDasharray(y,u);return n.renderCurveStatically(i,e,t,{strokeDasharray:o})})}},{key:"renderCurve",value:function(e,t){var n=this.props,r=n.points,o=n.isAnimationActive,i=this.state,a=i.prevPoints,l=i.totalLength;return o&&r&&r.length&&(!a&&l>0||!y()(a,r))?this.renderCurveWithAnimation(e,t):this.renderCurveStatically(r,e,t)}},{key:"render",value:function(){var e,t=this.props,n=t.hide,r=t.dot,i=t.points,a=t.className,c=t.xAxis,s=t.yAxis,u=t.top,d=t.left,f=t.width,p=t.height,m=t.isAnimationActive,g=t.id;if(n||!i||!i.length)return null;var v=this.state.isAnimationFinished,y=1===i.length,b=(0,l.Z)("recharts-line",a),x=c&&c.allowDataOverflow,k=s&&s.allowDataOverflow,E=x||k,C=h()(g)?this.id:g,j=null!==(e=(0,O.L6)(r,!1))&&void 0!==e?e:{r:3,strokeWidth:2},P=j.r,M=j.strokeWidth,N=((0,O.$k)(r)?r:{}).clipDot,I=void 0===N||N,R=2*(void 0===P?3:P)+(void 0===M?2:M);return o.createElement(w.m,{className:b},x||k?o.createElement("defs",null,o.createElement("clipPath",{id:"clipPath-".concat(C)},o.createElement("rect",{x:x?d:d-f/2,y:k?u:u-p/2,width:x?f:2*f,height:k?p:2*p})),!I&&o.createElement("clipPath",{id:"clipPath-dots-".concat(C)},o.createElement("rect",{x:d-R/2,y:u-R/2,width:f+R,height:p+R}))):null,!y&&this.renderCurve(E,C),this.renderErrorBar(E,C),(y||r)&&this.renderDots(E,I,C),(!m||v)&&S.e.renderCallByParent(this.props,i))}}],r=[{key:"getDerivedStateFromProps",value:function(e,t){return e.animationId!==t.prevAnimationId?{prevAnimationId:e.animationId,curPoints:e.points,prevPoints:t.curPoints}:e.points!==t.curPoints?{curPoints:e.points}:null}},{key:"repeat",value:function(e,t){for(var 
n=e.length%2!=0?[].concat(Q(e),[0]):e,r=[],o=0;o{let{data:n=[],categories:a=[],index:l,stack:c=!1,colors:s=ef.s,valueFormatter:u=eh.Cj,startEndOnly:d=!1,showXAxis:f=!0,showYAxis:p=!0,yAxisWidth:h=56,intervalType:m="equidistantPreserveStart",showAnimation:g=!1,animationDuration:v=900,showTooltip:y=!0,showLegend:b=!0,showGridLines:w=!0,showGradient:S=!0,autoMinValue:k=!1,curveType:E="linear",minValue:C,maxValue:O,connectNulls:j=!1,allowDecimals:P=!0,noDataText:M,className:N,onValueChange:I,enableLegendSlider:R=!1,customTooltip:T,rotateLabelX:A,tickGap:_=5}=e,D=(0,r._T)(e,["data","categories","index","stack","colors","valueFormatter","startEndOnly","showXAxis","showYAxis","yAxisWidth","intervalType","showAnimation","animationDuration","showTooltip","showLegend","showGridLines","showGradient","autoMinValue","curveType","minValue","maxValue","connectNulls","allowDecimals","noDataText","className","onValueChange","enableLegendSlider","customTooltip","rotateLabelX","tickGap"]),Z=(f||p)&&(!d||p)?20:0,[F,U]=(0,o.useState)(60),[V,G]=(0,o.useState)(void 0),[X,$]=(0,o.useState)(void 0),Y=(0,eu.me)(a,s),Q=(0,eu.i4)(k,C,O),J=!!I;function ee(e){J&&(e===X&&!V||(0,eu.FB)(n,e)&&V&&V.dataKey===e?($(void 0),null==I||I(null)):($(e),null==I||I({eventType:"category",categoryClicked:e})),G(void 0))}return o.createElement("div",Object.assign({ref:t,className:(0,ep.q)("w-full h-80",N)},D),o.createElement(i.h,{className:"h-full w-full"},(null==n?void 0:n.length)?o.createElement(H,{data:n,onClick:J&&(X||V)?()=>{G(void 0),$(void 0),null==I||I(null)}:void 0},w?o.createElement(q.q,{className:(0,ep.q)("stroke-1","stroke-tremor-border","dark:stroke-dark-tremor-border"),horizontal:!0,vertical:!1}):null,o.createElement(z.K,{padding:{left:Z,right:Z},hide:!f,dataKey:l,tick:{transform:"translate(0, 6)"},ticks:d?[n[0][l],n[n.length-1][l]]:void 0,fill:"",stroke:"",className:(0,ep.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),interval:d?"preserveStartEnd":m,tickLine:!1,axisLine:!1,minTickGap:_,angle:null==A?void 0:A.angle,dy:null==A?void 0:A.verticalShift,height:null==A?void 0:A.xAxisHeight}),o.createElement(B.B,{width:h,hide:!p,axisLine:!1,tickLine:!1,type:"number",domain:Q,tick:{transform:"translate(-3, 0)"},fill:"",stroke:"",className:(0,ep.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickFormatter:u,allowDecimals:P}),o.createElement(W.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,cursor:{stroke:"#d1d5db",strokeWidth:1},content:y?e=>{let{active:t,payload:n,label:r}=e;return T?o.createElement(T,{payload:null==n?void 0:n.map(e=>{var t;return Object.assign(Object.assign({},e),{color:null!==(t=Y.get(e.dataKey))&&void 0!==t?t:ed.fr.Gray})}),active:t,label:r}):o.createElement(ec.ZP,{active:t,payload:n,label:r,valueFormatter:u,categoryColors:Y})}:o.createElement(o.Fragment,null),position:{y:0}}),b?o.createElement(K.D,{verticalAlign:"top",height:F,content:e=>{let{payload:t}=e;return(0,el.Z)({payload:t},Y,U,X,J?e=>ee(e):void 0,R)}}):null,a.map(e=>{var t,n;return o.createElement("defs",{key:e},S?o.createElement("linearGradient",{className:(0,eh.bM)(null!==(t=Y.get(e))&&void 0!==t?t:ed.fr.Gray,ef.K.text).textColor,id:Y.get(e),x1:"0",y1:"0",x2:"0",y2:"1"},o.createElement("stop",{offset:"5%",stopColor:"currentColor",stopOpacity:V||X&&X!==e?.15:.4}),o.createElement("stop",{offset:"95%",stopColor:"currentColor",stopOpacity:0})):o.createElement("linearGradient",{className:(0,eh.bM)(null!==(n=Y.get(e))&&void 
0!==n?n:ed.fr.Gray,ef.K.text).textColor,id:Y.get(e),x1:"0",y1:"0",x2:"0",y2:"1"},o.createElement("stop",{stopColor:"currentColor",stopOpacity:V||X&&X!==e?.1:.3})))}),a.map(e=>{var t;return o.createElement(L,{className:(0,eh.bM)(null!==(t=Y.get(e))&&void 0!==t?t:ed.fr.Gray,ef.K.text).strokeColor,strokeOpacity:V||X&&X!==e?.3:1,activeDot:e=>{var t;let{cx:r,cy:i,stroke:a,strokeLinecap:l,strokeLinejoin:c,strokeWidth:s,dataKey:u}=e;return o.createElement(x.o,{className:(0,ep.q)("stroke-tremor-background dark:stroke-dark-tremor-background",I?"cursor-pointer":"",(0,eh.bM)(null!==(t=Y.get(u))&&void 0!==t?t:ed.fr.Gray,ef.K.text).fillColor),cx:r,cy:i,r:5,fill:"",stroke:a,strokeLinecap:l,strokeLinejoin:c,strokeWidth:s,onClick:(t,r)=>{r.stopPropagation(),J&&(e.index===(null==V?void 0:V.index)&&e.dataKey===(null==V?void 0:V.dataKey)||(0,eu.FB)(n,e.dataKey)&&X&&X===e.dataKey?($(void 0),G(void 0),null==I||I(null)):($(e.dataKey),G({index:e.index,dataKey:e.dataKey}),null==I||I(Object.assign({eventType:"dot",categoryClicked:e.dataKey},e.payload))))}})},dot:t=>{var r;let{stroke:i,strokeLinecap:a,strokeLinejoin:l,strokeWidth:c,cx:s,cy:u,dataKey:d,index:f}=t;return(0,eu.FB)(n,e)&&!(V||X&&X!==e)||(null==V?void 0:V.index)===f&&(null==V?void 0:V.dataKey)===e?o.createElement(x.o,{key:f,cx:s,cy:u,r:5,stroke:i,fill:"",strokeLinecap:a,strokeLinejoin:l,strokeWidth:c,className:(0,ep.q)("stroke-tremor-background dark:stroke-dark-tremor-background",I?"cursor-pointer":"",(0,eh.bM)(null!==(r=Y.get(d))&&void 0!==r?r:ed.fr.Gray,ef.K.text).fillColor)}):o.createElement(o.Fragment,{key:f})},key:e,name:e,type:E,dataKey:e,stroke:"",fill:"url(#".concat(Y.get(e),")"),strokeWidth:2,strokeLinejoin:"round",strokeLinecap:"round",isAnimationActive:g,animationDuration:v,stackId:c?"a":void 0,connectNulls:j})}),I?a.map(e=>o.createElement(ea,{className:(0,ep.q)("cursor-pointer"),strokeOpacity:0,key:e,name:e,type:E,dataKey:e,stroke:"transparent",fill:"transparent",legendType:"none",tooltipType:"none",strokeWidth:12,connectNulls:j,onClick:(e,t)=>{t.stopPropagation();let{name:n}=e;ee(n)}})):null):o.createElement(es.Z,{noDataText:M})))});em.displayName="AreaChart"},40278:function(e,t,n){"use strict";n.d(t,{Z:function(){return k}});var r=n(5853),o=n(7084),i=n(26898),a=n(65954),l=n(1153),c=n(2265),s=n(47625),u=n(93765),d=n(31699),f=n(97059),p=n(62994),h=n(25311),m=(0,u.z)({chartName:"BarChart",GraphicalChild:d.$,defaultTooltipEventType:"axis",validateTooltipEventTypes:["axis","item"],axisComponents:[{axisType:"xAxis",AxisComp:f.K},{axisType:"yAxis",AxisComp:p.B}],formatAxisMap:h.t9}),g=n(56940),v=n(8147),y=n(22190),b=n(65278),x=n(98593),w=n(69448),S=n(32644);let 
k=c.forwardRef((e,t)=>{let{data:n=[],categories:u=[],index:h,colors:k=i.s,valueFormatter:E=l.Cj,layout:C="horizontal",stack:O=!1,relative:j=!1,startEndOnly:P=!1,animationDuration:M=900,showAnimation:N=!1,showXAxis:I=!0,showYAxis:R=!0,yAxisWidth:T=56,intervalType:A="equidistantPreserveStart",showTooltip:_=!0,showLegend:D=!0,showGridLines:Z=!0,autoMinValue:L=!1,minValue:z,maxValue:B,allowDecimals:F=!0,noDataText:H,onValueChange:q,enableLegendSlider:W=!1,customTooltip:K,rotateLabelX:U,tickGap:V=5,className:G}=e,X=(0,r._T)(e,["data","categories","index","colors","valueFormatter","layout","stack","relative","startEndOnly","animationDuration","showAnimation","showXAxis","showYAxis","yAxisWidth","intervalType","showTooltip","showLegend","showGridLines","autoMinValue","minValue","maxValue","allowDecimals","noDataText","onValueChange","enableLegendSlider","customTooltip","rotateLabelX","tickGap","className"]),$=I||R?20:0,[Y,Q]=(0,c.useState)(60),J=(0,S.me)(u,k),[ee,et]=c.useState(void 0),[en,er]=(0,c.useState)(void 0),eo=!!q;function ei(e,t,n){var r,o,i,a;n.stopPropagation(),q&&((0,S.vZ)(ee,Object.assign(Object.assign({},e.payload),{value:e.value}))?(er(void 0),et(void 0),null==q||q(null)):(er(null===(o=null===(r=e.tooltipPayload)||void 0===r?void 0:r[0])||void 0===o?void 0:o.dataKey),et(Object.assign(Object.assign({},e.payload),{value:e.value})),null==q||q(Object.assign({eventType:"bar",categoryClicked:null===(a=null===(i=e.tooltipPayload)||void 0===i?void 0:i[0])||void 0===a?void 0:a.dataKey},e.payload))))}let ea=(0,S.i4)(L,z,B);return c.createElement("div",Object.assign({ref:t,className:(0,a.q)("w-full h-80",G)},X),c.createElement(s.h,{className:"h-full w-full"},(null==n?void 0:n.length)?c.createElement(m,{data:n,stackOffset:O?"sign":j?"expand":"none",layout:"vertical"===C?"vertical":"horizontal",onClick:eo&&(en||ee)?()=>{et(void 0),er(void 0),null==q||q(null)}:void 0},Z?c.createElement(g.q,{className:(0,a.q)("stroke-1","stroke-tremor-border","dark:stroke-dark-tremor-border"),horizontal:"vertical"!==C,vertical:"vertical"===C}):null,"vertical"!==C?c.createElement(f.K,{padding:{left:$,right:$},hide:!I,dataKey:h,interval:P?"preserveStartEnd":A,tick:{transform:"translate(0, 6)"},ticks:P?[n[0][h],n[n.length-1][h]]:void 0,fill:"",stroke:"",className:(0,a.q)("mt-4 text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickLine:!1,axisLine:!1,angle:null==U?void 0:U.angle,dy:null==U?void 0:U.verticalShift,height:null==U?void 0:U.xAxisHeight,minTickGap:V}):c.createElement(f.K,{hide:!I,type:"number",tick:{transform:"translate(-3, 0)"},domain:ea,fill:"",stroke:"",className:(0,a.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickLine:!1,axisLine:!1,tickFormatter:E,minTickGap:V,allowDecimals:F,angle:null==U?void 0:U.angle,dy:null==U?void 0:U.verticalShift,height:null==U?void 0:U.xAxisHeight}),"vertical"!==C?c.createElement(p.B,{width:T,hide:!R,axisLine:!1,tickLine:!1,type:"number",domain:ea,tick:{transform:"translate(-3, 0)"},fill:"",stroke:"",className:(0,a.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickFormatter:j?e=>"".concat((100*e).toString()," %"):E,allowDecimals:F}):c.createElement(p.B,{width:T,hide:!R,dataKey:h,axisLine:!1,tickLine:!1,ticks:P?[n[0][h],n[n.length-1][h]]:void 0,type:"category",interval:"preserveStartEnd",tick:{transform:"translate(0, 
6)"},fill:"",stroke:"",className:(0,a.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content")}),c.createElement(v.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,cursor:{fill:"#d1d5db",opacity:"0.15"},content:_?e=>{let{active:t,payload:n,label:r}=e;return K?c.createElement(K,{payload:null==n?void 0:n.map(e=>{var t;return Object.assign(Object.assign({},e),{color:null!==(t=J.get(e.dataKey))&&void 0!==t?t:o.fr.Gray})}),active:t,label:r}):c.createElement(x.ZP,{active:t,payload:n,label:r,valueFormatter:E,categoryColors:J})}:c.createElement(c.Fragment,null),position:{y:0}}),D?c.createElement(y.D,{verticalAlign:"top",height:Y,content:e=>{let{payload:t}=e;return(0,b.Z)({payload:t},J,Q,en,eo?e=>{eo&&(e!==en||ee?(er(e),null==q||q({eventType:"category",categoryClicked:e})):(er(void 0),null==q||q(null)),et(void 0))}:void 0,W)}}):null,u.map(e=>{var t;return c.createElement(d.$,{className:(0,a.q)((0,l.bM)(null!==(t=J.get(e))&&void 0!==t?t:o.fr.Gray,i.K.background).fillColor,q?"cursor-pointer":""),key:e,name:e,type:"linear",stackId:O||j?"a":void 0,dataKey:e,fill:"",isAnimationActive:N,animationDuration:M,shape:e=>((e,t,n,r)=>{let{fillOpacity:o,name:i,payload:a,value:l}=e,{x:s,width:u,y:d,height:f}=e;return"horizontal"===r&&f<0?(d+=f,f=Math.abs(f)):"vertical"===r&&u<0&&(s+=u,u=Math.abs(u)),c.createElement("rect",{x:s,y:d,width:u,height:f,opacity:t||n&&n!==i?(0,S.vZ)(t,Object.assign(Object.assign({},a),{value:l}))?o:.3:o})})(e,ee,en,C),onClick:ei})})):c.createElement(w.Z,{noDataText:H})))});k.displayName="BarChart"},14042:function(e,t,n){"use strict";n.d(t,{Z:function(){return ez}});var r=n(5853),o=n(7084),i=n(26898),a=n(65954),l=n(1153),c=n(2265),s=n(60474),u=n(47625),d=n(93765),f=n(86757),p=n.n(f),h=n(9841),m=n(81889),g=n(61994),v=n(82944),y=["points","className","baseLinePoints","connectNulls"];function b(){return(b=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&void 0!==arguments[0]?arguments[0]:[],t=[[]];return e.forEach(function(e){S(e)?t[t.length-1].push(e):t[t.length-1].length>0&&t.push([])}),S(e[0])&&t[t.length-1].push(e[0]),t[t.length-1].length<=0&&(t=t.slice(0,-1)),t},E=function(e,t){var n=k(e);t&&(n=[n.reduce(function(e,t){return[].concat(x(e),x(t))},[])]);var r=n.map(function(e){return e.reduce(function(e,t,n){return"".concat(e).concat(0===n?"M":"L").concat(t.x,",").concat(t.y)},"")}).join("");return 1===n.length?"".concat(r,"Z"):r},C=function(e,t,n){var r=E(e,n);return"".concat("Z"===r.slice(-1)?r.slice(0,-1):r,"L").concat(E(t.reverse(),n).slice(1))},O=function(e){var t=e.points,n=e.className,r=e.baseLinePoints,o=e.connectNulls,i=function(e,t){if(null==e)return{};var n,r,o=function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(e,y);if(!t||!t.length)return null;var a=(0,g.Z)("recharts-polygon",n);if(r&&r.length){var l=i.stroke&&"none"!==i.stroke,s=C(t,r,o);return c.createElement("g",{className:a},c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"Z"===s.slice(-1)?i.fill:"none",stroke:"none",d:s})),l?c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"none",d:E(t,o)})):null,l?c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"none",d:E(r,o)})):null)}var u=E(t,o);return 
c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"Z"===u.slice(-1)?i.fill:"none",className:a,d:u}))},j=n(58811),P=n(41637),M=n(39206);function N(e){return(N="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function I(){return(I=Object.assign?Object.assign.bind():function(e){for(var t=1;t1e-5?"outer"===t?"start":"end":n<-.00001?"outer"===t?"end":"start":"middle"}},{key:"renderAxisLine",value:function(){var e=this.props,t=e.cx,n=e.cy,r=e.radius,o=e.axisLine,i=e.axisLineType,a=T(T({},(0,v.L6)(this.props,!1)),{},{fill:"none"},(0,v.L6)(o,!1));if("circle"===i)return c.createElement(m.o,I({className:"recharts-polar-angle-axis-line"},a,{cx:t,cy:n,r:r}));var l=this.props.ticks.map(function(e){return(0,M.op)(t,n,r,e.coordinate)});return c.createElement(O,I({className:"recharts-polar-angle-axis-line"},a,{points:l}))}},{key:"renderTicks",value:function(){var e=this,t=this.props,n=t.ticks,r=t.tick,o=t.tickLine,a=t.tickFormatter,l=t.stroke,s=(0,v.L6)(this.props,!1),u=(0,v.L6)(r,!1),d=T(T({},s),{},{fill:"none"},(0,v.L6)(o,!1)),f=n.map(function(t,n){var f=e.getTickLineCoord(t),p=T(T(T({textAnchor:e.getTickTextAnchor(t)},s),{},{stroke:"none",fill:l},u),{},{index:n,payload:t,x:f.x2,y:f.y2});return c.createElement(h.m,I({className:"recharts-polar-angle-axis-tick",key:"tick-".concat(t.coordinate)},(0,P.bw)(e.props,t,n)),o&&c.createElement("line",I({className:"recharts-polar-angle-axis-tick-line"},d,f)),r&&i.renderTickItem(r,p,a?a(t.value,n):t.value))});return c.createElement(h.m,{className:"recharts-polar-angle-axis-ticks"},f)}},{key:"render",value:function(){var e=this.props,t=e.ticks,n=e.radius,r=e.axisLine;return!(n<=0)&&t&&t.length?c.createElement(h.m,{className:"recharts-polar-angle-axis"},r&&this.renderAxisLine(),this.renderTicks()):null}}],r=[{key:"renderTickItem",value:function(e,t,n){return c.isValidElement(e)?c.cloneElement(e,t):p()(e)?e(t):c.createElement(j.x,I({},t,{className:"recharts-polar-angle-axis-tick-value"}),n)}}],n&&A(i.prototype,n),r&&A(i,r),Object.defineProperty(i,"prototype",{writable:!1}),i}(c.PureComponent);Z(B,"displayName","PolarAngleAxis"),Z(B,"axisType","angleAxis"),Z(B,"defaultProps",{type:"category",angleAxisId:0,scale:"auto",cx:0,cy:0,orientation:"outer",axisLine:!0,tickLine:!0,tickSize:8,tick:!0,hide:!1,allowDuplicatedCategory:!0});var F=n(35802),H=n.n(F),q=n(37891),W=n.n(q),K=n(26680),U=["cx","cy","angle","ticks","axisLine"],V=["ticks","tick","angle","tickFormatter","stroke"];function G(e){return(G="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function X(){return(X=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function J(e,t){for(var n=0;n0?el()(e,"paddingAngle",0):0;if(n){var l=(0,eg.k4)(n.endAngle-n.startAngle,e.endAngle-e.startAngle),c=ek(ek({},e),{},{startAngle:i+a,endAngle:i+l(r)+a});o.push(c),i=c.endAngle}else{var s=e.endAngle,d=e.startAngle,f=(0,eg.k4)(0,s-d)(r),p=ek(ek({},e),{},{startAngle:i+a,endAngle:i+f+a});o.push(p),i=p.endAngle}}),c.createElement(h.m,null,e.renderSectorsStatically(o))})}},{key:"attachKeyboardHandlers",value:function(e){var 
[… minified Next.js client chunk (generated webpack build artifact committed with the dashboard assets): bundles Recharts pie/donut sector rendering with keyboard navigation, the Tremor DonutChart, Legend, Badge, and Icon components, the Headless UI Popover (portal root, button, overlay, panel, focus sentinels, group), date-fns format/locale helpers, and react-day-picker navigation and single/multiple/range selection providers; minified content elided …]
t.dayOfWeek.includes(e.getDay());if(t&&"object"==typeof t&&"before"in t&&"after"in t){var n,r,o,i=ti(t.before,e),a=ti(t.after,e),l=i>0,c=a<0;return to(t.before,t.after)?c&&l:l||c}return t&&"object"==typeof t&&"after"in t?ti(e,t.after)>0:t&&"object"==typeof t&&"before"in t?ti(t.before,e)>0:"function"==typeof t&&t(e)})&&n.push(r),n},[]),o={};return r.forEach(function(e){return o[e]=!0}),n&&!e9(e,n)&&(o.outside=!0),o}var ne=(0,d.createContext)(void 0);function nt(e){var t=tI(),n=t7(),r=(0,d.useState)(),o=r[0],i=r[1],a=(0,d.useState)(),l=a[0],c=a[1],s=function(e,t){for(var n,r,o=ec(e[0]),i=e4(e[e.length-1]),a=o;a<=i;){var l=t9(a,t);if(!(!l.disabled&&!l.hidden)){a=(0,eh.Z)(a,1);continue}if(l.selected)return a;l.today&&!r&&(r=a),n||(n=a),a=(0,eh.Z)(a,1)}return r||n}(t.displayMonths,n),u=(null!=o?o:l&&t.isDateDisplayed(l))?l:s,f=function(e){i(e)},p=tk(),h=function(e,r){if(o){var i=function e(t,n){var r=n.moveBy,o=n.direction,i=n.context,a=n.modifiers,l=n.retry,c=void 0===l?{count:0,lastFocused:t}:l,s=i.weekStartsOn,u=i.fromDate,d=i.toDate,f=i.locale,p=({day:eh.Z,week:ta,month:ev.Z,year:tl,startOfWeek:function(e){return i.ISOWeek?tn(e):tt(e,{locale:f,weekStartsOn:s})},endOfWeek:function(e){return i.ISOWeek?ts(e):tc(e,{locale:f,weekStartsOn:s})}})[r](t,"after"===o?1:-1);"before"===o&&u?p=ef([u,p]):"after"===o&&d&&(p=ep([d,p]));var h=!0;if(a){var m=t9(p,a);h=!m.disabled&&!m.hidden}return h?p:c.count>365?c.lastFocused:e(p,{moveBy:r,direction:o,context:i,modifiers:a,retry:tu(tu({},c),{count:c.count+1})})}(o,{moveBy:e,direction:r,context:p,modifiers:n});tr(o,i)||(t.goToDate(i,o),f(i))}};return th.jsx(ne.Provider,{value:{focusedDay:o,focusTarget:u,blur:function(){c(o),i(void 0)},focus:f,focusDayAfter:function(){return h("day","after")},focusDayBefore:function(){return h("day","before")},focusWeekAfter:function(){return h("week","after")},focusWeekBefore:function(){return h("week","before")},focusMonthBefore:function(){return h("month","before")},focusMonthAfter:function(){return h("month","after")},focusYearBefore:function(){return h("year","before")},focusYearAfter:function(){return h("year","after")},focusStartOfWeek:function(){return h("startOfWeek","before")},focusEndOfWeek:function(){return h("endOfWeek","after")}},children:e.children})}function nn(){var e=(0,d.useContext)(ne);if(!e)throw Error("useFocusContext must be used within a FocusProvider");return e}var nr=(0,d.createContext)(void 0);function no(e){return tv(e.initialProps)?th.jsx(ni,{initialProps:e.initialProps,children:e.children}):th.jsx(nr.Provider,{value:{selected:void 0},children:e.children})}function ni(e){var t=e.initialProps,n=e.children,r={selected:t.selected,onDayClick:function(e,n,r){var o,i,a;if(null===(o=t.onDayClick)||void 0===o||o.call(t,e,n,r),n.selected&&!t.required){null===(i=t.onSelect)||void 0===i||i.call(t,void 0,e,n,r);return}null===(a=t.onSelect)||void 0===a||a.call(t,e,e,n,r)}};return th.jsx(nr.Provider,{value:r,children:n})}function na(){var e=(0,d.useContext)(nr);if(!e)throw Error("useSelectSingle must be used within a SelectSingleProvider");return e}function nl(e){var t,n,r,o,i,a,l,c,u,f,p,h,m,g,v,y,b,x,w,S,k,E,C,O,j,P,M,N,I,R,T,A,_,D,Z,L,z,B,F,H,q,W,K=(0,d.useRef)(null),U=(t=e.date,n=e.displayMonth,a=tk(),l=nn(),c=t9(t,t7(),n),u=tk(),f=na(),p=tU(),h=t$(),g=(m=nn()).focusDayAfter,v=m.focusDayBefore,y=m.focusWeekAfter,b=m.focusWeekBefore,x=m.blur,w=m.focus,S=m.focusMonthBefore,k=m.focusMonthAfter,E=m.focusYearBefore,C=m.focusYearAfter,O=m.focusStartOfWeek,j=m.focusEndOfWeek,P={onClick:function(e){var 
n,r,o,i;tv(u)?null===(n=f.onDayClick)||void 0===n||n.call(f,t,c,e):tm(u)?null===(r=p.onDayClick)||void 0===r||r.call(p,t,c,e):tg(u)?null===(o=h.onDayClick)||void 0===o||o.call(h,t,c,e):null===(i=u.onDayClick)||void 0===i||i.call(u,t,c,e)},onFocus:function(e){var n;w(t),null===(n=u.onDayFocus)||void 0===n||n.call(u,t,c,e)},onBlur:function(e){var n;x(),null===(n=u.onDayBlur)||void 0===n||n.call(u,t,c,e)},onKeyDown:function(e){var n;switch(e.key){case"ArrowLeft":e.preventDefault(),e.stopPropagation(),"rtl"===u.dir?g():v();break;case"ArrowRight":e.preventDefault(),e.stopPropagation(),"rtl"===u.dir?v():g();break;case"ArrowDown":e.preventDefault(),e.stopPropagation(),y();break;case"ArrowUp":e.preventDefault(),e.stopPropagation(),b();break;case"PageUp":e.preventDefault(),e.stopPropagation(),e.shiftKey?E():S();break;case"PageDown":e.preventDefault(),e.stopPropagation(),e.shiftKey?C():k();break;case"Home":e.preventDefault(),e.stopPropagation(),O();break;case"End":e.preventDefault(),e.stopPropagation(),j()}null===(n=u.onDayKeyDown)||void 0===n||n.call(u,t,c,e)},onKeyUp:function(e){var n;null===(n=u.onDayKeyUp)||void 0===n||n.call(u,t,c,e)},onMouseEnter:function(e){var n;null===(n=u.onDayMouseEnter)||void 0===n||n.call(u,t,c,e)},onMouseLeave:function(e){var n;null===(n=u.onDayMouseLeave)||void 0===n||n.call(u,t,c,e)},onPointerEnter:function(e){var n;null===(n=u.onDayPointerEnter)||void 0===n||n.call(u,t,c,e)},onPointerLeave:function(e){var n;null===(n=u.onDayPointerLeave)||void 0===n||n.call(u,t,c,e)},onTouchCancel:function(e){var n;null===(n=u.onDayTouchCancel)||void 0===n||n.call(u,t,c,e)},onTouchEnd:function(e){var n;null===(n=u.onDayTouchEnd)||void 0===n||n.call(u,t,c,e)},onTouchMove:function(e){var n;null===(n=u.onDayTouchMove)||void 0===n||n.call(u,t,c,e)},onTouchStart:function(e){var n;null===(n=u.onDayTouchStart)||void 0===n||n.call(u,t,c,e)}},M=tk(),N=na(),I=tU(),R=t$(),T=tv(M)?N.selected:tm(M)?I.selected:tg(M)?R.selected:void 0,A=!!(a.onDayClick||"default"!==a.mode),(0,d.useEffect)(function(){var e;!c.outside&&l.focusedDay&&A&&tr(l.focusedDay,t)&&(null===(e=K.current)||void 0===e||e.focus())},[l.focusedDay,t,K,A,c.outside]),D=(_=[a.classNames.day],Object.keys(c).forEach(function(e){var t=a.modifiersClassNames[e];if(t)_.push(t);else if(Object.values(s).includes(e)){var n=a.classNames["day_".concat(e)];n&&_.push(n)}}),_).join(" "),Z=tu({},a.styles.day),Object.keys(c).forEach(function(e){var t;Z=tu(tu({},Z),null===(t=a.modifiersStyles)||void 0===t?void 0:t[e])}),L=Z,z=!!(c.outside&&!a.showOutsideDays||c.hidden),B=null!==(i=null===(o=a.components)||void 0===o?void 0:o.DayContent)&&void 0!==i?i:tH,F={style:L,className:D,children:th.jsx(B,{date:t,displayMonth:n,activeModifiers:c}),role:"gridcell"},H=l.focusTarget&&tr(l.focusTarget,t)&&!c.outside,q=l.focusedDay&&tr(l.focusedDay,t),W=tu(tu(tu({},F),((r={disabled:c.disabled,role:"gridcell"})["aria-selected"]=c.selected,r.tabIndex=q||H?0:-1,r)),P),{isButton:A,isHidden:z,activeModifiers:c,selectedDays:T,buttonProps:W,divProps:F});return U.isHidden?th.jsx("div",{role:"gridcell"}):U.isButton?th.jsx(t_,tu({name:"day",ref:K},U.buttonProps)):th.jsx("div",tu({},U.divProps))}function nc(e){var t=e.number,n=e.dates,r=tk(),o=r.onWeekNumberClick,i=r.styles,a=r.classNames,l=r.locale,c=r.labels.labelWeekNumber,s=(0,r.formatters.formatWeekNumber)(Number(t),{locale:l});if(!o)return th.jsx("span",{className:a.weeknumber,style:i.weeknumber,children:s});var u=c(Number(t),{locale:l});return 
th.jsx(t_,{name:"week-number","aria-label":u,className:a.weeknumber,style:i.weeknumber,onClick:function(e){o(t,n,e)},children:s})}function ns(e){var t,n,r,o=tk(),i=o.styles,a=o.classNames,l=o.showWeekNumber,c=o.components,s=null!==(t=null==c?void 0:c.Day)&&void 0!==t?t:nl,u=null!==(n=null==c?void 0:c.WeekNumber)&&void 0!==n?n:nc;return l&&(r=th.jsx("td",{className:a.cell,style:i.cell,children:th.jsx(u,{number:e.weekNumber,dates:e.dates})})),th.jsxs("tr",{className:a.row,style:i.row,children:[r,e.dates.map(function(t){return th.jsx("td",{className:a.cell,style:i.cell,role:"presentation",children:th.jsx(s,{displayMonth:e.displayMonth,date:t})},function(e){return(0,ei.Z)(1,arguments),Math.floor(function(e){return(0,ei.Z)(1,arguments),(0,eo.Z)(e).getTime()}(e)/1e3)}(t))})]})}function nu(e,t,n){for(var r=(null==n?void 0:n.ISOWeek)?ts(t):tc(t,n),o=(null==n?void 0:n.ISOWeek)?tn(e):tt(e,n),i=ti(r,o),a=[],l=0;l<=i;l++)a.push((0,eh.Z)(o,l));return a.reduce(function(e,t){var r=(null==n?void 0:n.ISOWeek)?function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e);return Math.round((tn(t).getTime()-(function(e){(0,ei.Z)(1,arguments);var t=function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getFullYear(),r=new Date(0);r.setFullYear(n+1,0,4),r.setHours(0,0,0,0);var o=tn(r),i=new Date(0);i.setFullYear(n,0,4),i.setHours(0,0,0,0);var a=tn(i);return t.getTime()>=o.getTime()?n+1:t.getTime()>=a.getTime()?n:n-1}(e),n=new Date(0);return n.setFullYear(t,0,4),n.setHours(0,0,0,0),tn(n)})(t).getTime())/6048e5)+1}(t):function(e,t){(0,ei.Z)(1,arguments);var n=(0,eo.Z)(e);return Math.round((tt(n,t).getTime()-(function(e,t){(0,ei.Z)(1,arguments);var n,r,o,i,a,l,c,s,u=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.firstWeekContainsDate)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.firstWeekContainsDate)&&void 0!==o?o:ek.firstWeekContainsDate)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 0:null===(s=c.options)||void 0===s?void 0:s.firstWeekContainsDate)&&void 0!==n?n:1),d=function(e,t){(0,ei.Z)(1,arguments);var n,r,o,i,a,l,c,s,u=(0,eo.Z)(e),d=u.getFullYear(),f=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.firstWeekContainsDate)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.firstWeekContainsDate)&&void 0!==o?o:ek.firstWeekContainsDate)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 0:null===(s=c.options)||void 0===s?void 0:s.firstWeekContainsDate)&&void 0!==n?n:1);if(!(f>=1&&f<=7))throw RangeError("firstWeekContainsDate must be between 1 and 7 inclusively");var p=new Date(0);p.setFullYear(d+1,0,f),p.setHours(0,0,0,0);var h=tt(p,t),m=new Date(0);m.setFullYear(d,0,f),m.setHours(0,0,0,0);var g=tt(m,t);return u.getTime()>=h.getTime()?d+1:u.getTime()>=g.getTime()?d:d-1}(e,t),f=new Date(0);return f.setFullYear(d,0,u),f.setHours(0,0,0,0),tt(f,t)})(n,t).getTime())/6048e5)+1}(t,n),o=e.find(function(e){return e.weekNumber===r});return o?o.dates.push(t):e.push({weekNumber:r,dates:[t]}),e},[])}function nd(e){var t,n,r,o=tk(),i=o.locale,a=o.classNames,l=o.styles,c=o.hideHead,s=o.fixedWeeks,u=o.components,d=o.weekStartsOn,f=o.firstWeekContainsDate,p=o.ISOWeek,h=function(e,t){var n=nu(ec(e),e4(e),t);if(null==t?void 0:t.useFixedWeeks){var r=function(e,t){return(0,ei.Z)(1,arguments),function(e,t,n){(0,ei.Z)(2,arguments);var r=tt(e,n),o=tt(t,n);return Math.round((r.getTime()-eD(r)-(o.getTime()-eD(o)))/6048e5)}(function(e){(0,ei.Z)(1,arguments);var 
t=(0,eo.Z)(e),n=t.getMonth();return t.setFullYear(t.getFullYear(),n+1,0),t.setHours(0,0,0,0),t}(e),ec(e),t)+1}(e,t);if(r<6){var o=n[n.length-1],i=o.dates[o.dates.length-1],a=ta(i,6-r),l=nu(ta(i,1),a,t);n.push.apply(n,l)}}return n}(e.displayMonth,{useFixedWeeks:!!s,ISOWeek:p,locale:i,weekStartsOn:d,firstWeekContainsDate:f}),m=null!==(t=null==u?void 0:u.Head)&&void 0!==t?t:tF,g=null!==(n=null==u?void 0:u.Row)&&void 0!==n?n:ns,v=null!==(r=null==u?void 0:u.Footer)&&void 0!==r?r:tz;return th.jsxs("table",{id:e.id,className:a.table,style:l.table,role:"grid","aria-labelledby":e["aria-labelledby"],children:[!c&&th.jsx(m,{}),th.jsx("tbody",{className:a.tbody,style:l.tbody,children:h.map(function(t){return th.jsx(g,{displayMonth:e.displayMonth,dates:t.dates,weekNumber:t.weekNumber},t.weekNumber)})}),th.jsx(v,{displayMonth:e.displayMonth})]})}var nf="undefined"!=typeof window&&window.document&&window.document.createElement?d.useLayoutEffect:d.useEffect,np=!1,nh=0;function nm(){return"react-day-picker-".concat(++nh)}function ng(e){var t,n,r,o,i,a,l,c,s=tk(),u=s.dir,f=s.classNames,p=s.styles,h=s.components,m=tI().displayMonths,g=(r=null!=(t=s.id?"".concat(s.id,"-").concat(e.displayIndex):void 0)?t:np?nm():null,i=(o=(0,d.useState)(r))[0],a=o[1],nf(function(){null===i&&a(nm())},[]),(0,d.useEffect)(function(){!1===np&&(np=!0)},[]),null!==(n=null!=t?t:i)&&void 0!==n?n:void 0),v=s.id?"".concat(s.id,"-grid-").concat(e.displayIndex):void 0,y=[f.month],b=p.month,x=0===e.displayIndex,w=e.displayIndex===m.length-1,S=!x&&!w;"rtl"===u&&(w=(l=[x,w])[0],x=l[1]),x&&(y.push(f.caption_start),b=tu(tu({},b),p.caption_start)),w&&(y.push(f.caption_end),b=tu(tu({},b),p.caption_end)),S&&(y.push(f.caption_between),b=tu(tu({},b),p.caption_between));var k=null!==(c=null==h?void 0:h.Caption)&&void 0!==c?c:tL;return th.jsxs("div",{className:y.join(" "),style:b,children:[th.jsx(k,{id:g,displayMonth:e.displayMonth,displayIndex:e.displayIndex}),th.jsx(nd,{id:v,"aria-labelledby":g,displayMonth:e.displayMonth})]},e.displayIndex)}function nv(e){var t=tk(),n=t.classNames,r=t.styles;return th.jsx("div",{className:n.months,style:r.months,children:e.children})}function ny(e){var t,n,r=e.initialProps,o=tk(),i=nn(),a=tI(),l=(0,d.useState)(!1),c=l[0],s=l[1];(0,d.useEffect)(function(){o.initialFocus&&i.focusTarget&&(c||(i.focus(i.focusTarget),s(!0)))},[o.initialFocus,c,i.focus,i.focusTarget,i]);var u=[o.classNames.root,o.className];o.numberOfMonths>1&&u.push(o.classNames.multiple_months),o.showWeekNumber&&u.push(o.classNames.with_weeknumber);var f=tu(tu({},o.styles.root),o.style),p=Object.keys(r).filter(function(e){return e.startsWith("data-")}).reduce(function(e,t){var n;return tu(tu({},e),((n={})[t]=r[t],n))},{}),h=null!==(n=null===(t=r.components)||void 0===t?void 0:t.Months)&&void 0!==n?n:nv;return th.jsx("div",tu({className:u.join(" "),style:f,dir:o.dir,id:o.id,nonce:r.nonce,title:r.title,lang:r.lang},p,{children:th.jsx(h,{children:a.displayMonths.map(function(e,t){return th.jsx(ng,{displayIndex:t,displayMonth:e},t)})})}))}function nb(e){var t=e.children,n=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);o<r.length;o++)0>t.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n}(e,["children"]);return
th.jsx(tS,{initialProps:n,children:th.jsx(tN,{children:th.jsx(no,{initialProps:n,children:th.jsx(tW,{initialProps:n,children:th.jsx(tG,{initialProps:n,children:th.jsx(t8,{children:th.jsx(nt,{children:t})})})})})})})}function nx(e){return th.jsx(nb,tu({},e,{children:th.jsx(ny,{initialProps:e})}))}let nw=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M10.8284 12.0007L15.7782 16.9504L14.364 18.3646L8 12.0007L14.364 5.63672L15.7782 7.05093L10.8284 12.0007Z"}))},nS=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M13.1717 12.0007L8.22192 7.05093L9.63614 5.63672L16.0001 12.0007L9.63614 18.3646L8.22192 16.9504L13.1717 12.0007Z"}))},nk=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M4.83582 12L11.0429 18.2071L12.4571 16.7929L7.66424 12L12.4571 7.20712L11.0429 5.79291L4.83582 12ZM10.4857 12L16.6928 18.2071L18.107 16.7929L13.3141 12L18.107 7.20712L16.6928 5.79291L10.4857 12Z"}))},nE=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M19.1642 12L12.9571 5.79291L11.5429 7.20712L16.3358 12L11.5429 16.7929L12.9571 18.2071L19.1642 12ZM13.5143 12L7.30722 5.79291L5.89301 7.20712L10.6859 12L5.89301 16.7929L7.30722 18.2071L13.5143 12Z"}))};var nC=n(84264);n(41649);var nO=n(1526),nj=n(7084),nP=n(26898);let nM={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-1",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-1.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-1.5",fontSize:"text-lg"},xl:{paddingX:"px-3.5",paddingY:"py-1.5",fontSize:"text-xl"}},nN={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-0.5",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-0.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-0.5",fontSize:"text-lg"},xl:{paddingX:"px-4",paddingY:"py-1",fontSize:"text-xl"}},nI={xs:{height:"h-4",width:"w-4"},sm:{height:"h-4",width:"w-4"},md:{height:"h-4",width:"w-4"},lg:{height:"h-5",width:"w-5"},xl:{height:"h-6",width:"w-6"}},nR={[nj.wu.Increase]:{bgColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.text).textColor},[nj.wu.ModerateIncrease]:{bgColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.text).textColor},[nj.wu.Decrease]:{bgColor:(0,eJ.bM)(nj.fr.Rose,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Rose,nP.K.text).textColor},[nj.wu.ModerateDecrease]:{bgColor:(0,eJ.bM)(nj.fr.Rose,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Rose,nP.K.text).textColor},[nj.wu.Unchanged]:{bgColor:(0,eJ.bM)(nj.fr.Orange,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Orange,nP.K.text).textColor}},nT={[nj.wu.Increase]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M13.0001 7.82843V20H11.0001V7.82843L5.63614 13.1924L4.22192 11.7782L12.0001 4L19.7783 11.7782L18.3641 13.1924L13.0001 7.82843Z"}))},[nj.wu.ModerateIncrease]:e=>{var t=(0,u._T)(e,[]);return 
d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M16.0037 9.41421L7.39712 18.0208L5.98291 16.6066L14.5895 8H7.00373V6H18.0037V17H16.0037V9.41421Z"}))},[nj.wu.Decrease]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M13.0001 16.1716L18.3641 10.8076L19.7783 12.2218L12.0001 20L4.22192 12.2218L5.63614 10.8076L11.0001 16.1716V4H13.0001V16.1716Z"}))},[nj.wu.ModerateDecrease]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M14.5895 16.0032L5.98291 7.39664L7.39712 5.98242L16.0037 14.589V7.00324H18.0037V18.0032H7.00373V16.0032H14.5895Z"}))},[nj.wu.Unchanged]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M16.1716 10.9999L10.8076 5.63589L12.2218 4.22168L20 11.9999L12.2218 19.778L10.8076 18.3638L16.1716 12.9999H4V10.9999H16.1716Z"}))}},nA=(0,eJ.fn)("BadgeDelta");d.forwardRef((e,t)=>{let{deltaType:n=nj.wu.Increase,isIncreasePositive:r=!0,size:o=nj.u8.SM,tooltip:i,children:a,className:l}=e,c=(0,u._T)(e,["deltaType","isIncreasePositive","size","tooltip","children","className"]),s=nT[n],f=(0,eJ.Fo)(n,r),p=a?nN:nM,{tooltipProps:h,getReferenceProps:m}=(0,nO.l)();return d.createElement("span",Object.assign({ref:(0,eJ.lq)([t,h.refs.setReference]),className:(0,es.q)(nA("root"),"w-max flex-shrink-0 inline-flex justify-center items-center cursor-default rounded-tremor-full bg-opacity-20 dark:bg-opacity-25",nR[f].bgColor,nR[f].textColor,p[o].paddingX,p[o].paddingY,p[o].fontSize,l)},m,c),d.createElement(nO.Z,Object.assign({text:i},h)),d.createElement(s,{className:(0,es.q)(nA("icon"),"shrink-0",a?(0,es.q)("-ml-1 mr-1.5"):nI[o].height,nI[o].width)}),a?d.createElement("p",{className:(0,es.q)(nA("text"),"text-sm whitespace-nowrap")},a):null)}).displayName="BadgeDelta";var n_=n(47323);let nD=e=>{var{onClick:t,icon:n}=e,r=(0,u._T)(e,["onClick","icon"]);return d.createElement("button",Object.assign({type:"button",className:(0,es.q)("flex items-center justify-center p-1 h-7 w-7 outline-none focus:ring-2 transition duration-100 border border-tremor-border dark:border-dark-tremor-border hover:bg-tremor-background-muted dark:hover:bg-dark-tremor-background-muted rounded-tremor-small focus:border-tremor-brand-subtle select-none dark:focus:border-dark-tremor-brand-subtle focus:ring-tremor-brand-muted dark:focus:ring-dark-tremor-brand-muted text-tremor-content-subtle dark:text-dark-tremor-content-subtle hover:text-tremor-content dark:hover:text-dark-tremor-content")},r),d.createElement(n_.Z,{onClick:t,icon:n,variant:"simple",color:"slate",size:"sm"}))};function nZ(e){var{mode:t,defaultMonth:n,selected:r,onSelect:o,locale:i,disabled:a,enableYearNavigation:l,classNames:c,weekStartsOn:s=0}=e,f=(0,u._T)(e,["mode","defaultMonth","selected","onSelect","locale","disabled","enableYearNavigation","classNames","weekStartsOn"]);return d.createElement(nx,Object.assign({showOutsideDays:!0,mode:t,defaultMonth:n,selected:r,onSelect:o,locale:i,disabled:a,weekStartsOn:s,classNames:Object.assign({months:"flex flex-col sm:flex-row space-y-4 sm:space-x-4 sm:space-y-0",month:"space-y-4",caption:"flex justify-center pt-2 relative items-center",caption_label:"text-tremor-default 
text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis font-medium",nav:"space-x-1 flex items-center",nav_button:"flex items-center justify-center p-1 h-7 w-7 outline-none focus:ring-2 transition duration-100 border border-tremor-border dark:border-dark-tremor-border hover:bg-tremor-background-muted dark:hover:bg-dark-tremor-background-muted rounded-tremor-small focus:border-tremor-brand-subtle dark:focus:border-dark-tremor-brand-subtle focus:ring-tremor-brand-muted dark:focus:ring-dark-tremor-brand-muted text-tremor-content-subtle dark:text-dark-tremor-content-subtle hover:text-tremor-content dark:hover:text-dark-tremor-content",nav_button_previous:"absolute left-1",nav_button_next:"absolute right-1",table:"w-full border-collapse space-y-1",head_row:"flex",head_cell:"w-9 font-normal text-center text-tremor-content-subtle dark:text-dark-tremor-content-subtle",row:"flex w-full mt-0.5",cell:"text-center p-0 relative focus-within:relative text-tremor-default text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis",day:"h-9 w-9 p-0 hover:bg-tremor-background-subtle dark:hover:bg-dark-tremor-background-subtle outline-tremor-brand dark:outline-dark-tremor-brand rounded-tremor-default",day_today:"font-bold",day_selected:"aria-selected:bg-tremor-background-emphasis aria-selected:text-tremor-content-inverted dark:aria-selected:bg-dark-tremor-background-emphasis dark:aria-selected:text-dark-tremor-content-inverted ",day_disabled:"text-tremor-content-subtle dark:text-dark-tremor-content-subtle disabled:hover:bg-transparent",day_outside:"text-tremor-content-subtle dark:text-dark-tremor-content-subtle"},c),components:{IconLeft:e=>{var t=(0,u._T)(e,[]);return d.createElement(nw,Object.assign({className:"h-4 w-4"},t))},IconRight:e=>{var t=(0,u._T)(e,[]);return d.createElement(nS,Object.assign({className:"h-4 w-4"},t))},Caption:e=>{var t=(0,u._T)(e,[]);let{goToMonth:n,nextMonth:r,previousMonth:o,currentMonth:a}=tI();return d.createElement("div",{className:"flex justify-between items-center"},d.createElement("div",{className:"flex items-center space-x-1"},l&&d.createElement(nD,{onClick:()=>a&&n(tl(a,-1)),icon:nk}),d.createElement(nD,{onClick:()=>o&&n(o),icon:nw})),d.createElement(nC.Z,{className:"text-tremor-default tabular-nums capitalize text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis font-medium"},eQ(t.displayMonth,"LLLL yyy",{locale:i})),d.createElement("div",{className:"flex items-center space-x-1"},d.createElement(nD,{onClick:()=>r&&n(r),icon:nS}),l&&d.createElement(nD,{onClick:()=>a&&n(tl(a,1)),icon:nE})))}}},f))}nZ.displayName="DateRangePicker",n(27281);var nL=n(57365),nz=n(44140);let nB=el(),nF=d.forwardRef((e,t)=>{var n,r;let{value:o,defaultValue:i,onValueChange:a,enableSelect:l=!0,minDate:c,maxDate:s,placeholder:f="Select range",selectPlaceholder:p="Select range",disabled:h=!1,locale:m=eU,enableClear:g=!0,displayFormat:v,children:y,className:b,enableYearNavigation:x=!1,weekStartsOn:w=0,disabledDates:S}=e,k=(0,u._T)(e,["value","defaultValue","onValueChange","enableSelect","minDate","maxDate","placeholder","selectPlaceholder","disabled","locale","enableClear","displayFormat","children","className","enableYearNavigation","weekStartsOn","disabledDates"]),[E,C]=(0,nz.Z)(i,o),[O,j]=(0,d.useState)(!1),[P,M]=(0,d.useState)(!1),N=(0,d.useMemo)(()=>{let e=[];return c&&e.push({before:c}),s&&e.push({after:s}),[...e,...null!=S?S:[]]},[c,s,S]),I=(0,d.useMemo)(()=>{let e=new Map;return y?d.Children.forEach(y,t=>{var 
n;e.set(t.props.value,{text:null!==(n=(0,eu.qg)(t))&&void 0!==n?n:t.props.value,from:t.props.from,to:t.props.to})}):e6.forEach(t=>{e.set(t.value,{text:t.text,from:t.from,to:nB})}),e},[y]),R=(0,d.useMemo)(()=>{if(y)return(0,eu.sl)(y);let e=new Map;return e6.forEach(t=>e.set(t.value,t.text)),e},[y]),T=(null==E?void 0:E.selectValue)||"",A=e1(null==E?void 0:E.from,c,T,I),_=e2(null==E?void 0:E.to,s,T,I),D=A||_?e3(A,_,m,v):f,Z=ec(null!==(r=null!==(n=null!=_?_:A)&&void 0!==n?n:s)&&void 0!==r?r:nB),L=g&&!h;return d.createElement("div",Object.assign({ref:t,className:(0,es.q)("w-full min-w-[10rem] relative flex justify-between text-tremor-default max-w-sm shadow-tremor-input dark:shadow-dark-tremor-input rounded-tremor-default",b)},k),d.createElement(J,{as:"div",className:(0,es.q)("w-full",l?"rounded-l-tremor-default":"rounded-tremor-default",O&&"ring-2 ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted z-10")},d.createElement("div",{className:"relative w-full"},d.createElement(J.Button,{onFocus:()=>j(!0),onBlur:()=>j(!1),disabled:h,className:(0,es.q)("w-full outline-none text-left whitespace-nowrap truncate focus:ring-2 transition duration-100 rounded-l-tremor-default flex flex-nowrap border pl-3 py-2","rounded-l-tremor-default border-tremor-border text-tremor-content-emphasis focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:text-dark-tremor-content-emphasis dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",l?"rounded-l-tremor-default":"rounded-tremor-default",L?"pr-8":"pr-4",(0,eu.um)((0,eu.Uh)(A||_),h))},d.createElement(en,{className:(0,es.q)(e0("calendarIcon"),"flex-none shrink-0 h-5 w-5 -ml-0.5 mr-2","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle"),"aria-hidden":"true"}),d.createElement("p",{className:"truncate"},D)),L&&A?d.createElement("button",{type:"button",className:(0,es.q)("absolute outline-none inset-y-0 right-0 flex items-center transition duration-100 mr-4"),onClick:e=>{e.preventDefault(),null==a||a({}),C({})}},d.createElement(er.Z,{className:(0,es.q)(e0("clearIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null),d.createElement(ee.u,{className:"absolute z-10 min-w-min left-0",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},d.createElement(J.Panel,{focus:!0,className:(0,es.q)("divide-y overflow-y-auto outline-none rounded-tremor-default p-3 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},d.createElement(nZ,Object.assign({mode:"range",showOutsideDays:!0,defaultMonth:Z,selected:{from:A,to:_},onSelect:e=>{null==a||a({from:null==e?void 0:e.from,to:null==e?void 0:e.to}),C({from:null==e?void 0:e.from,to:null==e?void 0:e.to})},locale:m,disabled:N,enableYearNavigation:x,classNames:{day_range_middle:(0,es.q)("!rounded-none aria-selected:!bg-tremor-background-subtle aria-selected:dark:!bg-dark-tremor-background-subtle aria-selected:!text-tremor-content aria-selected:dark:!bg-dark-tremor-background-subtle"),day_range_start:"rounded-r-none rounded-l-tremor-small aria-selected:text-tremor-brand-inverted 
dark:aria-selected:text-dark-tremor-brand-inverted",day_range_end:"rounded-l-none rounded-r-tremor-small aria-selected:text-tremor-brand-inverted dark:aria-selected:text-dark-tremor-brand-inverted"},weekStartsOn:w},e))))),l&&d.createElement(et.R,{as:"div",className:(0,es.q)("w-48 -ml-px rounded-r-tremor-default",P&&"ring-2 ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted z-10"),value:T,onChange:e=>{let{from:t,to:n}=I.get(e),r=null!=n?n:nB;null==a||a({from:t,to:r,selectValue:e}),C({from:t,to:r,selectValue:e})},disabled:h},e=>{var t;let{value:n}=e;return d.createElement(d.Fragment,null,d.createElement(et.R.Button,{onFocus:()=>M(!0),onBlur:()=>M(!1),className:(0,es.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-r-tremor-default transition duration-100 border px-4 py-2","border-tremor-border shadow-tremor-input text-tremor-content-emphasis focus:border-tremor-brand-subtle","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:text-dark-tremor-content-emphasis dark:focus:border-dark-tremor-brand-subtle",(0,eu.um)((0,eu.Uh)(n),h))},n&&null!==(t=R.get(n))&&void 0!==t?t:p),d.createElement(ee.u,{className:"absolute z-10 w-full inset-x-0 right-0",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},d.createElement(et.R.Options,{className:(0,es.q)("divide-y overflow-y-auto outline-none border my-1","shadow-tremor-dropdown bg-tremor-background border-tremor-border divide-tremor-border rounded-tremor-default","dark:shadow-dark-tremor-dropdown dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border")},null!=y?y:e6.map(e=>d.createElement(nL.Z,{key:e.value,value:e.value},e.text)))))}))});nF.displayName="DateRangePicker"},92414:function(e,t,n){"use strict";n.d(t,{Z:function(){return v}});var r=n(5853),o=n(2265);n(42698),n(64016),n(8710);var i=n(33232),a=n(44140),l=n(58747);let c=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M18.031 16.6168L22.3137 20.8995L20.8995 22.3137L16.6168 18.031C15.0769 19.263 13.124 20 11 20C6.032 20 2 15.968 2 11C2 6.032 6.032 2 11 2C15.968 2 20 6.032 20 11C20 13.124 19.263 15.0769 18.031 16.6168ZM16.0247 15.8748C17.2475 14.6146 18 12.8956 18 11C18 7.1325 14.8675 4 11 4C7.1325 4 4 7.1325 4 11C4 14.8675 7.1325 18 11 18C12.8956 18 14.6146 17.2475 15.8748 16.0247L16.0247 15.8748Z"}))};var s=n(4537),u=n(28517),d=n(33044);let f=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",width:"100%",height:"100%",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2",strokeLinecap:"round",strokeLinejoin:"round"},t),o.createElement("line",{x1:"18",y1:"6",x2:"6",y2:"18"}),o.createElement("line",{x1:"6",y1:"6",x2:"18",y2:"18"}))};var p=n(65954),h=n(1153),m=n(96398);let g=(0,h.fn)("MultiSelect"),v=o.forwardRef((e,t)=>{let{defaultValue:n,value:h,onValueChange:v,placeholder:y="Select...",placeholderSearch:b="Search",disabled:x=!1,icon:w,children:S,className:k}=e,E=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","placeholderSearch","disabled","icon","children","className"]),[C,O]=(0,a.Z)(n,h),{reactElementChildren:j,optionsAvailable:P}=(0,o.useMemo)(()=>{let 
e=o.Children.toArray(S).filter(o.isValidElement);return{reactElementChildren:e,optionsAvailable:(0,m.n0)("",e)}},[S]),[M,N]=(0,o.useState)(""),I=(null!=C?C:[]).length>0,R=(0,o.useMemo)(()=>M?(0,m.n0)(M,j):P,[M,j,P]),T=()=>{N("")};return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:C,value:C,onChange:e=>{null==v||v(e),O(e)},disabled:x,className:(0,p.q)("w-full min-w-[10rem] relative text-tremor-default",k)},E,{multiple:!0}),e=>{let{value:t}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,p.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-1.5","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",w?"pl-11 -ml-0.5":"pl-3",(0,m.um)(t.length>0,x))},w&&o.createElement("span",{className:(0,p.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(w,{className:(0,p.q)(g("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("div",{className:"h-6 flex items-center"},t.length>0?o.createElement("div",{className:"flex flex-nowrap overflow-x-scroll [&::-webkit-scrollbar]:hidden [scrollbar-width:none] gap-x-1 mr-5 -ml-1.5 relative"},P.filter(e=>t.includes(e.props.value)).map((e,n)=>{var r;return o.createElement("div",{key:n,className:(0,p.q)("max-w-[100px] lg:max-w-[200px] flex justify-center items-center pl-2 pr-1.5 py-1 font-medium","rounded-tremor-small","bg-tremor-background-muted dark:bg-dark-tremor-background-muted","bg-tremor-background-subtle dark:bg-dark-tremor-background-subtle","text-tremor-content-default dark:text-dark-tremor-content-default","text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis")},o.createElement("div",{className:"text-xs truncate "},null!==(r=e.props.children)&&void 0!==r?r:e.props.value),o.createElement("div",{onClick:n=>{n.preventDefault();let r=t.filter(t=>t!==e.props.value);null==v||v(r),O(r)}},o.createElement(f,{className:(0,p.q)(g("clearIconItem"),"cursor-pointer rounded-tremor-full w-3.5 h-3.5 ml-2","text-tremor-content-subtle hover:text-tremor-content","dark:text-dark-tremor-content-subtle dark:hover:text-tremor-content")})))})):o.createElement("span",null,y)),o.createElement("span",{className:(0,p.q)("absolute inset-y-0 right-0 flex items-center mr-2.5")},o.createElement(l.Z,{className:(0,p.q)(g("arrowDownIcon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),I&&!x?o.createElement("button",{type:"button",className:(0,p.q)("absolute inset-y-0 right-0 flex items-center mr-8"),onClick:e=>{e.preventDefault(),O([]),null==v||v([])}},o.createElement(s.Z,{className:(0,p.q)(g("clearIconAllItems"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,p.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border 
shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},o.createElement("div",{className:(0,p.q)("flex items-center w-full px-2.5","bg-tremor-background-muted","dark:bg-dark-tremor-background-muted")},o.createElement("span",null,o.createElement(c,{className:(0,p.q)("flex-none w-4 h-4 mr-2","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("input",{name:"search",type:"input",autoComplete:"off",placeholder:b,className:(0,p.q)("w-full focus:outline-none focus:ring-none bg-transparent text-tremor-default py-2","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis"),onKeyDown:e=>{"Space"===e.code&&""!==e.target.value&&e.stopPropagation()},onChange:e=>N(e.target.value),value:M})),o.createElement(i.Z.Provider,Object.assign({},{onBlur:{handleResetSearch:T}},{value:{selectedValue:t}}),R))))})});v.displayName="MultiSelect"},46030:function(e,t,n){"use strict";n.d(t,{Z:function(){return u}});var r=n(5853);n(42698),n(64016),n(8710);var o=n(33232),i=n(2265),a=n(65954),l=n(1153),c=n(28517);let s=(0,l.fn)("MultiSelectItem"),u=i.forwardRef((e,t)=>{let{value:n,className:u,children:d}=e,f=(0,r._T)(e,["value","className","children"]),{selectedValue:p}=(0,i.useContext)(o.Z),h=(0,l.NZ)(n,p);return i.createElement(c.R.Option,Object.assign({className:(0,a.q)(s("root"),"flex justify-start items-center cursor-default text-tremor-default p-2.5","ui-active:bg-tremor-background-muted ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",u),ref:t,key:n,value:n},f),i.createElement("input",{type:"checkbox",className:(0,a.q)(s("checkbox"),"flex-none focus:ring-none focus:outline-none cursor-pointer mr-2.5","accent-tremor-brand","dark:accent-dark-tremor-brand"),checked:h,readOnly:!0}),i.createElement("span",{className:"whitespace-nowrap truncate"},null!=d?d:n))});u.displayName="MultiSelectItem"},30150:function(e,t,n){"use strict";n.d(t,{Z:function(){return f}});var r=n(5853),o=n(2265);let i=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2.5"}),o.createElement("path",{d:"M12 4v16m8-8H4"}))},a=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2.5"}),o.createElement("path",{d:"M20 12H4"}))};var l=n(65954),c=n(1153),s=n(69262);let u="flex mx-auto text-tremor-content-subtle dark:text-dark-tremor-content-subtle",d="cursor-pointer hover:text-tremor-content dark:hover:text-dark-tremor-content",f=o.forwardRef((e,t)=>{let{onSubmit:n,enableStepper:f=!0,disabled:p,onValueChange:h,onChange:m}=e,g=(0,r._T)(e,["onSubmit","enableStepper","disabled","onValueChange","onChange"]),v=(0,o.useRef)(null),[y,b]=o.useState(!1),x=o.useCallback(()=>{b(!0)},[]),w=o.useCallback(()=>{b(!1)},[]),[S,k]=o.useState(!1),E=o.useCallback(()=>{k(!0)},[]),C=o.useCallback(()=>{k(!1)},[]);return o.createElement(s.Z,Object.assign({type:"number",ref:(0,c.lq)([v,t]),disabled:p,makeInputClassName:(0,c.fn)("NumberInput"),onKeyDown:e=>{var 
t;if("Enter"===e.key&&!e.ctrlKey&&!e.altKey&&!e.shiftKey){let e=null===(t=v.current)||void 0===t?void 0:t.value;null==n||n(parseFloat(null!=e?e:""))}"ArrowDown"===e.key&&x(),"ArrowUp"===e.key&&E()},onKeyUp:e=>{"ArrowDown"===e.key&&w(),"ArrowUp"===e.key&&C()},onChange:e=>{p||(null==h||h(parseFloat(e.target.value)),null==m||m(e))},stepper:f?o.createElement("div",{className:(0,l.q)("flex justify-center align-middle")},o.createElement("div",{tabIndex:-1,onClick:e=>e.preventDefault(),onMouseDown:e=>e.preventDefault(),onTouchStart:e=>{e.cancelable&&e.preventDefault()},onMouseUp:()=>{var e,t;p||(null===(e=v.current)||void 0===e||e.stepDown(),null===(t=v.current)||void 0===t||t.dispatchEvent(new Event("input",{bubbles:!0})))},className:(0,l.q)(!p&&d,u,"group py-[10px] px-2.5 border-l border-tremor-border dark:border-dark-tremor-border")},o.createElement(a,{"data-testid":"step-down",className:(y?"scale-95":"")+" h-4 w-4 duration-75 transition group-active:scale-95"})),o.createElement("div",{tabIndex:-1,onClick:e=>e.preventDefault(),onMouseDown:e=>e.preventDefault(),onTouchStart:e=>{e.cancelable&&e.preventDefault()},onMouseUp:()=>{var e,t;p||(null===(e=v.current)||void 0===e||e.stepUp(),null===(t=v.current)||void 0===t||t.dispatchEvent(new Event("input",{bubbles:!0})))},className:(0,l.q)(!p&&d,u,"group py-[10px] px-2.5 border-l border-tremor-border dark:border-dark-tremor-border")},o.createElement(i,{"data-testid":"step-up",className:(S?"scale-95":"")+" h-4 w-4 duration-75 transition group-active:scale-95"}))):null},g))});f.displayName="NumberInput"},27281:function(e,t,n){"use strict";n.d(t,{Z:function(){return h}});var r=n(5853),o=n(2265),i=n(58747),a=n(4537),l=n(65954),c=n(1153),s=n(96398),u=n(28517),d=n(33044),f=n(44140);let p=(0,c.fn)("Select"),h=o.forwardRef((e,t)=>{let{defaultValue:n,value:c,onValueChange:h,placeholder:m="Select...",disabled:g=!1,icon:v,enableClear:y=!0,children:b,className:x}=e,w=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","disabled","icon","enableClear","children","className"]),[S,k]=(0,f.Z)(n,c),E=(0,o.useMemo)(()=>{let e=o.Children.toArray(b).filter(o.isValidElement);return(0,s.sl)(e)},[b]);return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:S,value:S,onChange:e=>{null==h||h(e),k(e)},disabled:g,className:(0,l.q)("w-full min-w-[10rem] relative text-tremor-default",x)},w),e=>{var t;let{value:n}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,l.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-2","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",v?"pl-10":"pl-3",(0,s.um)((0,s.Uh)(n),g))},v&&o.createElement("span",{className:(0,l.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(v,{className:(0,l.q)(p("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("span",{className:"w-[90%] block truncate"},n&&null!==(t=E.get(n))&&void 0!==t?t:m),o.createElement("span",{className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-3")},o.createElement(i.Z,{className:(0,l.q)(p("arrowDownIcon"),"flex-none h-5 
w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),y&&S?o.createElement("button",{type:"button",className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-8"),onClick:e=>{e.preventDefault(),k(""),null==h||h("")}},o.createElement(a.Z,{className:(0,l.q)(p("clearIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,l.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},b)))})});h.displayName="Select"},57365:function(e,t,n){"use strict";n.d(t,{Z:function(){return c}});var r=n(5853),o=n(2265),i=n(28517),a=n(65954);let l=(0,n(1153).fn)("SelectItem"),c=o.forwardRef((e,t)=>{let{value:n,icon:c,className:s,children:u}=e,d=(0,r._T)(e,["value","icon","className","children"]);return o.createElement(i.R.Option,Object.assign({className:(0,a.q)(l("root"),"flex justify-start items-center cursor-default text-tremor-default px-2.5 py-2.5","ui-active:bg-tremor-background-muted ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong ui-selected:bg-tremor-background-muted text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",s),ref:t,key:n,value:n},d),c&&o.createElement(c,{className:(0,a.q)(l("icon"),"flex-none w-5 h-5 mr-1.5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}),o.createElement("span",{className:"whitespace-nowrap truncate"},null!=u?u:n))});c.displayName="SelectItem"},92858:function(e,t,n){"use strict";n.d(t,{Z:function(){return N}});var r=n(5853),o=n(2265),i=n(62963),a=n(90945),l=n(13323),c=n(17684),s=n(80004),u=n(93689),d=n(38198),f=n(47634),p=n(56314),h=n(27847),m=n(64518);let g=(0,o.createContext)(null),v=Object.assign((0,h.yV)(function(e,t){let n=(0,c.M)(),{id:r="headlessui-description-".concat(n),...i}=e,a=function e(){let t=(0,o.useContext)(g);if(null===t){let t=Error("You used a component, but it is not inside a relevant parent.");throw Error.captureStackTrace&&Error.captureStackTrace(t,e),t}return t}(),l=(0,u.T)(t);(0,m.e)(()=>a.register(r),[r,a.register]);let s={ref:l,...a.props,id:r};return(0,h.sY)({ourProps:s,theirProps:i,slot:a.slot||{},defaultTag:"p",name:a.name||"Description"})}),{});var y=n(37388);let b=(0,o.createContext)(null),x=Object.assign((0,h.yV)(function(e,t){let n=(0,c.M)(),{id:r="headlessui-label-".concat(n),passive:i=!1,...a}=e,l=function e(){let t=(0,o.useContext)(b);if(null===t){let t=Error("You used a