diff --git a/.circleci/config.yml b/.circleci/config.yml
index 0a12aa73b8..feb425a38e 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -3,6 +3,18 @@ orbs:
codecov: codecov/codecov@4.0.1
node: circleci/node@5.1.0 # Add this line to declare the node orb
+commands:
+ setup_google_dns:
+ steps:
+ - run:
+ name: "Configure Google DNS"
+ command: |
+ # Backup original resolv.conf
+ sudo cp /etc/resolv.conf /etc/resolv.conf.backup
+ # Point resolv.conf at Docker's embedded DNS (127.0.0.11) plus Google public DNS servers
+ echo "nameserver 127.0.0.11" | sudo tee /etc/resolv.conf
+ echo "nameserver 8.8.8.8" | sudo tee -a /etc/resolv.conf
+ echo "nameserver 8.8.4.4" | sudo tee -a /etc/resolv.conf
jobs:
local_testing:
@@ -15,7 +27,7 @@ jobs:
steps:
- checkout
-
+ - setup_google_dns
- run:
name: Show git commit hash
command: |
@@ -49,7 +61,7 @@ jobs:
pip install opentelemetry-api==1.25.0
pip install opentelemetry-sdk==1.25.0
pip install opentelemetry-exporter-otlp==1.25.0
- pip install openai==1.66.1
+ pip install openai==1.68.2
pip install prisma==0.11.0
pip install "detect_secrets==1.5.0"
pip install "httpx==0.24.1"
@@ -66,7 +78,7 @@ jobs:
pip install python-multipart
pip install google-cloud-aiplatform
pip install prometheus-client==0.20.0
- pip install "pydantic==2.7.1"
+ pip install "pydantic==2.10.2"
pip install "diskcache==5.6.1"
pip install "Pillow==10.3.0"
pip install "jsonschema==4.22.0"
@@ -134,7 +146,7 @@ jobs:
steps:
- checkout
-
+ - setup_google_dns
- run:
name: Show git commit hash
command: |
@@ -168,7 +180,7 @@ jobs:
pip install opentelemetry-api==1.25.0
pip install opentelemetry-sdk==1.25.0
pip install opentelemetry-exporter-otlp==1.25.0
- pip install openai==1.66.1
+ pip install openai==1.68.2
pip install prisma==0.11.0
pip install "detect_secrets==1.5.0"
pip install "httpx==0.24.1"
@@ -185,7 +197,7 @@ jobs:
pip install python-multipart
pip install google-cloud-aiplatform
pip install prometheus-client==0.20.0
- pip install "pydantic==2.7.1"
+ pip install "pydantic==2.10.2"
pip install "diskcache==5.6.1"
pip install "Pillow==10.3.0"
pip install "jsonschema==4.22.0"
@@ -234,7 +246,13 @@ jobs:
steps:
- checkout
-
+ - setup_google_dns
+ - run:
+ name: DNS lookup for Redis host
+ command: |
+ sudo apt-get update
+ sudo apt-get install -y dnsutils
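+ # verify the Redis Cloud hostname resolves with the DNS servers configured above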
+ dig redis-19899.c239.us-east-1-2.ec2.redns.redis-cloud.com +short
- run:
name: Show git commit hash
command: |
@@ -268,7 +286,7 @@ jobs:
pip install opentelemetry-api==1.25.0
pip install opentelemetry-sdk==1.25.0
pip install opentelemetry-exporter-otlp==1.25.0
- pip install openai==1.66.1
+ pip install openai==1.68.2
pip install prisma==0.11.0
pip install "detect_secrets==1.5.0"
pip install "httpx==0.24.1"
@@ -285,7 +303,7 @@ jobs:
pip install python-multipart
pip install google-cloud-aiplatform
pip install prometheus-client==0.20.0
- pip install "pydantic==2.7.1"
+ pip install "pydantic==2.10.2"
pip install "diskcache==5.6.1"
pip install "Pillow==10.3.0"
pip install "jsonschema==4.22.0"
@@ -334,6 +352,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -388,6 +407,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -404,7 +424,7 @@ jobs:
command: |
pwd
ls
- python -m pytest tests/local_testing tests/router_unit_tests --cov=litellm --cov-report=xml -vv -k "router" -x -s -v --junitxml=test-results/junit.xml --durations=5
+ python -m pytest tests/local_testing tests/router_unit_tests --cov=litellm --cov-report=xml -vv -k "router" -x -v --junitxml=test-results/junit.xml --durations=5
no_output_timeout: 120m
- run:
name: Rename the coverage files
@@ -429,6 +449,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
- run:
name: Show git commit hash
command: |
@@ -479,7 +500,13 @@ jobs:
working_directory: ~/project
steps:
- checkout
-
+ - run:
+ name: Install PostgreSQL
+ command: |
+ sudo apt-get update
+ sudo apt-get install -y postgresql postgresql-contrib
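+ # CircleCI sources $BASH_ENV before each later step, so the PATH export below keeps the
+ # PostgreSQL binaries available (likely needed by pytest-postgresql, installed further down)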
+ echo 'export PATH=/usr/lib/postgresql/*/bin:$PATH' >> $BASH_ENV
+ - setup_google_dns
- run:
name: Show git commit hash
command: |
@@ -513,7 +540,7 @@ jobs:
pip install opentelemetry-api==1.25.0
pip install opentelemetry-sdk==1.25.0
pip install opentelemetry-exporter-otlp==1.25.0
- pip install openai==1.66.1
+ pip install openai==1.68.2
pip install prisma==0.11.0
pip install "detect_secrets==1.5.0"
pip install "httpx==0.24.1"
@@ -530,10 +557,11 @@ jobs:
pip install python-multipart
pip install google-cloud-aiplatform
pip install prometheus-client==0.20.0
- pip install "pydantic==2.7.1"
+ pip install "pydantic==2.10.2"
pip install "diskcache==5.6.1"
pip install "Pillow==10.3.0"
pip install "jsonschema==4.22.0"
+ pip install "pytest-postgresql==7.0.1"
- save_cache:
paths:
- ./venv
@@ -569,7 +597,7 @@ jobs:
- litellm_proxy_unit_tests_coverage
litellm_assistants_api_testing: # Runs all tests with the "assistants" keyword
docker:
- - image: cimg/python:3.11
+ - image: cimg/python:3.13.1
auth:
username: ${DOCKERHUB_USERNAME}
password: ${DOCKERHUB_PASSWORD}
@@ -577,10 +605,13 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
python -m pip install --upgrade pip
+ pip install wheel
+ pip install --upgrade pip wheel setuptools
python -m pip install -r requirements.txt
pip install "pytest==7.3.1"
pip install "respx==0.21.1"
@@ -618,6 +649,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -625,7 +657,13 @@ jobs:
python -m pip install -r requirements.txt
pip install "pytest==7.3.1"
pip install "pytest-retry==1.6.3"
+ pip install "pytest-cov==5.0.0"
pip install "pytest-asyncio==0.21.1"
+ pip install "respx==0.21.1"
+ - run:
+ name: Show current pydantic version
+ command: |
+ python -m pip show pydantic
# Run pytest and generate JUnit XML report
- run:
name: Run tests
@@ -648,6 +686,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -664,7 +703,7 @@ jobs:
command: |
pwd
ls
- python -m pytest -vv tests/llm_translation --cov=litellm --cov-report=xml -x -s -v --junitxml=test-results/junit.xml --durations=5
+ python -m pytest -vv tests/llm_translation --cov=litellm --cov-report=xml -x -v --junitxml=test-results/junit.xml --durations=5
no_output_timeout: 120m
- run:
name: Rename the coverage files
@@ -680,6 +719,51 @@ jobs:
paths:
- llm_translation_coverage.xml
- llm_translation_coverage
+ mcp_testing:
+ docker:
+ - image: cimg/python:3.11
+ auth:
+ username: ${DOCKERHUB_USERNAME}
+ password: ${DOCKERHUB_PASSWORD}
+ working_directory: ~/project
+
+ steps:
+ - checkout
+ - setup_google_dns
+ - run:
+ name: Install Dependencies
+ command: |
+ python -m pip install --upgrade pip
+ python -m pip install -r requirements.txt
+ pip install "pytest==7.3.1"
+ pip install "pytest-retry==1.6.3"
+ pip install "pytest-cov==5.0.0"
+ pip install "pytest-asyncio==0.21.1"
+ pip install "respx==0.21.1"
+ pip install "pydantic==2.10.2"
+ pip install "mcp==1.5.0"
+ # Run pytest and generate JUnit XML report
+ - run:
+ name: Run tests
+ command: |
+ pwd
+ ls
+ python -m pytest -vv tests/mcp_tests --cov=litellm --cov-report=xml -x -s -v --junitxml=test-results/junit.xml --durations=5
+ no_output_timeout: 120m
+ - run:
+ name: Rename the coverage files
+ command: |
+ mv coverage.xml mcp_coverage.xml
+ mv .coverage mcp_coverage
+
+ # Store test results
+ - store_test_results:
+ path: test-results
+ - persist_to_workspace:
+ root: .
+ paths:
+ - mcp_coverage.xml
+ - mcp_coverage
llm_responses_api_testing:
docker:
- image: cimg/python:3.11
@@ -690,6 +774,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -732,6 +817,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -744,6 +830,8 @@ jobs:
pip install "pytest-asyncio==0.21.1"
pip install "respx==0.21.1"
pip install "hypercorn==0.17.3"
+ pip install "pydantic==2.10.2"
+ pip install "mcp==1.5.0"
# Run pytest and generate JUnit XML report
- run:
name: Run tests
@@ -776,6 +864,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -820,10 +909,12 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
python -m pip install --upgrade pip
+ pip install numpydoc
python -m pip install -r requirements.txt
pip install "respx==0.21.1"
pip install "pytest==7.3.1"
@@ -832,7 +923,6 @@ jobs:
pip install "pytest-cov==5.0.0"
pip install "google-generativeai==0.3.2"
pip install "google-cloud-aiplatform==1.43.0"
- pip install numpydoc
# Run pytest and generate JUnit XML report
- run:
name: Run tests
@@ -866,6 +956,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -908,6 +999,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -950,6 +1042,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -996,6 +1089,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -1008,8 +1102,8 @@ jobs:
pip install click
pip install "boto3==1.34.34"
pip install jinja2
- pip install tokenizers=="0.20.0"
- pip install uvloop==0.21.0
+ pip install "tokenizers==0.20.0"
+ pip install "uvloop==0.21.0"
pip install jsonschema
- run:
name: Run tests
@@ -1028,10 +1122,12 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
python -m pip install --upgrade pip
+ python -m pip install wheel setuptools
python -m pip install -r requirements.txt
pip install "pytest==7.3.1"
pip install "pytest-retry==1.6.3"
@@ -1052,6 +1148,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
# Install Helm
- run:
name: Install Helm
@@ -1121,6 +1218,7 @@ jobs:
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Dependencies
command: |
@@ -1157,6 +1255,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Python 3.9
command: |
@@ -1231,6 +1330,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Docker CLI (In case it's not already installed)
command: |
@@ -1278,7 +1378,7 @@ jobs:
pip install "aiodynamo==23.10.1"
pip install "asyncio==3.4.3"
pip install "PyGithub==1.59.1"
- pip install "openai==1.66.1"
+ pip install "openai==1.68.2"
- run:
name: Install Grype
command: |
@@ -1353,7 +1453,7 @@ jobs:
command: |
pwd
ls
- python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation --ignore=tests/llm_responses_api_testing --ignore=tests/image_gen_tests --ignore=tests/pass_through_unit_tests
+ python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/spend_tracking_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation --ignore=tests/llm_responses_api_testing --ignore=tests/mcp_tests --ignore=tests/image_gen_tests --ignore=tests/pass_through_unit_tests
no_output_timeout: 120m
# Store test results
@@ -1366,6 +1466,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Docker CLI (In case it's not already installed)
command: |
@@ -1402,6 +1503,7 @@ jobs:
pip install "boto3==1.34.34"
pip install "aioboto3==12.3.0"
pip install langchain
+ pip install "langchain_mcp_adapters==0.0.5"
pip install "langfuse>=2.0.0"
pip install "logfire==0.29.0"
pip install numpydoc
@@ -1414,7 +1516,7 @@ jobs:
pip install "aiodynamo==23.10.1"
pip install "asyncio==3.4.3"
pip install "PyGithub==1.59.1"
- pip install "openai==1.66.1"
+ pip install "openai==1.68.2"
# Run pytest and generate JUnit XML report
- run:
name: Build Docker image
@@ -1489,6 +1591,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Docker CLI (In case it's not already installed)
command: |
@@ -1536,7 +1639,7 @@ jobs:
pip install "aiodynamo==23.10.1"
pip install "asyncio==3.4.3"
pip install "PyGithub==1.59.1"
- pip install "openai==1.66.1"
+ pip install "openai==1.68.2"
- run:
name: Build Docker image
command: docker build -t my-app:latest -f ./docker/Dockerfile.database .
@@ -1643,6 +1746,96 @@ jobs:
# Store test results
- store_test_results:
path: test-results
+ proxy_spend_accuracy_tests:
+ machine:
+ image: ubuntu-2204:2023.10.1
+ resource_class: xlarge
+ working_directory: ~/project
+ steps:
+ - checkout
+ - setup_google_dns
+ - run:
+ name: Install Docker CLI (In case it's not already installed)
+ command: |
+ sudo apt-get update
+ sudo apt-get install -y docker-ce docker-ce-cli containerd.io
+ - run:
+ name: Install Python 3.9
+ command: |
+ curl https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh --output miniconda.sh
+ bash miniconda.sh -b -p $HOME/miniconda
+ export PATH="$HOME/miniconda/bin:$PATH"
+ conda init bash
+ source ~/.bashrc
+ conda create -n myenv python=3.9 -y
+ conda activate myenv
+ python --version
+ - run:
+ name: Install Dependencies
+ command: |
+ pip install "pytest==7.3.1"
+ pip install "pytest-asyncio==0.21.1"
+ pip install aiohttp
+ python -m pip install --upgrade pip
+ python -m pip install -r requirements.txt
+ - run:
+ name: Build Docker image
+ command: docker build -t my-app:latest -f ./docker/Dockerfile.database .
+ - run:
+ name: Run Docker container
+ # run the proxy against the spend tracking config
+ # (Redis / database credentials come from the CircleCI project environment)
+ command: |
+ docker run -d \
+ -p 4000:4000 \
+ -e DATABASE_URL=$PROXY_DATABASE_URL \
+ -e REDIS_HOST=$REDIS_HOST \
+ -e REDIS_PASSWORD=$REDIS_PASSWORD \
+ -e REDIS_PORT=$REDIS_PORT \
+ -e LITELLM_MASTER_KEY="sk-1234" \
+ -e OPENAI_API_KEY=$OPENAI_API_KEY \
+ -e LITELLM_LICENSE=$LITELLM_LICENSE \
+ -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
+ -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
+ -e USE_DDTRACE=True \
+ -e DD_API_KEY=$DD_API_KEY \
+ -e DD_SITE=$DD_SITE \
+ -e AWS_REGION_NAME=$AWS_REGION_NAME \
+ --name my-app \
+ -v $(pwd)/litellm/proxy/example_config_yaml/spend_tracking_config.yaml:/app/config.yaml \
+ my-app:latest \
+ --config /app/config.yaml \
+ --port 4000 \
+ --detailed_debug
+ - run:
+ name: Install curl and dockerize
+ command: |
+ sudo apt-get update
+ sudo apt-get install -y curl
+ sudo wget https://github.com/jwilder/dockerize/releases/download/v0.6.1/dockerize-linux-amd64-v0.6.1.tar.gz
+ sudo tar -C /usr/local/bin -xzvf dockerize-linux-amd64-v0.6.1.tar.gz
+ sudo rm dockerize-linux-amd64-v0.6.1.tar.gz
+ - run:
+ name: Start outputting logs
+ command: docker logs -f my-app
+ background: true
+ - run:
+ name: Wait for app to be ready
+ command: dockerize -wait http://localhost:4000 -timeout 5m
+ - run:
+ name: Run tests
+ command: |
+ pwd
+ ls
+ python -m pytest -vv tests/spend_tracking_tests -x --junitxml=test-results/junit.xml --durations=5
+ no_output_timeout: 120m
+ # Clean up first container
+ - run:
+ name: Stop and remove first container
+ command: |
+ docker stop my-app
+ docker rm my-app
proxy_multi_instance_tests:
machine:
@@ -1651,6 +1844,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Docker CLI (In case it's not already installed)
command: |
@@ -1762,6 +1956,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Docker CLI (In case it's not already installed)
command: |
@@ -1801,7 +1996,7 @@ jobs:
command: |
docker run -d \
-p 4000:4000 \
- -e DATABASE_URL=$PROXY_DATABASE_URL \
+ -e DATABASE_URL=$CLEAN_STORE_MODEL_IN_DB_DATABASE_URL \
-e STORE_MODEL_IN_DB="True" \
-e LITELLM_MASTER_KEY="sk-1234" \
-e LITELLM_LICENSE=$LITELLM_LICENSE \
@@ -1844,6 +2039,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
# Remove Docker CLI installation since it's already available in machine executor
- run:
name: Install Python 3.13
@@ -1941,6 +2137,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
- run:
name: Install Docker CLI (In case it's not already installed)
command: |
@@ -1965,10 +2162,10 @@ jobs:
pip install "pytest-asyncio==0.21.1"
pip install "google-cloud-aiplatform==1.43.0"
pip install aiohttp
- pip install "openai==1.66.1"
+ pip install "openai==1.68.2"
pip install "assemblyai==0.37.0"
python -m pip install --upgrade pip
- pip install "pydantic==2.7.1"
+ pip install "pydantic==2.10.2"
pip install "pytest==7.3.1"
pip install "pytest-mock==3.12.0"
pip install "pytest-asyncio==0.21.1"
@@ -1985,6 +2182,9 @@ jobs:
pip install "PyGithub==1.59.1"
pip install "google-cloud-aiplatform==1.59.0"
pip install "anthropic==0.49.0"
+ pip install "langchain_mcp_adapters==0.0.5"
+ pip install "langchain_openai==0.2.1"
+ pip install "langgraph==0.3.18"
# Run pytest and generate JUnit XML report
- run:
name: Build Docker image
@@ -2112,7 +2312,7 @@ jobs:
python -m venv venv
. venv/bin/activate
pip install coverage
- coverage combine llm_translation_coverage llm_responses_api_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage image_gen_coverage pass_through_unit_tests_coverage batches_coverage litellm_proxy_security_tests_coverage
+ coverage combine llm_translation_coverage llm_responses_api_coverage mcp_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage image_gen_coverage pass_through_unit_tests_coverage batches_coverage litellm_proxy_security_tests_coverage
coverage xml
- codecov/upload:
file: ./coverage.xml
@@ -2190,6 +2390,114 @@ jobs:
echo "triggering load testing server for version ${VERSION} and commit ${CIRCLE_SHA1}"
curl -X POST "https://proxyloadtester-production.up.railway.app/start/load/test?version=${VERSION}&commit_hash=${CIRCLE_SHA1}&release_type=nightly"
+ publish_proxy_extras:
+ docker:
+ - image: cimg/python:3.8
+ working_directory: ~/project/litellm-proxy-extras
+ environment:
+ TWINE_USERNAME: __token__
+
+ steps:
+ - checkout:
+ path: ~/project
+
+ - run:
+ name: Check if litellm-proxy-extras dir or pyproject.toml was modified
+ command: |
+ echo "Install TOML package."
+ python -m pip install toml
+ # Get current version from pyproject.toml
+ CURRENT_VERSION=$(python -c "import toml; print(toml.load('pyproject.toml')['tool']['poetry']['version'])")
+
+ # Get last published version from PyPI
+ LAST_VERSION=$(curl -s https://pypi.org/pypi/litellm-proxy-extras/json | python -c "import json, sys; print(json.load(sys.stdin)['info']['version'])")
+
+ echo "Current version: $CURRENT_VERSION"
+ echo "Last published version: $LAST_VERSION"
+
+ # Compare versions using Python's packaging.version
+ VERSION_COMPARE=$(python -c "from packaging import version; print(1 if version.parse('$CURRENT_VERSION') < version.parse('$LAST_VERSION') else 0)")
+
+ echo "Version compare: $VERSION_COMPARE"
+ if [ "$VERSION_COMPARE" = "1" ]; then
+ echo "Error: Current version ($CURRENT_VERSION) is less than last published version ($LAST_VERSION)"
+ exit 1
+ fi
+
+ # If versions are equal or current is greater, check contents
+ pip download --no-deps litellm-proxy-extras==$LAST_VERSION -d /tmp
+
+ echo "Contents of /tmp directory:"
+ ls -la /tmp
+
+ # Find the downloaded file (could be .whl or .tar.gz)
+ DOWNLOADED_FILE=$(ls /tmp/litellm_proxy_extras-*)
+ echo "Downloaded file: $DOWNLOADED_FILE"
+
+ # Extract based on file extension
+ if [[ "$DOWNLOADED_FILE" == *.whl ]]; then
+ echo "Extracting wheel file..."
+ unzip -q "$DOWNLOADED_FILE" -d /tmp/extracted
+ EXTRACTED_DIR="/tmp/extracted"
+ else
+ echo "Extracting tar.gz file..."
+ tar -xzf "$DOWNLOADED_FILE" -C /tmp
+ EXTRACTED_DIR="/tmp/litellm_proxy_extras-$LAST_VERSION"
+ fi
+
+ echo "Contents of extracted package:"
+ ls -R "$EXTRACTED_DIR"
+
+ # Compare contents
+ if ! diff -r "$EXTRACTED_DIR/litellm_proxy_extras" ./litellm_proxy_extras; then
+ if [ "$CURRENT_VERSION" = "$LAST_VERSION" ]; then
+ echo "Error: Changes detected in litellm-proxy-extras but version was not bumped"
+ echo "Current version: $CURRENT_VERSION"
+ echo "Last published version: $LAST_VERSION"
+ echo "Changes:"
+ diff -r "$EXTRACTED_DIR/litellm_proxy_extras" ./litellm_proxy_extras
+ exit 1
+ fi
+ else
+ echo "No changes detected in litellm-proxy-extras. Skipping PyPI publish."
+ circleci step halt
+ fi
+
+ - run:
+ name: Get new version
+ command: |
+ cd litellm-proxy-extras
+ NEW_VERSION=$(python -c "import toml; print(toml.load('pyproject.toml')['tool']['poetry']['version'])")
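+ # $BASH_ENV is sourced at the start of every later step, so the export below keeps NEW_VERSION available for the version-match check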
+ echo "export NEW_VERSION=$NEW_VERSION" >> $BASH_ENV
+
+ - run:
+ name: Check if versions match
+ command: |
+ cd ~/project
+ # Check pyproject.toml
+ CURRENT_VERSION=$(python -c "import toml; print(toml.load('pyproject.toml')['tool']['poetry']['dependencies']['litellm-proxy-extras'].split('\"')[1])")
+ if [ "$CURRENT_VERSION" != "$NEW_VERSION" ]; then
+ echo "Error: Version in pyproject.toml ($CURRENT_VERSION) doesn't match new version ($NEW_VERSION)"
+ exit 1
+ fi
+
+ # Check requirements.txt
+ REQ_VERSION=$(grep -oP 'litellm-proxy-extras==\K[0-9.]+' requirements.txt)
+ if [ "$REQ_VERSION" != "$NEW_VERSION" ]; then
+ echo "Error: Version in requirements.txt ($REQ_VERSION) doesn't match new version ($NEW_VERSION)"
+ exit 1
+ fi
+
+ - run:
+ name: Publish to PyPI
+ command: |
+ cd litellm-proxy-extras
+ echo -e "[pypi]\nusername = $PYPI_PUBLISH_USERNAME\npassword = $PYPI_PUBLISH_PASSWORD" > ~/.pypirc
+ python -m pip install --upgrade pip build twine setuptools wheel
+ rm -rf build dist
+ python -m build
+ twine upload --verbose dist/*
+
e2e_ui_testing:
machine:
image: ubuntu-2204:2023.10.1
@@ -2197,6 +2505,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
- run:
name: Build UI
command: |
@@ -2241,9 +2550,9 @@ jobs:
pip install "pytest-retry==1.6.3"
pip install "pytest-asyncio==0.21.1"
pip install aiohttp
- pip install "openai==1.66.1"
+ pip install "openai==1.68.2"
python -m pip install --upgrade pip
- pip install "pydantic==2.7.1"
+ pip install "pydantic==2.10.2"
pip install "pytest==7.3.1"
pip install "pytest-mock==3.12.0"
pip install "pytest-asyncio==0.21.1"
@@ -2311,6 +2620,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
- run:
name: Build Docker image
command: |
@@ -2333,6 +2643,7 @@ jobs:
working_directory: ~/project
steps:
- checkout
+ - setup_google_dns
- run:
name: Build Docker image
command: |
@@ -2443,6 +2754,12 @@ workflows:
only:
- main
- /litellm_.*/
+ - proxy_spend_accuracy_tests:
+ filters:
+ branches:
+ only:
+ - main
+ - /litellm_.*/
- proxy_multi_instance_tests:
filters:
branches:
@@ -2473,6 +2790,12 @@ workflows:
only:
- main
- /litellm_.*/
+ - mcp_testing:
+ filters:
+ branches:
+ only:
+ - main
+ - /litellm_.*/
- llm_responses_api_testing:
filters:
branches:
@@ -2518,6 +2841,7 @@ workflows:
- upload-coverage:
requires:
- llm_translation_testing
+ - mcp_testing
- llm_responses_api_testing
- litellm_mapped_tests
- batches_testing
@@ -2569,6 +2893,11 @@ workflows:
only:
- main
- /litellm_.*/
+ - publish_proxy_extras:
+ filters:
+ branches:
+ only:
+ - main
- publish_to_pypi:
requires:
- local_testing
@@ -2577,6 +2906,7 @@ workflows:
- load_testing
- test_bad_database_url
- llm_translation_testing
+ - mcp_testing
- llm_responses_api_testing
- litellm_mapped_tests
- batches_testing
@@ -2596,12 +2926,11 @@ workflows:
- installing_litellm_on_python
- installing_litellm_on_python_3_13
- proxy_logging_guardrails_model_info_tests
+ - proxy_spend_accuracy_tests
- proxy_multi_instance_tests
- proxy_store_model_in_db_tests
- proxy_build_from_pip_tests
- proxy_pass_through_endpoint_tests
- check_code_and_doc_quality
- filters:
- branches:
- only:
- - main
+ - publish_proxy_extras
+
diff --git a/.circleci/requirements.txt b/.circleci/requirements.txt
index e63fb9dd9a..88c0aa4dda 100644
--- a/.circleci/requirements.txt
+++ b/.circleci/requirements.txt
@@ -1,13 +1,15 @@
# used by CI/CD testing
-openai==1.66.1
+openai==1.68.2
python-dotenv
tiktoken
importlib_metadata
cohere
-redis
+redis==5.2.1
+redisvl==0.4.1 # semantic caching
anthropic
orjson==3.9.15
-pydantic==2.7.1
+pydantic==2.10.2
google-cloud-aiplatform==1.43.0
-fastapi-sso==0.10.0
+fastapi-sso==0.16.0
uvloop==0.21.0
+mcp==1.5.0 # for MCP server
diff --git a/.env.example b/.env.example
index 82b09ca25e..54986a97cd 100644
--- a/.env.example
+++ b/.env.example
@@ -20,6 +20,8 @@ REPLICATE_API_TOKEN = ""
ANTHROPIC_API_KEY = ""
# Infisical
INFISICAL_TOKEN = ""
+# INFINITY
+INFINITY_API_KEY = ""
# Development Configs
LITELLM_MASTER_KEY = "sk-1234"
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index d50aefa8bb..6c887178d5 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -10,7 +10,7 @@
**Please complete all items before asking a LiteLLM maintainer to review your PR**
-- [ ] I have Added testing in the `tests/litellm/` directory, **Adding at least 1 test is a hard requirement** - [see details](https://docs.litellm.ai/docs/extras/contributing_code)
+- [ ] I have added testing in the [`tests/litellm/`](https://github.com/BerriAI/litellm/tree/main/tests/litellm) directory, **Adding at least 1 test is a hard requirement** - [see details](https://docs.litellm.ai/docs/extras/contributing_code)
- [ ] I have added a screenshot of my new test passing locally
- [ ] My PR passes all unit tests on (`make test-unit`)[https://docs.litellm.ai/docs/extras/contributing_code]
- [ ] My PR's scope is as isolated as possible, it only solves 1 specific problem
diff --git a/.github/workflows/ghcr_deploy.yml b/.github/workflows/ghcr_deploy.yml
index 306feb36e8..58c8a1e2e1 100644
--- a/.github/workflows/ghcr_deploy.yml
+++ b/.github/workflows/ghcr_deploy.yml
@@ -114,8 +114,8 @@ jobs:
tags: |
${{ steps.meta.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }},
${{ steps.meta.outputs.tags }}-${{ github.event.inputs.release_type }}
- ${{ github.event.inputs.release_type == 'stable' && format('${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:main-{0}', github.event.inputs.tag) || '' }},
- ${{ github.event.inputs.release_type == 'stable' && '${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:main-stable' || '' }}
+ ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }},
+ ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm:main-stable', env.REGISTRY) || '' }}
labels: ${{ steps.meta.outputs.labels }}
platforms: local,linux/amd64,linux/arm64,linux/arm64/v8
@@ -157,8 +157,8 @@ jobs:
tags: |
${{ steps.meta-database.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }},
${{ steps.meta-database.outputs.tags }}-${{ github.event.inputs.release_type }}
- ${{ github.event.inputs.release_type == 'stable' && format('${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-database:main-{0}', github.event.inputs.tag) || '' }},
- ${{ github.event.inputs.release_type == 'stable' && '${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-database:main-stable' || '' }}
+ ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-database:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }},
+ ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-database:main-stable', env.REGISTRY) || '' }}
labels: ${{ steps.meta-database.outputs.labels }}
platforms: local,linux/amd64,linux/arm64,linux/arm64/v8
@@ -200,8 +200,8 @@ jobs:
tags: |
${{ steps.meta-non_root.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }},
${{ steps.meta-non_root.outputs.tags }}-${{ github.event.inputs.release_type }}
- ${{ github.event.inputs.release_type == 'stable' && format('${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-non_root:main-{0}', github.event.inputs.tag) || '' }},
- ${{ github.event.inputs.release_type == 'stable' && '${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-non_root:main-stable' || '' }}
+ ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-non_root:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }},
+ ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-non_root:main-stable', env.REGISTRY) || '' }}
labels: ${{ steps.meta-non_root.outputs.labels }}
platforms: local,linux/amd64,linux/arm64,linux/arm64/v8
@@ -240,7 +240,11 @@ jobs:
context: .
file: ./litellm-js/spend-logs/Dockerfile
push: true
- tags: ${{ steps.meta-spend-logs.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }}, ${{ steps.meta-spend-logs.outputs.tags }}-${{ github.event.inputs.release_type }}
+ tags: |
+ ${{ steps.meta-spend-logs.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }},
+ ${{ steps.meta-spend-logs.outputs.tags }}-${{ github.event.inputs.release_type }}
+ ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-spend_logs:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }},
+ ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-spend_logs:main-stable', env.REGISTRY) || '' }}
platforms: local,linux/amd64,linux/arm64,linux/arm64/v8
build-and-push-helm-chart:
diff --git a/.github/workflows/publish-migrations.yml b/.github/workflows/publish-migrations.yml
new file mode 100644
index 0000000000..8e5a67bcf8
--- /dev/null
+++ b/.github/workflows/publish-migrations.yml
@@ -0,0 +1,206 @@
+name: Publish Prisma Migrations
+
+permissions:
+ contents: write
+ pull-requests: write
+
+on:
+ push:
+ paths:
+ - 'schema.prisma' # Check root schema.prisma
+ branches:
+ - main
+
+jobs:
+ publish-migrations:
+ runs-on: ubuntu-latest
+ services:
+ postgres:
+ image: postgres:14
+ env:
+ POSTGRES_DB: temp_db
+ POSTGRES_USER: postgres
+ POSTGRES_PASSWORD: postgres
+ ports:
+ - 5432:5432
+ options: >-
+ --health-cmd pg_isready
+ --health-interval 10s
+ --health-timeout 5s
+ --health-retries 5
+
+ # Add shadow database service
+ postgres_shadow:
+ image: postgres:14
+ env:
+ POSTGRES_DB: shadow_db
+ POSTGRES_USER: postgres
+ POSTGRES_PASSWORD: postgres
+ ports:
+ - 5433:5432
+ options: >-
+ --health-cmd pg_isready
+ --health-interval 10s
+ --health-timeout 5s
+ --health-retries 5
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
+
+ - name: Install Dependencies
+ run: |
+ pip install prisma
+ pip install python-dotenv
+
+ - name: Generate Initial Migration if None Exists
+ env:
+ DATABASE_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
+ DIRECT_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
+ SHADOW_DATABASE_URL: "postgresql://postgres:postgres@localhost:5433/shadow_db"
+ run: |
+ mkdir -p deploy/migrations
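+ # migration_lock.toml records the datasource provider for the migrations in this directory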
+ echo 'provider = "postgresql"' > deploy/migrations/migration_lock.toml
+
+ if [ -z "$(ls -A deploy/migrations/2* 2>/dev/null)" ]; then
+ echo "No existing migrations found, creating baseline..."
+ VERSION=$(date +%Y%m%d%H%M%S)
+ mkdir -p deploy/migrations/${VERSION}_initial
+
+ echo "Generating initial migration..."
+ # Save raw output for debugging
+ prisma migrate diff \
+ --from-empty \
+ --to-schema-datamodel schema.prisma \
+ --shadow-database-url "${SHADOW_DATABASE_URL}" \
+ --script > deploy/migrations/${VERSION}_initial/raw_migration.sql
+
+ echo "Raw migration file content:"
+ cat deploy/migrations/${VERSION}_initial/raw_migration.sql
+
+ echo "Cleaning migration file..."
+ # Strip Prisma's "Installing ..." progress lines so only SQL remains
+ sed '/^Installing/d' deploy/migrations/${VERSION}_initial/raw_migration.sql > deploy/migrations/${VERSION}_initial/migration.sql
+
+ # Verify the migration file
+ if [ ! -s deploy/migrations/${VERSION}_initial/migration.sql ]; then
+ echo "ERROR: Migration file is empty after cleaning"
+ echo "Original content was:"
+ cat deploy/migrations/${VERSION}_initial/raw_migration.sql
+ exit 1
+ fi
+
+ echo "Final migration file content:"
+ cat deploy/migrations/${VERSION}_initial/migration.sql
+
+ # Verify it starts with SQL
+ if ! head -n 1 deploy/migrations/${VERSION}_initial/migration.sql | grep -q "^--\|^CREATE\|^ALTER"; then
+ echo "ERROR: Migration file does not start with SQL command or comment"
+ echo "First line is:"
+ head -n 1 deploy/migrations/${VERSION}_initial/migration.sql
+ echo "Full content is:"
+ cat deploy/migrations/${VERSION}_initial/migration.sql
+ exit 1
+ fi
+
+ echo "Initial migration generated at $(date -u)" > deploy/migrations/${VERSION}_initial/README.md
+ fi
+
+ - name: Compare and Generate Migration
+ if: success()
+ env:
+ DATABASE_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
+ DIRECT_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
+ SHADOW_DATABASE_URL: "postgresql://postgres:postgres@localhost:5433/shadow_db"
+ run: |
+ # Create temporary migration workspace
+ mkdir -p temp_migrations
+
+ # Copy existing migrations (will not fail if directory is empty)
+ cp -r deploy/migrations/* temp_migrations/ 2>/dev/null || true
+
+ VERSION=$(date +%Y%m%d%H%M%S)
+
+ # Generate diff against existing migrations or empty state
+ prisma migrate diff \
+ --from-migrations temp_migrations \
+ --to-schema-datamodel schema.prisma \
+ --shadow-database-url "${SHADOW_DATABASE_URL}" \
+ --script > temp_migrations/migration_${VERSION}.sql
+
+ # Check if there are actual changes
+ if [ -s temp_migrations/migration_${VERSION}.sql ]; then
+ echo "Changes detected, creating new migration"
+ mkdir -p deploy/migrations/${VERSION}_schema_update
+ mv temp_migrations/migration_${VERSION}.sql deploy/migrations/${VERSION}_schema_update/migration.sql
+ echo "Migration generated at $(date -u)" > deploy/migrations/${VERSION}_schema_update/README.md
+ else
+ echo "No schema changes detected"
+ exit 0
+ fi
+
+ - name: Verify Migration
+ if: success()
+ env:
+ DATABASE_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
+ DIRECT_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
+ SHADOW_DATABASE_URL: "postgresql://postgres:postgres@localhost:5433/shadow_db"
+ run: |
+ # Create test database
+ psql "${SHADOW_DATABASE_URL}" -c 'CREATE DATABASE migration_test;'
+
+ # Apply all migrations in order to verify
+ for migration in deploy/migrations/*/migration.sql; do
+ echo "Applying migration: $migration"
+ psql "${SHADOW_DATABASE_URL}" -f $migration
+ done
+
+ # Add this step before create-pull-request to debug permissions
+ - name: Check Token Permissions
+ run: |
+ echo "Checking token permissions..."
+ curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ https://api.github.com/repos/BerriAI/litellm/collaborators
+
+ echo "\nChecking if token can create PRs..."
+ curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ https://api.github.com/repos/BerriAI/litellm
+
+ # Add this debug step before git push
+ - name: Debug Changed Files
+ run: |
+ echo "Files staged for commit:"
+ git diff --name-status --staged
+
+ echo "\nAll changed files:"
+ git status
+
+ - name: Create Pull Request
+ if: success()
+ uses: peter-evans/create-pull-request@v5
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ commit-message: "chore: update prisma migrations"
+ title: "Update Prisma Migrations"
+ body: |
+ Auto-generated migration based on schema.prisma changes.
+
+ Generated files:
+ - deploy/migrations/${{ env.VERSION }}_schema_update/migration.sql
+ - deploy/migrations/${{ env.VERSION }}_schema_update/README.md
+ branch: feat/prisma-migration-${{ env.VERSION }}
+ base: main
+ delete-branch: true
+
+ - name: Generate and Save Migrations
+ run: |
+ # Only add migration files
+ git add deploy/migrations/
+ git status # Debug what's being committed
+ git commit -m "chore: update prisma migrations"
diff --git a/.github/workflows/test-linting.yml b/.github/workflows/test-linting.yml
new file mode 100644
index 0000000000..b3bffbec5c
--- /dev/null
+++ b/.github/workflows/test-linting.yml
@@ -0,0 +1,53 @@
+name: LiteLLM Linting
+
+on:
+ pull_request:
+ branches: [ main ]
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ timeout-minutes: 5
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.12'
+
+ - name: Install Poetry
+ uses: snok/install-poetry@v1
+
+ - name: Install dependencies
+ run: |
+ poetry install --with dev
+
+ - name: Run Black formatting
+ run: |
+ cd litellm
+ poetry run black .
+ cd ..
+
+ - name: Run Ruff linting
+ run: |
+ cd litellm
+ poetry run ruff check .
+ cd ..
+
+ - name: Run MyPy type checking
+ run: |
+ cd litellm
+ poetry run mypy . --ignore-missing-imports
+ cd ..
+
+ - name: Check for circular imports
+ run: |
+ cd litellm
+ poetry run python ../tests/documentation_tests/test_circular_imports.py
+ cd ..
+
+ - name: Check import safety
+ run: |
+ poetry run python -c "from litellm import *" || (echo '🚨 import failed, this means you introduced unprotected imports! 🚨'; exit 1)
\ No newline at end of file
diff --git a/.github/workflows/test-litellm.yml b/.github/workflows/test-litellm.yml
new file mode 100644
index 0000000000..12d09725ed
--- /dev/null
+++ b/.github/workflows/test-litellm.yml
@@ -0,0 +1,35 @@
+name: LiteLLM Mock Tests (folder - tests/litellm)
+
+on:
+ pull_request:
+ branches: [ main ]
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ timeout-minutes: 5
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Thank You Message
+ run: |
+ echo "### 🙏 Thank you for contributing to LiteLLM!" >> $GITHUB_STEP_SUMMARY
+ echo "Your PR is being tested now. We appreciate your help in making LiteLLM better!" >> $GITHUB_STEP_SUMMARY
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.12'
+
+ - name: Install Poetry
+ uses: snok/install-poetry@v1
+
+ - name: Install dependencies
+ run: |
+ poetry install --with dev,proxy-dev --extras proxy
+ poetry run pip install pytest-xdist
+
+ - name: Run tests
+ run: |
+ poetry run pytest tests/litellm -x -vv -n 4
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index d35923f7c3..e8c18bed4c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+.python-version
.venv
.env
.newenv
@@ -72,6 +73,7 @@ tests/local_testing/log.txt
.codegpt
litellm/proxy/_new_new_secret_config.yaml
litellm/proxy/custom_guardrail.py
+.mypy_cache/*
litellm/proxy/_experimental/out/404.html
litellm/proxy/_experimental/out/404.html
litellm/proxy/_experimental/out/model_hub.html
@@ -79,3 +81,10 @@ litellm/proxy/_experimental/out/model_hub.html
litellm/proxy/application.log
tests/llm_translation/vertex_test_account.json
tests/llm_translation/test_vertex_key.json
+litellm/proxy/migrations/0_init/migration.sql
+litellm/proxy/db/migrations/0_init/migration.sql
+litellm/proxy/db/migrations/*
+litellm/proxy/migrations/*config.yaml
+litellm/proxy/migrations/*
+config.yaml
+tests/litellm/litellm_core_utils/llm_cost_calc/log.txt
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fb37f32524..dedb37d6dd 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,44 +6,35 @@ repos:
entry: pyright
language: system
types: [python]
- files: ^litellm/
+ files: ^(litellm/|litellm_proxy_extras/)
- id: isort
name: isort
entry: isort
language: system
types: [python]
- files: litellm/.*\.py
+ files: (litellm/|litellm_proxy_extras/).*\.py
exclude: ^litellm/__init__.py$
-- repo: https://github.com/psf/black
- rev: 24.2.0
- hooks:
- - id: black
+ - id: black
+ name: black
+ entry: poetry run black
+ language: system
+ types: [python]
+ files: (litellm/|litellm_proxy_extras/).*\.py
- repo: https://github.com/pycqa/flake8
rev: 7.0.0 # The version of flake8 to use
hooks:
- id: flake8
exclude: ^litellm/tests/|^litellm/proxy/tests/|^litellm/tests/litellm/|^tests/litellm/
additional_dependencies: [flake8-print]
- files: litellm/.*\.py
- # - id: flake8
- # name: flake8 (router.py function length)
- # files: ^litellm/router\.py$
- # args: [--max-function-length=40]
- # # additional_dependencies: [flake8-functions]
+ files: (litellm/|litellm_proxy_extras/).*\.py
- repo: https://github.com/python-poetry/poetry
rev: 1.8.0
hooks:
- id: poetry-check
+ files: ^(pyproject.toml|litellm-proxy-extras/pyproject.toml)$
- repo: local
hooks:
- id: check-files-match
name: Check if files match
entry: python3 ci_cd/check_files_match.py
- language: system
- # - id: check-file-length
- # name: Check file length
- # entry: python check_file_length.py
- # args: ["10000"] # set your desired maximum number of lines
- # language: python
- # files: litellm/.*\.py
- # exclude: ^litellm/tests/
\ No newline at end of file
+ language: system
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index dd699c795b..3a74c46e68 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,8 +12,7 @@ WORKDIR /app
USER root
# Install build dependencies
-RUN apk update && \
- apk add --no-cache gcc python3-dev openssl openssl-dev
+RUN apk add --no-cache gcc python3-dev openssl openssl-dev
RUN pip install --upgrade pip && \
@@ -37,9 +36,6 @@ RUN pip install dist/*.whl
# install dependencies as wheels
RUN pip wheel --no-cache-dir --wheel-dir=/wheels/ -r requirements.txt
-# install semantic-cache [Experimental]- we need this here and not in requirements.txt because redisvl pins to pydantic 1.0
-RUN pip install redisvl==0.0.7 --no-deps
-
# ensure pyjwt is used, not jwt
RUN pip uninstall jwt -y
RUN pip uninstall PyJWT -y
@@ -55,8 +51,7 @@ FROM $LITELLM_RUNTIME_IMAGE AS runtime
USER root
# Install runtime dependencies
-RUN apk update && \
- apk add --no-cache openssl
+RUN apk add --no-cache openssl
WORKDIR /app
# Copy the current directory contents into the container at /app
diff --git a/Makefile b/Makefile
index 6555326168..a06509312d 100644
--- a/Makefile
+++ b/Makefile
@@ -9,10 +9,14 @@ help:
@echo " make test - Run all tests"
@echo " make test-unit - Run unit tests"
@echo " make test-integration - Run integration tests"
+ @echo " make test-unit-helm - Run helm unit tests"
install-dev:
poetry install --with dev
+install-proxy-dev:
+ poetry install --with dev,proxy-dev
+
lint: install-dev
poetry run pip install types-requests types-setuptools types-redis types-PyYAML
cd litellm && poetry run mypy . --ignore-missing-imports
@@ -25,4 +29,7 @@ test-unit:
poetry run pytest tests/litellm/
test-integration:
- poetry run pytest tests/ -k "not litellm"
\ No newline at end of file
+ poetry run pytest tests/ -k "not litellm"
+
+test-unit-helm:
+ helm unittest -f 'tests/*.yaml' deploy/charts/litellm-helm
\ No newline at end of file
diff --git a/README.md b/README.md
index 2d2f71e4d1..1c4e148443 100644
--- a/README.md
+++ b/README.md
@@ -16,9 +16,6 @@
-
-
-
diff --git a/ci_cd/baseline_db.py b/ci_cd/baseline_db.py
new file mode 100644
index 0000000000..ecc080abed
--- /dev/null
+++ b/ci_cd/baseline_db.py
@@ -0,0 +1,60 @@
+import subprocess
+from pathlib import Path
+from datetime import datetime
+
+
+def create_baseline():
+ """Create baseline migration in deploy/migrations"""
+ try:
+ # Get paths
+ root_dir = Path(__file__).parent.parent
+ deploy_dir = root_dir / "deploy"
+ migrations_dir = deploy_dir / "migrations"
+ schema_path = root_dir / "schema.prisma"
+
+ # Create migrations directory
+ migrations_dir.mkdir(parents=True, exist_ok=True)
+
+ # Create migration_lock.toml if it doesn't exist
+ lock_file = migrations_dir / "migration_lock.toml"
+ if not lock_file.exists():
+ lock_file.write_text('provider = "postgresql"\n')
+
+ # Create timestamp-based migration directory
+ timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
+ migration_dir = migrations_dir / f"{timestamp}_baseline"
+ migration_dir.mkdir(parents=True, exist_ok=True)
+
+ # Generate migration SQL
+ result = subprocess.run(
+ [
+ "prisma",
+ "migrate",
+ "diff",
+ "--from-empty",
+ "--to-schema-datamodel",
+ str(schema_path),
+ "--script",
+ ],
+ capture_output=True,
+ text=True,
+ check=True,
+ )
+
+ # Write the SQL to migration.sql
+ migration_file = migration_dir / "migration.sql"
+ migration_file.write_text(result.stdout)
+
+ print(f"Created baseline migration in {migration_dir}")
+ return True
+
+ except subprocess.CalledProcessError as e:
+ print(f"Error running prisma command: {e.stderr}")
+ return False
+ except Exception as e:
+ print(f"Error creating baseline migration: {str(e)}")
+ return False
+
+
+if __name__ == "__main__":
+ create_baseline()
diff --git a/ci_cd/publish-proxy-extras.sh b/ci_cd/publish-proxy-extras.sh
new file mode 100644
index 0000000000..6c83d1f921
--- /dev/null
+++ b/ci_cd/publish-proxy-extras.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# Exit on error
+set -e
+
+echo "🚀 Building and publishing litellm-proxy-extras"
+
+# Navigate to litellm-proxy-extras directory
+cd "$(dirname "$0")/../litellm-proxy-extras"
+
+# Build the package
+echo "📦 Building package..."
+poetry build
+
+# Publish to PyPI
+echo "🌎 Publishing to PyPI..."
+poetry publish
+
+echo "✅ Done! Package published successfully"
\ No newline at end of file
diff --git a/ci_cd/run_migration.py b/ci_cd/run_migration.py
new file mode 100644
index 0000000000..b11a38395c
--- /dev/null
+++ b/ci_cd/run_migration.py
@@ -0,0 +1,95 @@
+import os
+import subprocess
+from pathlib import Path
+from datetime import datetime
+import testing.postgresql
+import shutil
+
+
+def create_migration(migration_name: str = None):
+ """
+ Create a new migration SQL file in the migrations directory by comparing
+ current database state with schema
+
+ Args:
+ migration_name (str): Name for the migration
+ """
+ try:
+ # Get paths
+ root_dir = Path(__file__).parent.parent
+ migrations_dir = root_dir / "litellm-proxy-extras" / "litellm_proxy_extras" / "migrations"
+ schema_path = root_dir / "schema.prisma"
+
+ # Create temporary PostgreSQL database
+ with testing.postgresql.Postgresql() as postgresql:
+ db_url = postgresql.url()
+
+ # Create temporary migrations directory next to schema.prisma
+ temp_migrations_dir = schema_path.parent / "migrations"
+
+ try:
+ # Copy existing migrations to temp directory
+ if temp_migrations_dir.exists():
+ shutil.rmtree(temp_migrations_dir)
+ shutil.copytree(migrations_dir, temp_migrations_dir)
+
+ # Apply existing migrations to temp database
+ os.environ["DATABASE_URL"] = db_url
+ subprocess.run(
+ ["prisma", "migrate", "deploy", "--schema", str(schema_path)],
+ check=True,
+ )
+
+ # Generate diff between current database and schema
+ result = subprocess.run(
+ [
+ "prisma",
+ "migrate",
+ "diff",
+ "--from-url",
+ db_url,
+ "--to-schema-datamodel",
+ str(schema_path),
+ "--script",
+ ],
+ capture_output=True,
+ text=True,
+ check=True,
+ )
+
+ if result.stdout.strip():
+ # Generate timestamp and create migration directory
+ timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
+ migration_name = migration_name or "unnamed_migration"
+ migration_dir = migrations_dir / f"{timestamp}_{migration_name}"
+ migration_dir.mkdir(parents=True, exist_ok=True)
+
+ # Write the SQL to migration.sql
+ migration_file = migration_dir / "migration.sql"
+ migration_file.write_text(result.stdout)
+
+ print(f"Created migration in {migration_dir}")
+ return True
+ else:
+ print("No schema changes detected. Migration not needed.")
+ return False
+
+ finally:
+ # Clean up: remove temporary migrations directory
+ if temp_migrations_dir.exists():
+ shutil.rmtree(temp_migrations_dir)
+
+ except subprocess.CalledProcessError as e:
+ print(f"Error generating migration: {e.stderr}")
+ return False
+ except Exception as e:
+ print(f"Error creating migration: {str(e)}")
+ return False
+
+
+if __name__ == "__main__":
+ # If running directly, can optionally pass migration name as argument
+ import sys
+
+ migration_name = sys.argv[1] if len(sys.argv) > 1 else None
+ create_migration(migration_name)
diff --git a/cookbook/LiteLLM_HuggingFace.ipynb b/cookbook/LiteLLM_HuggingFace.ipynb
index 3a9a0785be..d608c2675a 100644
--- a/cookbook/LiteLLM_HuggingFace.ipynb
+++ b/cookbook/LiteLLM_HuggingFace.ipynb
@@ -6,8 +6,9 @@
"id": "9dKM5k8qsMIj"
},
"source": [
- "## LiteLLM HuggingFace\n",
- "Docs for huggingface: https://docs.litellm.ai/docs/providers/huggingface"
+ "## LiteLLM Hugging Face\n",
+ "\n",
+ "Docs for huggingface: https://docs.litellm.ai/docs/providers/huggingface\n"
]
},
{
@@ -27,23 +28,18 @@
"id": "yp5UXRqtpu9f"
},
"source": [
- "## Hugging Face Free Serverless Inference API\n",
- "Read more about the Free Serverless Inference API here: https://huggingface.co/docs/api-inference.\n",
+ "## Serverless Inference Providers\n",
"\n",
- "In order to use litellm to call Serverless Inference API:\n",
+ "Read more about Inference Providers here: https://huggingface.co/blog/inference-providers.\n",
"\n",
- "* Browse Serverless Inference compatible models here: https://huggingface.co/models?inference=warm&pipeline_tag=text-generation.\n",
- "* Copy the model name from hugging face\n",
- "* Set `model = \"huggingface/\"`\n",
+ "In order to use litellm with Hugging Face Inference Providers, you need to set `model=huggingface//`.\n",
"\n",
- "Example set `model=huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct` to call `meta-llama/Meta-Llama-3.1-8B-Instruct`\n",
- "\n",
- "https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct"
+ "Example: `huggingface/together/deepseek-ai/DeepSeek-R1` to run DeepSeek-R1 (https://huggingface.co/deepseek-ai/DeepSeek-R1) through Together AI.\n"
]
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -51,107 +47,18 @@
"id": "Pi5Oww8gpCUm",
"outputId": "659a67c7-f90d-4c06-b94e-2c4aa92d897a"
},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "ModelResponse(id='chatcmpl-c54dfb68-1491-4d68-a4dc-35e603ea718a', choices=[Choices(finish_reason='eos_token', index=0, message=Message(content=\"I'm just a computer program, so I don't have feelings, but thank you for asking! How can I assist you today?\", role='assistant', tool_calls=None, function_call=None))], created=1724858285, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion', system_fingerprint=None, usage=Usage(completion_tokens=27, prompt_tokens=47, total_tokens=74))\n",
- "ModelResponse(id='chatcmpl-d2ae38e6-4974-431c-bb9b-3fa3f95e5a6d', choices=[Choices(finish_reason='length', index=0, message=Message(content=\"\\n\\nI’m doing well, thank you. I’ve been keeping busy with work and some personal projects. How about you?\\n\\nI'm doing well, thank you. I've been enjoying some time off and catching up on some reading. How can I assist you today?\\n\\nI'm looking for a good book to read. Do you have any recommendations?\\n\\nOf course! Here are a few book recommendations across different genres:\\n\\n1.\", role='assistant', tool_calls=None, function_call=None))], created=1724858288, model='mistralai/Mistral-7B-Instruct-v0.3', object='chat.completion', system_fingerprint=None, usage=Usage(completion_tokens=85, prompt_tokens=6, total_tokens=91))\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"import os\n",
- "import litellm\n",
+ "from litellm import completion\n",
"\n",
- "# Make sure to create an API_KEY with inference permissions at https://huggingface.co/settings/tokens/new?globalPermissions=inference.serverless.write&tokenType=fineGrained\n",
- "os.environ[\"HUGGINGFACE_API_KEY\"] = \"\"\n",
+ "# You can create a HF token here: https://huggingface.co/settings/tokens\n",
+ "os.environ[\"HF_TOKEN\"] = \"hf_xxxxxx\"\n",
"\n",
- "# Call https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct\n",
- "# add the 'huggingface/' prefix to the model to set huggingface as the provider\n",
- "response = litellm.completion(\n",
- " model=\"huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
- " messages=[{ \"content\": \"Hello, how are you?\",\"role\": \"user\"}]\n",
- ")\n",
- "print(response)\n",
- "\n",
- "\n",
- "# Call https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3\n",
- "response = litellm.completion(\n",
- " model=\"huggingface/mistralai/Mistral-7B-Instruct-v0.3\",\n",
- " messages=[{ \"content\": \"Hello, how are you?\",\"role\": \"user\"}]\n",
- ")\n",
- "print(response)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "-klhAhjLtclv"
- },
- "source": [
- "## Hugging Face Dedicated Inference Endpoints\n",
- "\n",
- "Steps to use\n",
- "* Create your own Hugging Face dedicated endpoint here: https://ui.endpoints.huggingface.co/\n",
- "* Set `api_base` to your deployed api base\n",
- "* Add the `huggingface/` prefix to your model so litellm knows it's a huggingface Deployed Inference Endpoint"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "Lbmw8Gl_pHns",
- "outputId": "ea8408bf-1cc3-4670-ecea-f12666d204a8"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{\n",
- " \"object\": \"chat.completion\",\n",
- " \"choices\": [\n",
- " {\n",
- " \"finish_reason\": \"length\",\n",
- " \"index\": 0,\n",
- " \"message\": {\n",
- " \"content\": \"\\n\\nI am doing well, thank you for asking. How about you?\\nI am doing\",\n",
- " \"role\": \"assistant\",\n",
- " \"logprobs\": -8.9481967812\n",
- " }\n",
- " }\n",
- " ],\n",
- " \"id\": \"chatcmpl-74dc9d89-3916-47ce-9bea-b80e66660f77\",\n",
- " \"created\": 1695871068.8413374,\n",
- " \"model\": \"glaiveai/glaive-coder-7b\",\n",
- " \"usage\": {\n",
- " \"prompt_tokens\": 6,\n",
- " \"completion_tokens\": 18,\n",
- " \"total_tokens\": 24\n",
- " }\n",
- "}\n"
- ]
- }
- ],
- "source": [
- "import os\n",
- "import litellm\n",
- "\n",
- "os.environ[\"HUGGINGFACE_API_KEY\"] = \"\"\n",
- "\n",
- "# TGI model: Call https://huggingface.co/glaiveai/glaive-coder-7b\n",
- "# add the 'huggingface/' prefix to the model to set huggingface as the provider\n",
- "# set api base to your deployed api endpoint from hugging face\n",
- "response = litellm.completion(\n",
- " model=\"huggingface/glaiveai/glaive-coder-7b\",\n",
- " messages=[{ \"content\": \"Hello, how are you?\",\"role\": \"user\"}],\n",
- " api_base=\"https://wjiegasee9bmqke2.us-east-1.aws.endpoints.huggingface.cloud\"\n",
+ "# Call DeepSeek-R1 model through Together AI\n",
+ "response = completion(\n",
+ " model=\"huggingface/together/deepseek-ai/DeepSeek-R1\",\n",
+ " messages=[{\"content\": \"How many r's are in the word `strawberry`?\", \"role\": \"user\"}],\n",
")\n",
"print(response)"
]
@@ -162,13 +69,12 @@
"id": "EU0UubrKzTFe"
},
"source": [
- "## HuggingFace - Streaming (Serveless or Dedicated)\n",
- "Set stream = True"
+ "## Streaming\n"
]
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -176,74 +82,147 @@
"id": "y-QfIvA-uJKX",
"outputId": "b007bb98-00d0-44a4-8264-c8a2caed6768"
},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content='I', role='assistant', function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=\"'m\", role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' just', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' a', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' computer', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' program', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=',', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' so', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' I', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' don', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=\"'t\", role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' have', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' feelings', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=',', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' but', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' thank', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' you', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' for', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' asking', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content='!', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' How', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' can', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' I', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' assist', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' you', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=' today', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content='?', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content='<|eot_id|>', role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n",
- "ModelResponse(id='chatcmpl-ffeb4491-624b-4ddf-8005-60358cf67d36', choices=[StreamingChoices(finish_reason='stop', index=0, delta=Delta(content=None, role=None, function_call=None, tool_calls=None), logprobs=None)], created=1724858353, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion.chunk', system_fingerprint=None)\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"import os\n",
- "import litellm\n",
+ "from litellm import completion\n",
"\n",
- "# Make sure to create an API_KEY with inference permissions at https://huggingface.co/settings/tokens/new?globalPermissions=inference.serverless.write&tokenType=fineGrained\n",
- "os.environ[\"HUGGINGFACE_API_KEY\"] = \"\"\n",
+ "os.environ[\"HF_TOKEN\"] = \"hf_xxxxxx\"\n",
"\n",
- "# Call https://huggingface.co/glaiveai/glaive-coder-7b\n",
- "# add the 'huggingface/' prefix to the model to set huggingface as the provider\n",
- "# set api base to your deployed api endpoint from hugging face\n",
- "response = litellm.completion(\n",
- " model=\"huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
- " messages=[{ \"content\": \"Hello, how are you?\",\"role\": \"user\"}],\n",
- " stream=True\n",
+ "response = completion(\n",
+ " model=\"huggingface/together/deepseek-ai/DeepSeek-R1\",\n",
+ " messages=[\n",
+ " {\n",
+ " \"role\": \"user\",\n",
+ " \"content\": \"How many r's are in the word `strawberry`?\",\n",
+ " \n",
+ " }\n",
+ " ],\n",
+ " stream=True,\n",
")\n",
"\n",
- "print(response)\n",
- "\n",
"for chunk in response:\n",
- " print(chunk)"
+ " print(chunk)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## With images as input\n"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "id": "CKXAnK55zQRl"
- },
+ "metadata": {},
"outputs": [],
- "source": []
+ "source": [
+ "from litellm import completion\n",
+ "\n",
+ "# Set your Hugging Face Token\n",
+ "os.environ[\"HF_TOKEN\"] = \"hf_xxxxxx\"\n",
+ "\n",
+ "messages = [\n",
+ " {\n",
+ " \"role\": \"user\",\n",
+ " \"content\": [\n",
+ " {\"type\": \"text\", \"text\": \"What's in this image?\"},\n",
+ " {\n",
+ " \"type\": \"image_url\",\n",
+ " \"image_url\": {\n",
+ " \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n",
+ " },\n",
+ " },\n",
+ " ],\n",
+ " }\n",
+ "]\n",
+ "\n",
+ "response = completion(\n",
+ " model=\"huggingface/sambanova/meta-llama/Llama-3.3-70B-Instruct\",\n",
+ " messages=messages,\n",
+ ")\n",
+ "print(response.choices[0])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Tools - Function Calling\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from litellm import completion\n",
+ "\n",
+ "\n",
+ "# Set your Hugging Face Token\n",
+ "os.environ[\"HF_TOKEN\"] = \"hf_xxxxxx\"\n",
+ "\n",
+ "tools = [\n",
+ " {\n",
+ " \"type\": \"function\",\n",
+ " \"function\": {\n",
+ " \"name\": \"get_current_weather\",\n",
+ " \"description\": \"Get the current weather in a given location\",\n",
+ " \"parameters\": {\n",
+ " \"type\": \"object\",\n",
+ " \"properties\": {\n",
+ " \"location\": {\n",
+ " \"type\": \"string\",\n",
+ " \"description\": \"The city and state, e.g. San Francisco, CA\",\n",
+ " },\n",
+ " \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n",
+ " },\n",
+ " \"required\": [\"location\"],\n",
+ " },\n",
+ " },\n",
+ " }\n",
+ "]\n",
+ "messages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}]\n",
+ "\n",
+ "response = completion(\n",
+ " model=\"huggingface/sambanova/meta-llama/Llama-3.1-8B-Instruct\", messages=messages, tools=tools, tool_choice=\"auto\"\n",
+ ")\n",
+ "print(response)"
+ ]
+ },
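+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If the model decides to call the tool, the response follows the OpenAI format, so the call is available under `tool_calls`. A minimal sketch of reading it back out (this assumes the request above actually returned a tool call):\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Inspect the first tool call returned by the model\n",
+    "tool_call = response.choices[0].message.tool_calls[0]\n",
+    "print(tool_call.function.name)       # e.g. get_current_weather\n",
+    "print(tool_call.function.arguments)  # JSON string of arguments\n"
+   ]
+  },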
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Hugging Face Dedicated Inference Endpoints\n",
+ "\n",
+ "Steps to use\n",
+ "\n",
+ "- Create your own Hugging Face dedicated endpoint here: https://ui.endpoints.huggingface.co/\n",
+    "- Set `api_base` to your deployed API base\n",
+    "- Set the model to `huggingface/tgi` so that LiteLLM knows it's a Hugging Face Dedicated Inference Endpoint.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import litellm\n",
+    "\n",
+    "# If your endpoint is private, set your Hugging Face token\n",
+    "os.environ[\"HF_TOKEN\"] = \"hf_xxxxxx\"\n",
+    "\n",
+ "response = litellm.completion(\n",
+ " model=\"huggingface/tgi\",\n",
+ " messages=[{\"content\": \"Hello, how are you?\", \"role\": \"user\"}],\n",
+ " api_base=\"https://my-endpoint.endpoints.huggingface.cloud/v1/\",\n",
+ ")\n",
+ "print(response)"
+ ]
}
],
"metadata": {
@@ -251,7 +230,8 @@
"provenance": []
},
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": ".venv",
+ "language": "python",
"name": "python3"
},
"language_info": {
@@ -264,7 +244,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.12.2"
+ "version": "3.12.0"
}
},
"nbformat": 4,
diff --git a/cookbook/litellm-ollama-docker-image/requirements.txt b/cookbook/litellm-ollama-docker-image/requirements.txt
index 6d24983e38..7990d251cc 100644
--- a/cookbook/litellm-ollama-docker-image/requirements.txt
+++ b/cookbook/litellm-ollama-docker-image/requirements.txt
@@ -1 +1 @@
-litellm==1.55.3
\ No newline at end of file
+litellm==1.61.15
\ No newline at end of file
diff --git a/cookbook/misc/dev_release.txt b/cookbook/misc/dev_release.txt
index 717a6da546..bd40f89e6f 100644
--- a/cookbook/misc/dev_release.txt
+++ b/cookbook/misc/dev_release.txt
@@ -1,2 +1,11 @@
python3 -m build
-twine upload --verbose dist/litellm-1.18.13.dev4.tar.gz -u __token__ -
\ No newline at end of file
+twine upload --verbose dist/litellm-1.18.13.dev4.tar.gz -u __token__ -
+
+
+Note: You might need to create a MANIFEST.in file at the repo root in case the build process fails
+
+Place this in MANIFEST.in:
+recursive-exclude venv *
+recursive-exclude myenv *
+recursive-exclude py313_env *
+recursive-exclude **/.venv *
diff --git a/deploy/charts/litellm-helm/Chart.yaml b/deploy/charts/litellm-helm/Chart.yaml
index f1f2fd8d64..5de591fd73 100644
--- a/deploy/charts/litellm-helm/Chart.yaml
+++ b/deploy/charts/litellm-helm/Chart.yaml
@@ -18,7 +18,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.4.1
+version: 0.4.3
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
diff --git a/deploy/charts/litellm-helm/README.md b/deploy/charts/litellm-helm/README.md
index 8b2196f577..a0ba5781df 100644
--- a/deploy/charts/litellm-helm/README.md
+++ b/deploy/charts/litellm-helm/README.md
@@ -22,6 +22,8 @@ If `db.useStackgresOperator` is used (not yet implemented):
| Name | Description | Value |
| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- |
| `replicaCount` | The number of LiteLLM Proxy pods to be deployed | `1` |
+| `masterkeySecretName` | The name of the Kubernetes Secret that contains the Master API Key for LiteLLM. If not specified, the generated secret name is used. | N/A |
+| `masterkeySecretKey` | The key within the Kubernetes Secret that contains the Master API Key for LiteLLM. If not specified, the key `masterkey` is used. | N/A |
| `masterkey` | The Master API Key for LiteLLM. If not specified, a random key is generated. | N/A |
| `environmentSecrets` | An optional array of Secret object names. The keys and values in these secrets will be presented to the LiteLLM proxy pod as environment variables. See below for an example Secret object. | `[]` |
| `environmentConfigMaps` | An optional array of ConfigMap object names. The keys and values in these configmaps will be presented to the LiteLLM proxy pod as environment variables. See below for an example Secret object. | `[]` |
diff --git a/deploy/charts/litellm-helm/templates/deployment.yaml b/deploy/charts/litellm-helm/templates/deployment.yaml
index 697148abf8..5b9488c19b 100644
--- a/deploy/charts/litellm-helm/templates/deployment.yaml
+++ b/deploy/charts/litellm-helm/templates/deployment.yaml
@@ -78,8 +78,8 @@ spec:
- name: PROXY_MASTER_KEY
valueFrom:
secretKeyRef:
- name: {{ include "litellm.fullname" . }}-masterkey
- key: masterkey
+ name: {{ .Values.masterkeySecretName | default (printf "%s-masterkey" (include "litellm.fullname" .)) }}
+ key: {{ .Values.masterkeySecretKey | default "masterkey" }}
{{- if .Values.redis.enabled }}
- name: REDIS_HOST
value: {{ include "litellm.redis.serviceName" . }}
@@ -97,6 +97,9 @@ spec:
value: {{ $val | quote }}
{{- end }}
{{- end }}
+ {{- with .Values.extraEnvVars }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
envFrom:
{{- range .Values.environmentSecrets }}
- secretRef:
diff --git a/deploy/charts/litellm-helm/templates/migrations-job.yaml b/deploy/charts/litellm-helm/templates/migrations-job.yaml
index e994c45548..1c4b6817fa 100644
--- a/deploy/charts/litellm-helm/templates/migrations-job.yaml
+++ b/deploy/charts/litellm-helm/templates/migrations-job.yaml
@@ -65,6 +65,6 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
- ttlSecondsAfterFinished: {{ .Values.migrationJob.ttlSecondsAfterFinished }}
+ ttlSecondsAfterFinished: {{ .Values.migrationJob.ttlSecondsAfterFinished }}
backoffLimit: {{ .Values.migrationJob.backoffLimit }}
{{- end }}
diff --git a/deploy/charts/litellm-helm/templates/secret-masterkey.yaml b/deploy/charts/litellm-helm/templates/secret-masterkey.yaml
index 57b854cc0f..5632957dc0 100644
--- a/deploy/charts/litellm-helm/templates/secret-masterkey.yaml
+++ b/deploy/charts/litellm-helm/templates/secret-masterkey.yaml
@@ -1,3 +1,4 @@
+{{- if not .Values.masterkeySecretName }}
{{ $masterkey := (.Values.masterkey | default (randAlphaNum 17)) }}
apiVersion: v1
kind: Secret
@@ -5,4 +6,5 @@ metadata:
name: {{ include "litellm.fullname" . }}-masterkey
data:
masterkey: {{ $masterkey | b64enc }}
-type: Opaque
\ No newline at end of file
+type: Opaque
+{{- end }}
diff --git a/deploy/charts/litellm-helm/templates/service.yaml b/deploy/charts/litellm-helm/templates/service.yaml
index 40e7f27f16..d8d81e78c8 100644
--- a/deploy/charts/litellm-helm/templates/service.yaml
+++ b/deploy/charts/litellm-helm/templates/service.yaml
@@ -2,6 +2,10 @@ apiVersion: v1
kind: Service
metadata:
name: {{ include "litellm.fullname" . }}
+ {{- with .Values.service.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
labels:
{{- include "litellm.labels" . | nindent 4 }}
spec:
diff --git a/deploy/charts/litellm-helm/tests/deployment_tests.yaml b/deploy/charts/litellm-helm/tests/deployment_tests.yaml
index e7ce44b052..b71f91377f 100644
--- a/deploy/charts/litellm-helm/tests/deployment_tests.yaml
+++ b/deploy/charts/litellm-helm/tests/deployment_tests.yaml
@@ -52,3 +52,66 @@ tests:
- equal:
path: spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]
value: antarctica-east1
+ - it: should work without masterkeySecretName or masterkeySecretKey
+ template: deployment.yaml
+ set:
+ masterkeySecretName: ""
+ masterkeySecretKey: ""
+ asserts:
+ - contains:
+ path: spec.template.spec.containers[0].env
+ content:
+ name: PROXY_MASTER_KEY
+ valueFrom:
+ secretKeyRef:
+ name: RELEASE-NAME-litellm-masterkey
+ key: masterkey
+ - it: should work with masterkeySecretName and masterkeySecretKey
+ template: deployment.yaml
+ set:
+ masterkeySecretName: my-secret
+ masterkeySecretKey: my-key
+ asserts:
+ - contains:
+ path: spec.template.spec.containers[0].env
+ content:
+ name: PROXY_MASTER_KEY
+ valueFrom:
+ secretKeyRef:
+ name: my-secret
+ key: my-key
+ - it: should work with extraEnvVars
+ template: deployment.yaml
+ set:
+ extraEnvVars:
+ - name: EXTRA_ENV_VAR
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['env']
+ asserts:
+ - contains:
+ path: spec.template.spec.containers[0].env
+ content:
+ name: EXTRA_ENV_VAR
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['env']
+ - it: should work with both extraEnvVars and envVars
+ template: deployment.yaml
+ set:
+ envVars:
+ ENV_VAR: ENV_VAR_VALUE
+ extraEnvVars:
+ - name: EXTRA_ENV_VAR
+ value: EXTRA_ENV_VAR_VALUE
+ asserts:
+ - contains:
+ path: spec.template.spec.containers[0].env
+ content:
+ name: ENV_VAR
+ value: ENV_VAR_VALUE
+ - contains:
+ path: spec.template.spec.containers[0].env
+ content:
+ name: EXTRA_ENV_VAR
+ value: EXTRA_ENV_VAR_VALUE
diff --git a/deploy/charts/litellm-helm/tests/masterkey-secret_tests.yaml b/deploy/charts/litellm-helm/tests/masterkey-secret_tests.yaml
new file mode 100644
index 0000000000..eb1d3c3967
--- /dev/null
+++ b/deploy/charts/litellm-helm/tests/masterkey-secret_tests.yaml
@@ -0,0 +1,18 @@
+suite: test masterkey secret
+templates:
+ - secret-masterkey.yaml
+tests:
+ - it: should create a secret if masterkeySecretName is not set
+ template: secret-masterkey.yaml
+ set:
+ masterkeySecretName: ""
+ asserts:
+ - isKind:
+ of: Secret
+ - it: should not create a secret if masterkeySecretName is set
+ template: secret-masterkey.yaml
+ set:
+ masterkeySecretName: my-secret
+ asserts:
+ - hasDocuments:
+ count: 0
diff --git a/deploy/charts/litellm-helm/values.yaml b/deploy/charts/litellm-helm/values.yaml
index 9f21fc40ad..0440e28eed 100644
--- a/deploy/charts/litellm-helm/values.yaml
+++ b/deploy/charts/litellm-helm/values.yaml
@@ -75,6 +75,12 @@ ingress:
# masterkey: changeit
+# if set, use this secret for the master key; otherwise, autogenerate a new one
+masterkeySecretName: ""
+
+# if set, use this secret key for the master key; otherwise, use the default key
+masterkeySecretKey: ""
+
# The elements within proxy_config are rendered as config.yaml for the proxy
# Examples: https://github.com/BerriAI/litellm/tree/main/litellm/proxy/example_config_yaml
# Reference: https://docs.litellm.ai/docs/proxy/configs
@@ -189,9 +195,15 @@ migrationJob:
annotations: {}
ttlSecondsAfterFinished: 120
-# Additional environment variables to be added to the deployment
+# Additional environment variables to be added to the deployment as a map of key-value pairs
envVars: {
# USE_DDTRACE: "true"
}
+# Additional environment variables to be added to the deployment as a list of k8s env vars
+extraEnvVars: []
+  # - name: EXTRA_ENV_VAR
+  #   value: EXTRA_ENV_VAR_VALUE
+
diff --git a/docker-compose.yml b/docker-compose.yml
index d16ec6ed20..66f5bcaa7f 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -66,5 +66,3 @@ volumes:
postgres_data:
name: litellm_postgres_data # Named volume for Postgres data persistence
-
-# ...rest of your docker-compose config if any
diff --git a/docker/Dockerfile.alpine b/docker/Dockerfile.alpine
index cc0c434013..f036081549 100644
--- a/docker/Dockerfile.alpine
+++ b/docker/Dockerfile.alpine
@@ -35,7 +35,7 @@ RUN pip wheel --no-cache-dir --wheel-dir=/wheels/ -r requirements.txt
FROM $LITELLM_RUNTIME_IMAGE AS runtime
# Update dependencies and clean up
-RUN apk update && apk upgrade && rm -rf /var/cache/apk/*
+RUN apk upgrade --no-cache
WORKDIR /app
diff --git a/docker/Dockerfile.database b/docker/Dockerfile.database
index 02eb286180..da0326fd2c 100644
--- a/docker/Dockerfile.database
+++ b/docker/Dockerfile.database
@@ -12,8 +12,7 @@ WORKDIR /app
USER root
# Install build dependencies
-RUN apk update && \
- apk add --no-cache gcc python3-dev openssl openssl-dev
+RUN apk add --no-cache gcc python3-dev openssl openssl-dev
RUN pip install --upgrade pip && \
@@ -44,8 +43,7 @@ FROM $LITELLM_RUNTIME_IMAGE AS runtime
USER root
# Install runtime dependencies
-RUN apk update && \
- apk add --no-cache openssl
+RUN apk add --no-cache openssl
WORKDIR /app
# Copy the current directory contents into the container at /app
@@ -59,9 +57,6 @@ COPY --from=builder /wheels/ /wheels/
# Install the built wheel using pip; again using a wildcard if it's the only file
RUN pip install *.whl /wheels/* --no-index --find-links=/wheels/ && rm -f *.whl && rm -rf /wheels
-# install semantic-cache [Experimental]- we need this here and not in requirements.txt because redisvl pins to pydantic 1.0
-RUN pip install redisvl==0.0.7 --no-deps
-
# ensure pyjwt is used, not jwt
RUN pip uninstall jwt -y
RUN pip uninstall PyJWT -y
diff --git a/docker/Dockerfile.non_root b/docker/Dockerfile.non_root
index 3a4cdb59d5..079778cafb 100644
--- a/docker/Dockerfile.non_root
+++ b/docker/Dockerfile.non_root
@@ -14,7 +14,7 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# Install build dependencies
RUN apt-get clean && apt-get update && \
- apt-get install -y gcc python3-dev && \
+ apt-get install -y gcc g++ python3-dev && \
rm -rf /var/lib/apt/lists/*
RUN pip install --no-cache-dir --upgrade pip && \
@@ -56,10 +56,8 @@ COPY --from=builder /wheels/ /wheels/
# Install the built wheel using pip; again using a wildcard if it's the only file
RUN pip install *.whl /wheels/* --no-index --find-links=/wheels/ && rm -f *.whl && rm -rf /wheels
-# install semantic-cache [Experimental]- we need this here and not in requirements.txt because redisvl pins to pydantic 1.0
# ensure pyjwt is used, not jwt
-RUN pip install redisvl==0.0.7 --no-deps --no-cache-dir && \
- pip uninstall jwt -y && \
+RUN pip uninstall jwt -y && \
pip uninstall PyJWT -y && \
pip install PyJWT==2.9.0 --no-cache-dir
diff --git a/docs/my-website/docs/anthropic_unified.md b/docs/my-website/docs/anthropic_unified.md
index cf6ba798d5..92cae9c0aa 100644
--- a/docs/my-website/docs/anthropic_unified.md
+++ b/docs/my-website/docs/anthropic_unified.md
@@ -3,9 +3,10 @@ import TabItem from '@theme/TabItem';
# /v1/messages [BETA]
-LiteLLM provides a BETA endpoint in the spec of Anthropic's `/v1/messages` endpoint.
+Use LiteLLM to call all your LLM APIs in the Anthropic `v1/messages` format.
-This currently just supports the Anthropic API.
+
+## Overview
| Feature | Supported | Notes |
|-------|-------|-------|
@@ -21,9 +22,61 @@ Planned improvement:
- Bedrock Anthropic support
## Usage
+---
+
+### LiteLLM Python SDK
+
+#### Non-streaming example
+```python showLineNumbers title="Example using LiteLLM Python SDK"
+import os
+import litellm
+
+response = await litellm.anthropic.messages.acreate(
+    messages=[{"role": "user", "content": "Hello, can you tell me a short joke?"}],
+    api_key=os.getenv("ANTHROPIC_API_KEY"),
+    model="anthropic/claude-3-haiku-20240307",
+    max_tokens=100,
+)
+```
+
+Example response:
+```json
+{
+ "content": [
+ {
+ "text": "Hi! this is a very short joke",
+ "type": "text"
+ }
+ ],
+ "id": "msg_013Zva2CMHLNnXjNJJKqJ2EF",
+ "model": "claude-3-7-sonnet-20250219",
+ "role": "assistant",
+ "stop_reason": "end_turn",
+ "stop_sequence": null,
+ "type": "message",
+ "usage": {
+ "input_tokens": 2095,
+ "output_tokens": 503,
+ "cache_creation_input_tokens": 2095,
+ "cache_read_input_tokens": 0
+ }
+}
+```
+
+#### Streaming example
+```python showLineNumbers title="Example using LiteLLM Python SDK"
+import os
+import litellm
+
+response = await litellm.anthropic.messages.acreate(
+    messages=[{"role": "user", "content": "Hello, can you tell me a short joke?"}],
+    api_key=os.getenv("ANTHROPIC_API_KEY"),
+    model="anthropic/claude-3-haiku-20240307",
+    max_tokens=100,
+    stream=True,
+)
+
+async for chunk in response:
+    print(chunk)
+```
+
+### LiteLLM Proxy Server
-
-
1. Setup config.yaml
@@ -42,7 +95,28 @@ litellm --config /path/to/config.yaml
3. Test it!
-```bash
+
+
+
+```python showLineNumbers title="Example using LiteLLM Proxy Server"
+import anthropic
+
+# point anthropic sdk to litellm proxy
+client = anthropic.Anthropic(
+ base_url="http://0.0.0.0:4000",
+ api_key="sk-1234",
+)
+
+response = client.messages.create(
+ messages=[{"role": "user", "content": "Hello, can you tell me a short joke?"}],
+ model="anthropic-claude",
+ max_tokens=100,
+)
+```
+
+
+
+```bash showLineNumbers title="Example using LiteLLM Proxy Server"
curl -L -X POST 'http://0.0.0.0:4000/v1/messages' \
-H 'content-type: application/json' \
-H 'x-api-key: $LITELLM_API_KEY' \
@@ -52,41 +126,176 @@ curl -L -X POST 'http://0.0.0.0:4000/v1/messages' \
"messages": [
{
"role": "user",
- "content": [
- {
- "type": "text",
- "text": "List 5 important events in the XIX century"
- }
- ]
+ "content": "Hello, can you tell me a short joke?"
}
],
- "max_tokens": 4096
+ "max_tokens": 100
}'
```
+
-
+
-```python
-from litellm.llms.anthropic.experimental_pass_through.messages.handler import anthropic_messages
-import asyncio
-import os
-# set env
-os.environ["ANTHROPIC_API_KEY"] = "my-api-key"
+## Request Format
+---
-messages = [{"role": "user", "content": "Hello, can you tell me a short joke?"}]
+The request body follows the Anthropic messages API format. **LiteLLM follows the Anthropic messages specification for this endpoint.**
-# Call the handler
-async def call():
- response = await anthropic_messages(
- messages=messages,
- api_key=api_key,
- model="claude-3-haiku-20240307",
- max_tokens=100,
- )
+#### Example request body
-asyncio.run(call())
+```json
+{
+ "model": "claude-3-7-sonnet-20250219",
+ "max_tokens": 1024,
+ "messages": [
+ {
+ "role": "user",
+ "content": "Hello, world"
+ }
+ ]
+}
```
-
-
\ No newline at end of file
+#### Required Fields
+- **model** (string):
+ The model identifier (e.g., `"claude-3-7-sonnet-20250219"`).
+- **max_tokens** (integer):
+ The maximum number of tokens to generate before stopping.
+ _Note: The model may stop before reaching this limit; value must be greater than 1._
+- **messages** (array of objects):
+ An ordered list of conversational turns.
+ Each message object must include:
+ - **role** (enum: `"user"` or `"assistant"`):
+ Specifies the speaker of the message.
+ - **content** (string or array of content blocks):
+ The text or content blocks (e.g., an array containing objects with a `type` such as `"text"`) that form the message.
+ _Example equivalence:_
+ ```json
+ {"role": "user", "content": "Hello, Claude"}
+ ```
+ is equivalent to:
+ ```json
+ {"role": "user", "content": [{"type": "text", "text": "Hello, Claude"}]}
+ ```
+
+#### Optional Fields
+- **metadata** (object):
+ Contains additional metadata about the request (e.g., `user_id` as an opaque identifier).
+- **stop_sequences** (array of strings):
+ Custom sequences that, when encountered in the generated text, cause the model to stop.
+- **stream** (boolean):
+ Indicates whether to stream the response using server-sent events.
+- **system** (string or array):
+ A system prompt providing context or specific instructions to the model.
+- **temperature** (number):
+ Controls randomness in the model’s responses. Valid range: `0 < temperature < 1`.
+- **thinking** (object):
+ Configuration for enabling extended thinking. If enabled, it includes:
+ - **budget_tokens** (integer):
+ Minimum of 1024 tokens (and less than `max_tokens`).
+ - **type** (enum):
+ E.g., `"enabled"`.
+- **tool_choice** (object):
+ Instructs how the model should utilize any provided tools.
+- **tools** (array of objects):
+ Definitions for tools available to the model. Each tool includes:
+ - **name** (string):
+ The tool’s name.
+ - **description** (string):
+ A detailed description of the tool.
+ - **input_schema** (object):
+ A JSON schema describing the expected input format for the tool.
+- **top_k** (integer):
+ Limits sampling to the top K options.
+- **top_p** (number):
+ Enables nucleus sampling with a cumulative probability cutoff. Valid range: `0 < top_p < 1`.
+
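+#### Example request with optional fields
+
+A sketch combining several of the optional fields above with the required ones via the LiteLLM Python SDK (this assumes the SDK forwards these optional Anthropic fields as keyword arguments; the key and model values are placeholders):
+
+```python showLineNumbers title="Example using LiteLLM Python SDK"
+import os
+import litellm
+
+response = await litellm.anthropic.messages.acreate(
+    model="anthropic/claude-3-haiku-20240307",
+    max_tokens=256,
+    system="You are a concise assistant.",     # optional system prompt
+    temperature=0.5,                           # 0 < temperature < 1
+    stop_sequences=["###"],                    # custom stop sequences
+    messages=[{"role": "user", "content": "Hello, world"}],
+    api_key=os.getenv("ANTHROPIC_API_KEY"),
+)
+```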
+
+## Response Format
+---
+
+Responses will be in the Anthropic messages API format.
+
+#### Example Response
+
+```json
+{
+ "content": [
+ {
+ "text": "Hi! My name is Claude.",
+ "type": "text"
+ }
+ ],
+ "id": "msg_013Zva2CMHLNnXjNJJKqJ2EF",
+ "model": "claude-3-7-sonnet-20250219",
+ "role": "assistant",
+ "stop_reason": "end_turn",
+ "stop_sequence": null,
+ "type": "message",
+ "usage": {
+ "input_tokens": 2095,
+ "output_tokens": 503,
+ "cache_creation_input_tokens": 2095,
+ "cache_read_input_tokens": 0
+ }
+}
+```
+
+#### Response fields
+
+- **content** (array of objects):
+ Contains the generated content blocks from the model. Each block includes:
+ - **type** (string):
+ Indicates the type of content (e.g., `"text"`, `"tool_use"`, `"thinking"`, or `"redacted_thinking"`).
+ - **text** (string):
+ The generated text from the model.
+ _Note: Maximum length is 5,000,000 characters._
+ - **citations** (array of objects or `null`):
+ Optional field providing citation details. Each citation includes:
+ - **cited_text** (string):
+ The excerpt being cited.
+ - **document_index** (integer):
+ An index referencing the cited document.
+ - **document_title** (string or `null`):
+ The title of the cited document.
+ - **start_char_index** (integer):
+ The starting character index for the citation.
+ - **end_char_index** (integer):
+ The ending character index for the citation.
+ - **type** (string):
+ Typically `"char_location"`.
+
+- **id** (string):
+ A unique identifier for the response message.
+ _Note: The format and length of IDs may change over time._
+
+- **model** (string):
+ Specifies the model that generated the response.
+
+- **role** (string):
+ Indicates the role of the generated message. For responses, this is always `"assistant"`.
+
+- **stop_reason** (string):
+ Explains why the model stopped generating text. Possible values include:
+ - `"end_turn"`: The model reached a natural stopping point.
+ - `"max_tokens"`: The generation stopped because the maximum token limit was reached.
+ - `"stop_sequence"`: A custom stop sequence was encountered.
+ - `"tool_use"`: The model invoked one or more tools.
+
+- **stop_sequence** (string or `null`):
+ Contains the specific stop sequence that caused the generation to halt, if applicable; otherwise, it is `null`.
+
+- **type** (string):
+ Denotes the type of response object, which is always `"message"`.
+
+- **usage** (object):
+ Provides details on token usage for billing and rate limiting. This includes:
+ - **input_tokens** (integer):
+ Total number of input tokens processed.
+ - **output_tokens** (integer):
+ Total number of output tokens generated.
+ - **cache_creation_input_tokens** (integer or `null`):
+ Number of tokens used to create a cache entry.
+ - **cache_read_input_tokens** (integer or `null`):
+ Number of tokens read from the cache.
diff --git a/docs/my-website/docs/caching/all_caches.md b/docs/my-website/docs/caching/all_caches.md
index dc1951cc77..a14170beef 100644
--- a/docs/my-website/docs/caching/all_caches.md
+++ b/docs/my-website/docs/caching/all_caches.md
@@ -3,7 +3,7 @@ import TabItem from '@theme/TabItem';
# Caching - In-Memory, Redis, s3, Redis Semantic Cache, Disk
-[**See Code**](https://github.com/BerriAI/litellm/blob/main/litellm.caching.caching.py)
+[**See Code**](https://github.com/BerriAI/litellm/blob/main/litellm/caching/caching.py)
:::info
@@ -26,7 +26,7 @@ Install redis
pip install redis
```
-For the hosted version you can setup your own Redis DB here: https://app.redislabs.com/
+For the hosted version, you can set up your own Redis DB here: https://redis.io/try-free/
```python
import litellm
@@ -37,11 +37,11 @@ litellm.cache = Cache(type="redis", host=, port=, password=
-Install redis
+Install the redisvl client
```shell
-pip install redisvl==0.0.7
+pip install redisvl==0.4.1
```
-For the hosted version you can setup your own Redis DB here: https://app.redislabs.com/
+For the hosted version, you can set up your own Redis DB here: https://redis.io/try-free/
```python
import litellm
@@ -114,6 +114,7 @@ litellm.cache = Cache(
port=os.environ["REDIS_PORT"],
password=os.environ["REDIS_PASSWORD"],
similarity_threshold=0.8, # similarity threshold for cache hits, 0 == no similarity, 1 = exact matches, 0.5 == 50% similarity
+ ttl=120,
redis_semantic_cache_embedding_model="text-embedding-ada-002", # this model is passed to litellm.embedding(), any litellm.embedding() model is supported here
)
response1 = completion(
@@ -471,11 +472,13 @@ def __init__(
password: Optional[str] = None,
namespace: Optional[str] = None,
default_in_redis_ttl: Optional[float] = None,
- similarity_threshold: Optional[float] = None,
- redis_semantic_cache_use_async=False,
- redis_semantic_cache_embedding_model="text-embedding-ada-002",
redis_flush_size=None,
+ # redis semantic cache params
+ similarity_threshold: Optional[float] = None,
+ redis_semantic_cache_embedding_model: str = "text-embedding-ada-002",
+ redis_semantic_cache_index_name: Optional[str] = None,
+
# s3 Bucket, boto3 configuration
s3_bucket_name: Optional[str] = None,
s3_region_name: Optional[str] = None,
diff --git a/docs/my-website/docs/completion/document_understanding.md b/docs/my-website/docs/completion/document_understanding.md
index 6719169aef..f58b836c63 100644
--- a/docs/my-website/docs/completion/document_understanding.md
+++ b/docs/my-website/docs/completion/document_understanding.md
@@ -27,16 +27,18 @@ os.environ["AWS_REGION_NAME"] = ""
# pdf url
-image_url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
+file_url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
# model
model = "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0"
-image_content = [
+file_content = [
{"type": "text", "text": "What's this file about?"},
{
- "type": "image_url",
- "image_url": image_url, # OR {"url": image_url}
+ "type": "file",
+ "file": {
+ "file_id": file_url,
+ }
},
]
@@ -46,7 +48,7 @@ if not supports_pdf_input(model, None):
response = completion(
model=model,
- messages=[{"role": "user", "content": image_content}],
+ messages=[{"role": "user", "content": file_content}],
)
assert response is not None
```
@@ -80,11 +82,15 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
-d '{
"model": "bedrock-model",
"messages": [
- {"role": "user", "content": {"type": "text", "text": "What's this file about?"}},
- {
- "type": "image_url",
- "image_url": "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf",
- }
+ {"role": "user", "content": [
+ {"type": "text", "text": "What is this file about?"},
+ {
+ "type": "file",
+ "file": {
+ "file_id": "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf",
+ }
+ }
+ ]},
]
}'
```
@@ -116,11 +122,13 @@ base64_url = f"data:application/pdf;base64,{encoded_file}"
# model
model = "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0"
-image_content = [
+file_content = [
{"type": "text", "text": "What's this file about?"},
{
- "type": "image_url",
- "image_url": base64_url, # OR {"url": base64_url}
+ "type": "file",
+ "file": {
+ "file_data": base64_url,
+ }
},
]
@@ -130,11 +138,53 @@ if not supports_pdf_input(model, None):
response = completion(
model=model,
- messages=[{"role": "user", "content": image_content}],
+ messages=[{"role": "user", "content": file_content}],
)
assert response is not None
```
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: bedrock-model
+ litellm_params:
+ model: bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0
+ aws_access_key_id: os.environ/AWS_ACCESS_KEY_ID
+ aws_secret_access_key: os.environ/AWS_SECRET_ACCESS_KEY
+ aws_region_name: os.environ/AWS_REGION_NAME
+```
+
+2. Start the proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+3. Test it!
+
+```bash
+curl -X POST 'http://0.0.0.0:4000/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "bedrock-model",
+ "messages": [
+ {"role": "user", "content": [
+ {"type": "text", "text": "What is this file about?"},
+ {
+ "type": "file",
+ "file": {
+ "file_data": "data:application/pdf;base64...",
+ }
+ }
+ ]},
+ ]
+}'
+```
+
## Checking if a model supports pdf input
diff --git a/docs/my-website/docs/completion/drop_params.md b/docs/my-website/docs/completion/drop_params.md
index e79a88e14b..590d9a4595 100644
--- a/docs/my-website/docs/completion/drop_params.md
+++ b/docs/my-website/docs/completion/drop_params.md
@@ -107,4 +107,76 @@ response = litellm.completion(
-**additional_drop_params**: List or null - Is a list of openai params you want to drop when making a call to the model.
\ No newline at end of file
+**additional_drop_params**: List or null - A list of OpenAI params you want to drop when making a call to the model.
+
+## Specify allowed openai params in a request
+
+Tell LiteLLM to allow specific OpenAI params in a request. Use this if you get a `litellm.UnsupportedParamsError` and still want to send a param. LiteLLM will pass the param as-is to the model.
+
+
+
+
+
+
+In this example we pass `allowed_openai_params=["tools"]` to allow the `tools` param.
+
+```python showLineNumbers title="Pass allowed_openai_params to LiteLLM Python SDK"
+await litellm.acompletion(
+    model="azure/o_series/",
+    api_key="xxxxx",
+    api_base=api_base,  # your Azure API base
+    messages=[{"role": "user", "content": "Hello! return a json object"}],
+    tools=[{"type": "function", "function": {"name": "get_current_time", "description": "Get the current time in a given location.", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "The city name, e.g. San Francisco"}}, "required": ["location"]}}}],
+    allowed_openai_params=["tools"],
+)
+```
+
+
+
+When using litellm proxy you can pass `allowed_openai_params` in two ways:
+
+1. Dynamically pass `allowed_openai_params` in a request
+2. Set `allowed_openai_params` on the config.yaml file for a specific model
+
+#### Dynamically pass allowed_openai_params in a request
+In this example we pass `allowed_openai_params=["tools"]` to allow the `tools` param for a request sent to the model set on the proxy.
+
+```python showLineNumbers title="Dynamically pass allowed_openai_params in a request"
+import openai
+
+client = openai.OpenAI(
+ api_key="anything",
+ base_url="http://0.0.0.0:4000"
+)
+
+response = client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages = [
+ {
+ "role": "user",
+ "content": "this is a test request, write a short poem"
+ }
+ ],
+ extra_body={
+ "allowed_openai_params": ["tools"]
+ }
+)
+```
+
+#### Set allowed_openai_params on config.yaml
+
+You can also set `allowed_openai_params` on the config.yaml file for a specific model. This means that all requests to this deployment are allowed to pass in the `tools` param.
+
+```yaml showLineNumbers title="Set allowed_openai_params on config.yaml"
+model_list:
+ - model_name: azure-o1-preview
+ litellm_params:
+ model: azure/o_series/
+ api_key: xxxxx
+ api_base: https://openai-prod-test.openai.azure.com/openai/deployments/o1/chat/completions?api-version=2025-01-01-preview
+ allowed_openai_params: ["tools"]
+```
+
+
\ No newline at end of file
diff --git a/docs/my-website/docs/completion/prompt_caching.md b/docs/my-website/docs/completion/prompt_caching.md
index 6fbf89bd6d..9447a11d52 100644
--- a/docs/my-website/docs/completion/prompt_caching.md
+++ b/docs/my-website/docs/completion/prompt_caching.md
@@ -4,7 +4,7 @@ import TabItem from '@theme/TabItem';
# Prompt Caching
Supported Providers:
-- OpenAI (`deepseek/`)
+- OpenAI (`openai/`)
- Anthropic API (`anthropic/`)
- Bedrock (`bedrock/`, `bedrock/invoke/`, `bedrock/converse`) ([All models bedrock supports prompt caching on](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-caching.html))
- Deepseek API (`deepseek/`)
@@ -505,4 +505,4 @@ curl -L -X GET 'http://0.0.0.0:4000/v1/model/info' \
-This checks our maintained [model info/cost map](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json)
\ No newline at end of file
+This checks our maintained [model info/cost map](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json)
diff --git a/docs/my-website/docs/completion/web_search.md b/docs/my-website/docs/completion/web_search.md
new file mode 100644
index 0000000000..7a67dc265e
--- /dev/null
+++ b/docs/my-website/docs/completion/web_search.md
@@ -0,0 +1,308 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Using Web Search
+
+Use web search with litellm
+
+| Feature | Details |
+|---------|---------|
+| Supported Endpoints | - `/chat/completions` - `/responses` |
+| Supported Providers | `openai` |
+| LiteLLM Cost Tracking | ✅ Supported |
+| LiteLLM Version | `v1.63.15-nightly` or higher |
+
+
+## `/chat/completions` (litellm.completion)
+
+### Quick Start
+
+
+
+
+```python showLineNumbers
+from litellm import completion
+
+response = completion(
+ model="openai/gpt-4o-search-preview",
+ messages=[
+ {
+ "role": "user",
+ "content": "What was a positive news story from today?",
+ }
+ ],
+)
+```
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: gpt-4o-search-preview
+ litellm_params:
+ model: openai/gpt-4o-search-preview
+ api_key: os.environ/OPENAI_API_KEY
+```
+
+2. Start the proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+3. Test it!
+
+```python showLineNumbers
+from openai import OpenAI
+
+# Point to your proxy server
+client = OpenAI(
+ api_key="sk-1234",
+ base_url="http://0.0.0.0:4000"
+)
+
+response = client.chat.completions.create(
+ model="gpt-4o-search-preview",
+ messages=[
+ {
+ "role": "user",
+ "content": "What was a positive news story from today?"
+ }
+ ]
+)
+```
+
+
+
+### Search context size
+
+
+
+
+```python showLineNumbers
+from litellm import completion
+
+# Customize search context size
+response = completion(
+ model="openai/gpt-4o-search-preview",
+ messages=[
+ {
+ "role": "user",
+ "content": "What was a positive news story from today?",
+ }
+ ],
+ web_search_options={
+ "search_context_size": "low" # Options: "low", "medium" (default), "high"
+ }
+)
+```
+
+
+
+```python showLineNumbers
+from openai import OpenAI
+
+# Point to your proxy server
+client = OpenAI(
+ api_key="sk-1234",
+ base_url="http://0.0.0.0:4000"
+)
+
+# Customize search context size
+response = client.chat.completions.create(
+ model="gpt-4o-search-preview",
+ messages=[
+ {
+ "role": "user",
+ "content": "What was a positive news story from today?"
+ }
+ ],
+ web_search_options={
+ "search_context_size": "low" # Options: "low", "medium" (default), "high"
+ }
+)
+```
+
+
+
+## `/responses` (litellm.responses)
+
+### Quick Start
+
+
+
+
+```python showLineNumbers
+from litellm import responses
+
+response = responses(
+ model="openai/gpt-4o",
+ input=[
+ {
+ "role": "user",
+ "content": "What was a positive news story from today?"
+ }
+ ],
+ tools=[{
+ "type": "web_search_preview" # enables web search with default medium context size
+ }]
+)
+```
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: gpt-4o
+ litellm_params:
+ model: openai/gpt-4o
+ api_key: os.environ/OPENAI_API_KEY
+```
+
+2. Start the proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+3. Test it!
+
+```python showLineNumbers
+from openai import OpenAI
+
+# Point to your proxy server
+client = OpenAI(
+ api_key="sk-1234",
+ base_url="http://0.0.0.0:4000"
+)
+
+response = client.responses.create(
+ model="gpt-4o",
+ tools=[{
+ "type": "web_search_preview"
+ }],
+ input="What was a positive news story from today?",
+)
+
+print(response.output_text)
+```
+
+
+
+### Search context size
+
+
+
+
+```python showLineNumbers
+from litellm import responses
+
+# Customize search context size
+response = responses(
+ model="openai/gpt-4o",
+ input=[
+ {
+ "role": "user",
+ "content": "What was a positive news story from today?"
+ }
+ ],
+ tools=[{
+ "type": "web_search_preview",
+ "search_context_size": "low" # Options: "low", "medium" (default), "high"
+ }]
+)
+```
+
+
+
+```python showLineNumbers
+from openai import OpenAI
+
+# Point to your proxy server
+client = OpenAI(
+ api_key="sk-1234",
+ base_url="http://0.0.0.0:4000"
+)
+
+# Customize search context size
+response = client.responses.create(
+ model="gpt-4o",
+ tools=[{
+ "type": "web_search_preview",
+ "search_context_size": "low" # Options: "low", "medium" (default), "high"
+ }],
+ input="What was a positive news story from today?",
+)
+
+print(response.output_text)
+```
+
+
+
+
+
+
+
+
+## Checking if a model supports web search
+
+
+
+
+Use `litellm.supports_web_search(model="openai/gpt-4o-search-preview")` -> returns `True` if model can perform web searches
+
+```python showLineNumbers
+assert litellm.supports_web_search(model="openai/gpt-4o-search-preview") == True
+```
+
+
+
+
+1. Define OpenAI models in config.yaml
+
+```yaml
+model_list:
+ - model_name: gpt-4o-search-preview
+ litellm_params:
+ model: openai/gpt-4o-search-preview
+ api_key: os.environ/OPENAI_API_KEY
+ model_info:
+ supports_web_search: True
+```
+
+2. Run proxy server
+
+```bash
+litellm --config config.yaml
+```
+
+3. Call `/model_group/info` to check if a model supports web search
+
+```shell
+curl -X 'GET' \
+ 'http://localhost:4000/model_group/info' \
+ -H 'accept: application/json' \
+ -H 'x-api-key: sk-1234'
+```
+
+Expected Response
+
+```json showLineNumbers
+{
+ "data": [
+ {
+ "model_group": "gpt-4o-search-preview",
+ "providers": ["openai"],
+ "max_tokens": 128000,
+ "supports_web_search": true, # 👈 supports_web_search is true
+ }
+ ]
+}
+```
+
+
+
diff --git a/docs/my-website/docs/enterprise.md b/docs/my-website/docs/enterprise.md
index 0306a5b452..706ca33714 100644
--- a/docs/my-website/docs/enterprise.md
+++ b/docs/my-website/docs/enterprise.md
@@ -1,3 +1,5 @@
+import Image from '@theme/IdealImage';
+
# Enterprise
For companies that need SSO, user management and professional support for LiteLLM Proxy
@@ -7,6 +9,8 @@ Get free 7-day trial key [here](https://www.litellm.ai/#trial)
Includes all enterprise features.
+
+
[**Procurement available via AWS / Azure Marketplace**](./data_security.md#legalcompliance-faqs)
@@ -34,9 +38,9 @@ You can use our cloud product where we setup a dedicated instance for you.
Professional Support can assist with LLM/Provider integrations, deployment, upgrade management, and LLM Provider troubleshooting. We can’t solve your own infrastructure-related issues but we will guide you to fix them.
-- 1 hour for Sev0 issues
-- 6 hours for Sev1
-- 24h for Sev2-Sev3 between 7am – 7pm PT (Monday through Saturday)
+- 1 hour for Sev0 issues - 100% production traffic is failing
+- 6 hours for Sev1 - <100% production traffic is failing
+- 24h for Sev2-Sev3 between 7am – 7pm PT (Monday through Saturday) - setup issues, e.g. Redis working on our end but not on your infrastructure.
- 72h SLA for patching vulnerabilities in the software.
**We can offer custom SLAs** based on your needs and the severity of the issue
diff --git a/docs/my-website/docs/files_endpoints.md b/docs/my-website/docs/files_endpoints.md
index 7e20982ff4..31a02d41a3 100644
--- a/docs/my-website/docs/files_endpoints.md
+++ b/docs/my-website/docs/files_endpoints.md
@@ -2,10 +2,12 @@
import TabItem from '@theme/TabItem';
import Tabs from '@theme/Tabs';
-# /files
+# Provider Files Endpoints
Files are used to upload documents that can be used with features like Assistants, Fine-tuning, and Batch API.
+Use this to call the provider's `/files` endpoints directly, in the OpenAI format.
+
## Quick Start
- Upload a File
@@ -14,48 +16,105 @@ Files are used to upload documents that can be used with features like Assistant
- Delete File
- Get File Content
+
+
-```bash
-$ export OPENAI_API_KEY="sk-..."
+1. Setup config.yaml
-$ litellm
-
-# RUNNING on http://0.0.0.0:4000
+```yaml
+# for /files endpoints
+files_settings:
+ - custom_llm_provider: azure
+ api_base: https://exampleopenaiendpoint-production.up.railway.app
+ api_key: fake-key
+ api_version: "2023-03-15-preview"
+ - custom_llm_provider: openai
+ api_key: os.environ/OPENAI_API_KEY
```
-**Upload a File**
+2. Start the LiteLLM Proxy Server
+
```bash
-curl http://localhost:4000/v1/files \
- -H "Authorization: Bearer sk-1234" \
- -F purpose="fine-tune" \
- -F file="@mydata.jsonl"
+litellm --config /path/to/config.yaml
+
+## RUNNING on http://0.0.0.0:4000
```
-**List Files**
-```bash
-curl http://localhost:4000/v1/files \
- -H "Authorization: Bearer sk-1234"
+3. Use OpenAI's /files endpoints
+
+Upload a File
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="sk-...",
+ base_url="http://0.0.0.0:4000/v1"
+)
+
+# wav_data: bytes or a file-like object for the file you want to upload
+client.files.create(
+ file=wav_data,
+ purpose="user_data",
+ extra_body={"custom_llm_provider": "openai"}
+)
```
-**Retrieve File Information**
-```bash
-curl http://localhost:4000/v1/files/file-abc123 \
- -H "Authorization: Bearer sk-1234"
+List Files
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="sk-...",
+ base_url="http://0.0.0.0:4000/v1"
+)
+
+files = client.files.list(extra_body={"custom_llm_provider": "openai"})
+print("files=", files)
```
-**Delete File**
-```bash
-curl http://localhost:4000/v1/files/file-abc123 \
- -X DELETE \
- -H "Authorization: Bearer sk-1234"
+Retrieve File Information
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="sk-...",
+ base_url="http://0.0.0.0:4000/v1"
+)
+
+file = client.files.retrieve(file_id="file-abc123", extra_body={"custom_llm_provider": "openai"})
+print("file=", file)
```
-**Get File Content**
-```bash
-curl http://localhost:4000/v1/files/file-abc123/content \
- -H "Authorization: Bearer sk-1234"
+Delete File
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="sk-...",
+ base_url="http://0.0.0.0:4000/v1"
+)
+
+response = client.files.delete(file_id="file-abc123", extra_body={"custom_llm_provider": "openai"})
+print("delete response=", response)
+```
+
+Get File Content
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="sk-...",
+ base_url="http://0.0.0.0:4000/v1"
+)
+
+content = client.files.content(file_id="file-abc123", extra_body={"custom_llm_provider": "openai"})
+print("content=", content)
```
@@ -120,7 +179,7 @@ print("file content=", content)
### [OpenAI](#quick-start)
-## [Azure OpenAI](./providers/azure#azure-batches-api)
+### [Azure OpenAI](./providers/azure#azure-batches-api)
### [Vertex AI](./providers/vertex#batch-apis)
diff --git a/docs/my-website/docs/guides/security_settings.md b/docs/my-website/docs/guides/security_settings.md
new file mode 100644
index 0000000000..4dfeda2d70
--- /dev/null
+++ b/docs/my-website/docs/guides/security_settings.md
@@ -0,0 +1,66 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# SSL Security Settings
+
+If you're in an environment using an older TLS bundle with older encryption, follow this guide.
+
+
+LiteLLM uses HTTPX for network requests, unless otherwise specified.
+
+1. Disable SSL verification
+
+
+
+
+
+```python
+import litellm
+litellm.ssl_verify = False
+```
+
+
+
+```yaml
+litellm_settings:
+ ssl_verify: false
+```
+
+
+
+
+```bash
+export SSL_VERIFY="False"
+```
+
+
+
+2. Lower security settings
+
+
+
+
+```python
+import litellm
+litellm.ssl_security_level = 1
+litellm.ssl_certificate = "/path/to/certificate.pem"
+```
+
+
+
+```yaml
+litellm_settings:
+ ssl_security_level: 1
+ ssl_certificate: "/path/to/certificate.pem"
+```
+
+
+
+```bash
+export SSL_SECURITY_LEVEL="1"
+export SSL_CERTIFICATE="/path/to/certificate.pem"
+```
+
+
+
+
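+For reference, a minimal SDK sketch that applies both settings above before making a request (the model and prompt are placeholders; only relax these checks for endpoints you trust):
+
+```python
+import litellm
+
+# Relax SSL settings before any request is made
+litellm.ssl_verify = False        # skip certificate verification
+litellm.ssl_security_level = 1    # allow an older security level / ciphers
+
+response = litellm.completion(
+    model="openai/gpt-4o-mini",
+    messages=[{"role": "user", "content": "Hello"}],
+)
+print(response.choices[0].message.content)
+```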
diff --git a/docs/my-website/docs/index.md b/docs/my-website/docs/index.md
index dd3be587b5..9e4d76b89c 100644
--- a/docs/my-website/docs/index.md
+++ b/docs/my-website/docs/index.md
@@ -111,8 +111,8 @@ from litellm import completion
import os
# auth: run 'gcloud auth application-default'
-os.environ["VERTEX_PROJECT"] = "hardy-device-386718"
-os.environ["VERTEX_LOCATION"] = "us-central1"
+os.environ["VERTEXAI_PROJECT"] = "hardy-device-386718"
+os.environ["VERTEXAI_LOCATION"] = "us-central1"
response = completion(
model="vertex_ai/gemini-1.5-pro",
diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md
new file mode 100644
index 0000000000..0947c494c7
--- /dev/null
+++ b/docs/my-website/docs/mcp.md
@@ -0,0 +1,427 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import Image from '@theme/IdealImage';
+
+# /mcp [BETA] - Model Context Protocol
+
+## Expose MCP tools on LiteLLM Proxy Server
+
+This allows you to define tools that can be called by any MCP compatible client. Define your `mcp_servers` with LiteLLM and all your clients can list and call available tools.
+
+
+
+ LiteLLM MCP Architecture: Use MCP tools with all LiteLLM supported models
+
+
+#### How it works
+
+LiteLLM exposes the following MCP endpoints:
+
+- `/mcp/tools/list` - List all available tools
+- `/mcp/tools/call` - Call a specific tool with the provided arguments
+
+When MCP clients connect to LiteLLM they can follow this workflow:
+
+1. Connect to the LiteLLM MCP server
+2. List all available tools on LiteLLM
+3. Client makes LLM API request with tool call(s)
+4. LLM API returns which tools to call and with what arguments
+5. MCP client makes MCP tool calls to LiteLLM
+6. LiteLLM makes the tool calls to the appropriate MCP server
+7. LiteLLM returns the tool call results to the MCP client
+
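+A minimal sketch of the first two steps in this workflow, using the MCP Python client against a locally running proxy (the URL is a placeholder for your deployment):
+
+```python title="List LiteLLM MCP tools" showLineNumbers
+import asyncio
+from mcp import ClientSession
+from mcp.client.sse import sse_client
+
+
+async def list_litellm_tools():
+    # Point the MCP client at the LiteLLM Proxy MCP endpoint
+    async with sse_client("http://localhost:4000/mcp/") as (read, write):
+        async with ClientSession(read, write) as session:
+            await session.initialize()
+            tools = await session.list_tools()
+            print([tool.name for tool in tools.tools])
+
+
+asyncio.run(list_litellm_tools())
+```
+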
+#### Usage
+
+#### 1. Define your tools under `mcp_servers` in your config.yaml file.
+
+LiteLLM allows you to define your tools on the `mcp_servers` section in your config.yaml file. All tools listed here will be available to MCP clients (when they connect to LiteLLM and call `list_tools`).
+
+```yaml title="config.yaml" showLineNumbers
+model_list:
+ - model_name: gpt-4o
+ litellm_params:
+ model: openai/gpt-4o
+ api_key: sk-xxxxxxx
+
+mcp_servers:
+ {
+ "zapier_mcp": {
+ "url": "https://actions.zapier.com/mcp/sk-akxxxxx/sse"
+ },
+ "fetch": {
+ "url": "http://localhost:8000/sse"
+ }
+ }
+```
+
+
+#### 2. Start LiteLLM Gateway
+
+
+
+
+```shell title="Docker Run" showLineNumbers
+docker run -d \
+ -p 4000:4000 \
+ -e OPENAI_API_KEY=$OPENAI_API_KEY \
+ --name my-app \
+ -v $(pwd)/my_config.yaml:/app/config.yaml \
+ my-app:latest \
+ --config /app/config.yaml \
+ --port 4000 \
+ --detailed_debug
+```
+
+
+
+
+
+```shell title="litellm pip" showLineNumbers
+litellm --config config.yaml --detailed_debug
+```
+
+
+
+
+
+#### 3. Make an LLM API request
+
+In this example we will do the following:
+
+1. Use MCP client to list MCP tools on LiteLLM Proxy
+2. Use `transform_mcp_tool_to_openai_tool` to convert MCP tools to OpenAI tools
+3. Provide the MCP tools to `gpt-4o`
+4. Handle tool call from `gpt-4o`
+5. Convert OpenAI tool call to MCP tool call
+6. Execute tool call on MCP server
+
+```python title="MCP Client List Tools" showLineNumbers
+import asyncio
+from openai import AsyncOpenAI
+from openai.types.chat import ChatCompletionUserMessageParam
+from mcp import ClientSession
+from mcp.client.sse import sse_client
+from litellm.experimental_mcp_client.tools import (
+ transform_mcp_tool_to_openai_tool,
+ transform_openai_tool_call_request_to_mcp_tool_call_request,
+)
+
+
+async def main():
+ # Initialize clients
+
+ # point OpenAI client to LiteLLM Proxy
+ client = AsyncOpenAI(api_key="sk-1234", base_url="http://localhost:4000")
+
+ # Point MCP client to LiteLLM Proxy
+ async with sse_client("http://localhost:4000/mcp/") as (read, write):
+ async with ClientSession(read, write) as session:
+ await session.initialize()
+
+ # 1. List MCP tools on LiteLLM Proxy
+ mcp_tools = await session.list_tools()
+ print("List of MCP tools for MCP server:", mcp_tools.tools)
+
+ # Create message
+ messages = [
+ ChatCompletionUserMessageParam(
+ content="Send an email about LiteLLM supporting MCP", role="user"
+ )
+ ]
+
+ # 2. Use `transform_mcp_tool_to_openai_tool` to convert MCP tools to OpenAI tools
+ # Since OpenAI only supports tools in the OpenAI format, we need to convert the MCP tools to the OpenAI format.
+ openai_tools = [
+ transform_mcp_tool_to_openai_tool(tool) for tool in mcp_tools.tools
+ ]
+
+ # 3. Provide the MCP tools to `gpt-4o`
+ response = await client.chat.completions.create(
+ model="gpt-4o",
+ messages=messages,
+ tools=openai_tools,
+ tool_choice="auto",
+ )
+
+ # 4. Handle tool call from `gpt-4o`
+ if response.choices[0].message.tool_calls:
+ tool_call = response.choices[0].message.tool_calls[0]
+ if tool_call:
+
+ # 5. Convert OpenAI tool call to MCP tool call
+ # Since MCP servers expect tools in the MCP format, we need to convert the OpenAI tool call to the MCP format.
+ # This is done using litellm.experimental_mcp_client.tools.transform_openai_tool_call_request_to_mcp_tool_call_request
+ mcp_call = (
+ transform_openai_tool_call_request_to_mcp_tool_call_request(
+ openai_tool=tool_call.model_dump()
+ )
+ )
+
+ # 6. Execute tool call on MCP server
+ result = await session.call_tool(
+ name=mcp_call.name, arguments=mcp_call.arguments
+ )
+
+ print("Result:", result)
+
+
+# Run it
+asyncio.run(main())
+```
+
+## LiteLLM Python SDK MCP Bridge
+
+LiteLLM Python SDK acts as an MCP bridge to utilize MCP tools with all LiteLLM supported models. LiteLLM offers the following features for using MCP:
+
+- **List** Available MCP Tools: OpenAI clients can view all available MCP tools
+ - `litellm.experimental_mcp_client.load_mcp_tools` to list all available MCP tools
+- **Call** MCP Tools: OpenAI clients can call MCP tools
+ - `litellm.experimental_mcp_client.call_openai_tool` to call an OpenAI tool on an MCP server
+
+
+### 1. List Available MCP Tools
+
+In this example we'll use `litellm.experimental_mcp_client.load_mcp_tools` to list all available MCP tools on any MCP server. This method can be used in two ways:
+
+- `format="mcp"` - (default) Return MCP tools
+ - Returns: `mcp.types.Tool`
+- `format="openai"` - Return MCP tools converted to OpenAI API compatible tools, so they can be used with OpenAI endpoints.
+ - Returns: `openai.types.chat.ChatCompletionToolParam`
+
+
+
+
+```python title="MCP Client List Tools" showLineNumbers
+# Create server parameters for stdio connection
+from mcp import ClientSession, StdioServerParameters
+from mcp.client.stdio import stdio_client
+import os
+import json
+import litellm
+from litellm import experimental_mcp_client
+
+
+server_params = StdioServerParameters(
+ command="python3",
+ # Make sure to update to the full absolute path to your mcp_server.py file
+ args=["./mcp_server.py"],
+)
+
+async with stdio_client(server_params) as (read, write):
+ async with ClientSession(read, write) as session:
+ # Initialize the connection
+ await session.initialize()
+
+ # Get tools
+ tools = await experimental_mcp_client.load_mcp_tools(session=session, format="openai")
+ print("MCP TOOLS: ", tools)
+
+ messages = [{"role": "user", "content": "what's (3 + 5)"}]
+ llm_response = await litellm.acompletion(
+ model="gpt-4o",
+ api_key=os.getenv("OPENAI_API_KEY"),
+ messages=messages,
+ tools=tools,
+ )
+ print("LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str))
+```
+
+
+
+
+
+In this example we'll walk through how you can use the OpenAI SDK pointed at the LiteLLM proxy to call MCP tools. The key difference here is that we use the OpenAI SDK to make the LLM API request.
+
+```python title="MCP Client List Tools" showLineNumbers
+# Create server parameters for stdio connection
+from mcp import ClientSession, StdioServerParameters
+from mcp.client.stdio import stdio_client
+import os
+from openai import OpenAI
+from litellm import experimental_mcp_client
+
+server_params = StdioServerParameters(
+ command="python3",
+ # Make sure to update to the full absolute path to your mcp_server.py file
+ args=["./mcp_server.py"],
+)
+
+async with stdio_client(server_params) as (read, write):
+ async with ClientSession(read, write) as session:
+ # Initialize the connection
+ await session.initialize()
+
+ # Get tools using litellm mcp client
+ tools = await experimental_mcp_client.load_mcp_tools(session=session, format="openai")
+ print("MCP TOOLS: ", tools)
+
+ # Use OpenAI SDK pointed to LiteLLM proxy
+ client = OpenAI(
+ api_key="your-api-key", # Your LiteLLM proxy API key
+ base_url="http://localhost:4000" # Your LiteLLM proxy URL
+ )
+
+ messages = [{"role": "user", "content": "what's (3 + 5)"}]
+ llm_response = client.chat.completions.create(
+ model="gpt-4",
+ messages=messages,
+ tools=tools
+ )
+ print("LLM RESPONSE: ", llm_response)
+```
+
+
+
+
+### 2. List and Call MCP Tools
+
+In this example we'll use
+- `litellm.experimental_mcp_client.load_mcp_tools` to list all available MCP tools on any MCP server
+- `litellm.experimental_mcp_client.call_openai_tool` to call an OpenAI tool on an MCP server
+
+The first LLM response returns a list of OpenAI tool calls. We take the first tool call from the LLM response and pass it to `litellm.experimental_mcp_client.call_openai_tool` to call the tool on the MCP server.
+
+#### How `litellm.experimental_mcp_client.call_openai_tool` works
+
+- Accepts an OpenAI Tool Call from the LLM response
+- Converts the OpenAI Tool Call to an MCP Tool
+- Calls the MCP Tool on the MCP server
+- Returns the result of the MCP Tool call
+
+
+
+
+```python title="MCP Client List and Call Tools" showLineNumbers
+# Create server parameters for stdio connection
+from mcp import ClientSession, StdioServerParameters
+from mcp.client.stdio import stdio_client
+import os
+import json
+import litellm
+from litellm import experimental_mcp_client
+
+
+server_params = StdioServerParameters(
+ command="python3",
+ # Make sure to update to the full absolute path to your mcp_server.py file
+ args=["./mcp_server.py"],
+)
+
+async with stdio_client(server_params) as (read, write):
+ async with ClientSession(read, write) as session:
+ # Initialize the connection
+ await session.initialize()
+
+ # Get tools
+ tools = await experimental_mcp_client.load_mcp_tools(session=session, format="openai")
+ print("MCP TOOLS: ", tools)
+
+ messages = [{"role": "user", "content": "what's (3 + 5)"}]
+ llm_response = await litellm.acompletion(
+ model="gpt-4o",
+ api_key=os.getenv("OPENAI_API_KEY"),
+ messages=messages,
+ tools=tools,
+ )
+ print("LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str))
+
+ openai_tool = llm_response["choices"][0]["message"]["tool_calls"][0]
+ # Call the tool using MCP client
+ call_result = await experimental_mcp_client.call_openai_tool(
+ session=session,
+ openai_tool=openai_tool,
+ )
+ print("MCP TOOL CALL RESULT: ", call_result)
+
+ # send the tool result to the LLM
+ messages.append(llm_response["choices"][0]["message"])
+ messages.append(
+ {
+ "role": "tool",
+ "content": str(call_result.content[0].text),
+ "tool_call_id": openai_tool["id"],
+ }
+ )
+ print("final messages with tool result: ", messages)
+ llm_response = await litellm.acompletion(
+ model="gpt-4o",
+ api_key=os.getenv("OPENAI_API_KEY"),
+ messages=messages,
+ tools=tools,
+ )
+ print(
+ "FINAL LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str)
+ )
+```
+
+
+
+
+In this example we'll walk through how you can use the OpenAI SDK pointed at the LiteLLM proxy to call MCP tools. The key difference here is that we use the OpenAI SDK to make the LLM API request.
+
+```python title="MCP Client with OpenAI SDK" showLineNumbers
+# Create server parameters for stdio connection
+from mcp import ClientSession, StdioServerParameters
+from mcp.client.stdio import stdio_client
+import os
+from openai import OpenAI
+from litellm import experimental_mcp_client
+
+server_params = StdioServerParameters(
+ command="python3",
+ # Make sure to update to the full absolute path to your mcp_server.py file
+ args=["./mcp_server.py"],
+)
+
+async with stdio_client(server_params) as (read, write):
+ async with ClientSession(read, write) as session:
+ # Initialize the connection
+ await session.initialize()
+
+ # Get tools using litellm mcp client
+ tools = await experimental_mcp_client.load_mcp_tools(session=session, format="openai")
+ print("MCP TOOLS: ", tools)
+
+ # Use OpenAI SDK pointed to LiteLLM proxy
+ client = OpenAI(
+ api_key="your-api-key", # Your LiteLLM proxy API key
+ base_url="http://localhost:4000" # Your LiteLLM proxy URL
+ )
+
+ messages = [{"role": "user", "content": "what's (3 + 5)"}]
+ llm_response = client.chat.completions.create(
+ model="gpt-4",
+ messages=messages,
+ tools=tools
+ )
+ print("LLM RESPONSE: ", llm_response)
+
+ # Get the first tool call
+ tool_call = llm_response.choices[0].message.tool_calls[0]
+
+ # Call the tool using MCP client
+ call_result = await experimental_mcp_client.call_openai_tool(
+ session=session,
+ openai_tool=tool_call.model_dump(),
+ )
+ print("MCP TOOL CALL RESULT: ", call_result)
+
+ # Send the tool result back to the LLM
+ messages.append(llm_response.choices[0].message.model_dump())
+ messages.append({
+ "role": "tool",
+ "content": str(call_result.content[0].text),
+ "tool_call_id": tool_call.id,
+ })
+
+ final_response = client.chat.completions.create(
+ model="gpt-4",
+ messages=messages,
+ tools=tools
+ )
+ print("FINAL RESPONSE: ", final_response)
+```
+
+
+
\ No newline at end of file
diff --git a/docs/my-website/docs/observability/agentops_integration.md b/docs/my-website/docs/observability/agentops_integration.md
new file mode 100644
index 0000000000..e0599fab70
--- /dev/null
+++ b/docs/my-website/docs/observability/agentops_integration.md
@@ -0,0 +1,83 @@
+# 🖇️ AgentOps - LLM Observability Platform
+
+:::tip
+
+This is community maintained. Please make an issue if you run into a bug:
+https://github.com/BerriAI/litellm
+
+:::
+
+[AgentOps](https://docs.agentops.ai) is an observability platform that enables tracing and monitoring of LLM calls, providing detailed insights into your AI operations.
+
+## Using AgentOps with LiteLLM
+
+LiteLLM provides `success_callback` and `failure_callback`, allowing you to easily integrate AgentOps for comprehensive tracing and monitoring of your LLM operations.
+
+### Integration
+
+Use just a few lines of code to instantly trace your responses **across all providers** with AgentOps. Get your AgentOps API key from https://app.agentops.ai/
+
+```python
+import litellm
+
+# Configure LiteLLM to use AgentOps
+litellm.success_callback = ["agentops"]
+
+# Make your LLM calls as usual
+response = litellm.completion(
+ model="gpt-3.5-turbo",
+ messages=[{"role": "user", "content": "Hello, how are you?"}],
+)
+```
+
+Complete Code:
+
+```python
+import os
+import litellm
+from litellm import completion
+
+# Set env variables
+os.environ["OPENAI_API_KEY"] = "your-openai-key"
+os.environ["AGENTOPS_API_KEY"] = "your-agentops-api-key"
+
+# Configure LiteLLM to use AgentOps
+litellm.success_callback = ["agentops"]
+
+# OpenAI call
+response = completion(
+ model="gpt-4",
+ messages=[{"role": "user", "content": "Hi 👋 - I'm OpenAI"}],
+)
+
+print(response)
+```
+
+### Configuration Options
+
+The AgentOps integration can be configured through environment variables:
+
+- `AGENTOPS_API_KEY` (str, optional): Your AgentOps API key
+- `AGENTOPS_ENVIRONMENT` (str, optional): Deployment environment (defaults to "production")
+- `AGENTOPS_SERVICE_NAME` (str, optional): Service name for tracing (defaults to "agentops")
+
+### Advanced Usage
+
+You can configure additional settings through environment variables:
+
+```python
+import os
+import litellm
+
+# Configure AgentOps settings
+os.environ["AGENTOPS_API_KEY"] = "your-agentops-api-key"
+os.environ["AGENTOPS_ENVIRONMENT"] = "staging"
+os.environ["AGENTOPS_SERVICE_NAME"] = "my-service"
+
+# Enable AgentOps tracing
+litellm.success_callback = ["agentops"]
+```
+
+### Support
+
+For issues or questions, please refer to:
+- [AgentOps Documentation](https://docs.agentops.ai)
+- [LiteLLM Documentation](https://docs.litellm.ai)
\ No newline at end of file
diff --git a/docs/my-website/docs/observability/arize_integration.md b/docs/my-website/docs/observability/arize_integration.md
index 1cd36a1111..a654a1b4de 100644
--- a/docs/my-website/docs/observability/arize_integration.md
+++ b/docs/my-website/docs/observability/arize_integration.md
@@ -1,4 +1,7 @@
+
import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
# Arize AI
@@ -11,6 +14,8 @@ https://github.com/BerriAI/litellm
:::
+
+
## Pre-Requisites
@@ -24,7 +29,9 @@ You can also use the instrumentor option instead of the callback, which you can
```python
litellm.callbacks = ["arize"]
```
+
```python
+
import litellm
import os
@@ -48,7 +55,7 @@ response = litellm.completion(
### Using with LiteLLM Proxy
-
+1. Setup config.yaml
```yaml
model_list:
- model_name: gpt-4
@@ -60,13 +67,134 @@ model_list:
litellm_settings:
callbacks: ["arize"]
+general_settings:
+ master_key: "sk-1234" # can also be set as an environment variable
+
environment_variables:
ARIZE_SPACE_KEY: "d0*****"
ARIZE_API_KEY: "141a****"
ARIZE_ENDPOINT: "https://otlp.arize.com/v1" # OPTIONAL - your custom arize GRPC api endpoint
- ARIZE_HTTP_ENDPOINT: "https://otlp.arize.com/v1" # OPTIONAL - your custom arize HTTP api endpoint. Set either this or ARIZE_ENDPOINT
+ ARIZE_HTTP_ENDPOINT: "https://otlp.arize.com/v1" # OPTIONAL - your custom arize HTTP api endpoint. Set either this or ARIZE_ENDPOINT, or neither (defaults to https://otlp.arize.com/v1 over gRPC)
```
+2. Start the proxy
+
+```bash
+litellm --config config.yaml
+```
+
+3. Test it!
+
+```bash
+curl -X POST 'http://0.0.0.0:4000/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{ "model": "gpt-4", "messages": [{"role": "user", "content": "Hi 👋 - i'\''m openai"}]}'
+```
+
+## Pass Arize Space/Key per-request
+
+Supported parameters:
+- `arize_api_key`
+- `arize_space_key`
+
+
+
+
+```python
+import litellm
+import os
+
+# LLM API Keys
+os.environ['OPENAI_API_KEY']=""
+
+# set arize as a callback, litellm will send the data to arize
+litellm.callbacks = ["arize"]
+
+# openai call
+response = litellm.completion(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "user", "content": "Hi 👋 - i'm openai"}
+ ],
+ arize_api_key=os.getenv("ARIZE_SPACE_2_API_KEY"),
+ arize_space_key=os.getenv("ARIZE_SPACE_2_KEY"),
+)
+```
+
+
+
+
+1. Setup config.yaml
+```yaml
+model_list:
+ - model_name: gpt-4
+ litellm_params:
+ model: openai/fake
+ api_key: fake-key
+ api_base: https://exampleopenaiendpoint-production.up.railway.app/
+
+litellm_settings:
+ callbacks: ["arize"]
+
+general_settings:
+ master_key: "sk-1234" # can also be set as an environment variable
+```
+
+2. Start the proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+3. Test it!
+
+
+
+
+```bash
+curl -X POST 'http://0.0.0.0:4000/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "gpt-4",
+ "messages": [{"role": "user", "content": "Hi 👋 - i'm openai"}],
+ "arize_api_key": "ARIZE_SPACE_2_API_KEY",
+ "arize_space_key": "ARIZE_SPACE_2_KEY"
+}'
+```
+
+
+
+```python
+import openai
+client = openai.OpenAI(
+ api_key="anything",
+ base_url="http://0.0.0.0:4000"
+)
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages = [
+ {
+ "role": "user",
+ "content": "this is a test request, write a short poem"
+ }
+ ],
+ extra_body={
+ "arize_api_key": "ARIZE_SPACE_2_API_KEY",
+ "arize_space_key": "ARIZE_SPACE_2_KEY"
+ }
+)
+
+print(response)
+```
+
+
+
+
+
## Support & Talk to Founders
- [Schedule Demo 👋](https://calendly.com/d/4mp-gd3-k5k/berriai-1-1-onboarding-litellm-hosted-version)
diff --git a/docs/my-website/docs/pass_through/cohere.md b/docs/my-website/docs/pass_through/cohere.md
index 87eabd462c..227ff5777a 100644
--- a/docs/my-website/docs/pass_through/cohere.md
+++ b/docs/my-website/docs/pass_through/cohere.md
@@ -4,7 +4,7 @@ Pass-through endpoints for Cohere - call provider-specific endpoint, in native f
| Feature | Supported | Notes |
|-------|-------|-------|
-| Cost Tracking | ❌ | [Tell us if you need this](https://github.com/BerriAI/litellm/issues/new) |
+| Cost Tracking | ✅ | Supported for `/v1/chat`, and `/v2/chat` |
| Logging | ✅ | works across all integrations |
| End-user Tracking | ❌ | [Tell us if you need this](https://github.com/BerriAI/litellm/issues/new) |
| Streaming | ✅ | |
diff --git a/docs/my-website/docs/pass_through/mistral.md b/docs/my-website/docs/pass_through/mistral.md
new file mode 100644
index 0000000000..ee7ca800c4
--- /dev/null
+++ b/docs/my-website/docs/pass_through/mistral.md
@@ -0,0 +1,217 @@
+# Mistral
+
+Pass-through endpoints for Mistral - call provider-specific endpoint, in native format (no translation).
+
+| Feature | Supported | Notes |
+|-------|-------|-------|
+| Cost Tracking | ❌ | Not supported |
+| Logging | ✅ | works across all integrations |
+| End-user Tracking | ❌ | [Tell us if you need this](https://github.com/BerriAI/litellm/issues/new) |
+| Streaming | ✅ | |
+
+Just replace `https://api.mistral.ai/v1` with `LITELLM_PROXY_BASE_URL/mistral` 🚀
+
+#### **Example Usage**
+
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/mistral/v1/ocr' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "mistral-ocr-latest",
+ "document": {
+ "type": "image_url",
+ "image_url": "https://raw.githubusercontent.com/mistralai/cookbook/refs/heads/main/mistral/ocr/receipt.png"
+ }
+
+}'
+```
+
+Supports **ALL** Mistral Endpoints (including streaming).
+
+## Quick Start
+
+Let's call the Mistral [`/ocr` endpoint](https://docs.mistral.ai/api/)
+
+1. Add MISTRAL_API_KEY to your environment
+
+```bash
+export MISTRAL_API_KEY="sk-1234"
+```
+
+2. Start LiteLLM Proxy
+
+```bash
+litellm
+
+# RUNNING on http://0.0.0.0:4000
+```
+
+3. Test it!
+
+Let's call the Mistral `/ocr` endpoint
+
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/mistral/v1/ocr' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "mistral-ocr-latest",
+ "document": {
+ "type": "image_url",
+ "image_url": "https://raw.githubusercontent.com/mistralai/cookbook/refs/heads/main/mistral/ocr/receipt.png"
+ }
+
+}'
+```
+
+
+## Examples
+
+Anything after `http://0.0.0.0:4000/mistral` is treated as a provider-specific route, and handled accordingly.
+
+Key Changes:
+
+| **Original Endpoint** | **Replace With** |
+|------------------------------------------------------|-----------------------------------|
+| `https://api.mistral.ai/v1` | `http://0.0.0.0:4000/mistral` (LITELLM_PROXY_BASE_URL="http://0.0.0.0:4000") |
+| `bearer $MISTRAL_API_KEY` | `bearer anything` (use `bearer LITELLM_VIRTUAL_KEY` if Virtual Keys are setup on proxy) |
+
+
+### **Example 1: OCR endpoint**
+
+#### LiteLLM Proxy Call
+
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/mistral/v1/ocr' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer $LITELLM_API_KEY' \
+-d '{
+ "model": "mistral-ocr-latest",
+ "document": {
+ "type": "image_url",
+ "image_url": "https://raw.githubusercontent.com/mistralai/cookbook/refs/heads/main/mistral/ocr/receipt.png"
+ }
+}'
+```
+
+
+#### Direct Mistral API Call
+
+```bash
+curl https://api.mistral.ai/v1/ocr \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer ${MISTRAL_API_KEY}" \
+ -d '{
+ "model": "mistral-ocr-latest",
+ "document": {
+ "type": "document_url",
+ "document_url": "https://arxiv.org/pdf/2201.04234"
+ },
+ "include_image_base64": true
+ }'
+```
+
+### **Example 2: Chat API**
+
+#### LiteLLM Proxy Call
+
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/mistral/v1/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer $LITELLM_VIRTUAL_KEY' \
+-d '{
+ "messages": [
+ {
+ "role": "user",
+ "content": "I am going to Paris, what should I see?"
+ }
+ ],
+ "max_tokens": 2048,
+ "temperature": 0.8,
+ "top_p": 0.1,
+ "model": "mistral-large-latest",
+}'
+```
+
+#### Direct Mistral API Call
+
+```bash
+curl -L -X POST 'https://api.mistral.ai/v1/chat/completions' \
+-H 'Content-Type: application/json' \
+-H "Authorization: Bearer ${MISTRAL_API_KEY}" \
+-d '{
+ "messages": [
+ {
+ "role": "user",
+ "content": "I am going to Paris, what should I see?"
+ }
+ ],
+ "max_tokens": 2048,
+ "temperature": 0.8,
+ "top_p": 0.1,
+ "model": "mistral-large-latest",
+}'
+```
+
+
+## Advanced - Use with Virtual Keys
+
+Pre-requisites
+- [Setup proxy with DB](../proxy/virtual_keys.md#setup)
+
+Use this to avoid giving developers the raw Mistral API key, while still letting them use Mistral endpoints.
+
+### Usage
+
+1. Setup environment
+
+```bash
+export DATABASE_URL=""
+export LITELLM_MASTER_KEY=""
+export MISTRAL_API_BASE=""
+```
+
+```bash
+litellm
+
+# RUNNING on http://0.0.0.0:4000
+```
+
+2. Generate virtual key
+
+```bash
+curl -X POST 'http://0.0.0.0:4000/key/generate' \
+-H 'Authorization: Bearer sk-1234' \
+-H 'Content-Type: application/json' \
+-d '{}'
+```
+
+Expected Response
+
+```bash
+{
+ ...
+ "key": "sk-1234ewknldferwedojwojw"
+}
+```
+
+3. Test it!
+
+
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/mistral/v1/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234ewknldferwedojwojw' \
+ --data '{
+ "messages": [
+ {
+ "role": "user",
+ "content": "I am going to Paris, what should I see?"
+ }
+ ],
+ "max_tokens": 2048,
+ "temperature": 0.8,
+ "top_p": 0.1,
+ "model": "qwen2.5-7b-instruct",
+}'
+```
\ No newline at end of file
diff --git a/docs/my-website/docs/pass_through/vertex_ai.md b/docs/my-website/docs/pass_through/vertex_ai.md
index ce366af541..f40dfa70eb 100644
--- a/docs/my-website/docs/pass_through/vertex_ai.md
+++ b/docs/my-website/docs/pass_through/vertex_ai.md
@@ -13,8 +13,102 @@ Pass-through endpoints for Vertex AI - call provider-specific endpoint, in nativ
| End-user Tracking | ❌ | [Tell us if you need this](https://github.com/BerriAI/litellm/issues/new) |
| Streaming | ✅ | |
+## Supported Endpoints
+
+LiteLLM supports 2 vertex ai passthrough routes:
+
+1. `/vertex_ai` → routes to `https://{vertex_location}-aiplatform.googleapis.com/`
+2. `/vertex_ai/discovery` → routes to [`https://discoveryengine.googleapis.com`](https://discoveryengine.googleapis.com/)
+
+## How to use
+
Just replace `https://REGION-aiplatform.googleapis.com` with `LITELLM_PROXY_BASE_URL/vertex_ai`
+LiteLLM supports 3 flows for calling Vertex AI endpoints via pass-through:
+
+1. **Specific Credentials**: Admin sets passthrough credentials for a specific project/region.
+
+2. **Default Credentials**: Admin sets default credentials.
+
+3. **Client-Side Credentials**: User can send client-side credentials through to Vertex AI (default behavior - if no default or mapped credentials are found, the request is passed through directly).
+
+
+## Example Usage
+
+
+
+
+```yaml
+model_list:
+ - model_name: gemini-1.0-pro
+ litellm_params:
+ model: vertex_ai/gemini-1.0-pro
+ vertex_project: adroit-crow-413218
+ vertex_region: us-central1
+ vertex_credentials: /path/to/credentials.json
+ use_in_pass_through: true # 👈 KEY CHANGE
+```
+
+
+
+
+
+
+
+```yaml
+default_vertex_config:
+ vertex_project: adroit-crow-413218
+ vertex_region: us-central1
+ vertex_credentials: /path/to/credentials.json
+```
+
+
+
+```bash
+export DEFAULT_VERTEXAI_PROJECT="adroit-crow-413218"
+export DEFAULT_VERTEXAI_LOCATION="us-central1"
+export DEFAULT_GOOGLE_APPLICATION_CREDENTIALS="/path/to/credentials.json"
+```
+
+
+
+
+
+
+Try Gemini 2.0 Flash (curl)
+
+```bash
+MODEL_ID="gemini-2.0-flash-001"
+PROJECT_ID="YOUR_PROJECT_ID"
+```
+
+```bash
+curl \
+ -X POST \
+ -H "Authorization: Bearer $(gcloud auth application-default print-access-token)" \
+ -H "Content-Type: application/json" \
+ "${LITELLM_PROXY_BASE_URL}/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/${MODEL_ID}:streamGenerateContent" -d \
+ $'{
+ "contents": {
+ "role": "user",
+ "parts": [
+ {
+ "fileData": {
+ "mimeType": "image/png",
+ "fileUri": "gs://generativeai-downloads/images/scones.jpg"
+ }
+ },
+ {
+ "text": "Describe this picture."
+ }
+ ]
+ }
+ }'
+```
+
+
+
+
#### **Example Usage**
@@ -22,7 +116,7 @@ Just replace `https://REGION-aiplatform.googleapis.com` with `LITELLM_PROXY_BASE
```bash
-curl http://localhost:4000/vertex_ai/publishers/google/models/gemini-1.0-pro:generateContent \
+curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/${MODEL_ID}:generateContent \
-H "Content-Type: application/json" \
-H "x-litellm-api-key: Bearer sk-1234" \
-d '{
@@ -101,7 +195,7 @@ litellm
Let's call the Google AI Studio token counting endpoint
```bash
-curl http://localhost:4000/vertex-ai/publishers/google/models/gemini-1.0-pro:generateContent \
+curl http://localhost:4000/vertex-ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/gemini-1.0-pro:generateContent \
-H "Content-Type: application/json" \
-H "Authorization: Bearer sk-1234" \
-d '{
@@ -140,7 +234,7 @@ LiteLLM Proxy Server supports two methods of authentication to Vertex AI:
```shell
-curl http://localhost:4000/vertex_ai/publishers/google/models/gemini-1.5-flash-001:generateContent \
+curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/gemini-1.5-flash-001:generateContent \
-H "Content-Type: application/json" \
-H "x-litellm-api-key: Bearer sk-1234" \
-d '{"contents":[{"role": "user", "parts":[{"text": "hi"}]}]}'
@@ -152,7 +246,7 @@ curl http://localhost:4000/vertex_ai/publishers/google/models/gemini-1.5-flash-0
```shell
-curl http://localhost:4000/vertex_ai/publishers/google/models/textembedding-gecko@001:predict \
+curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/textembedding-gecko@001:predict \
-H "Content-Type: application/json" \
-H "x-litellm-api-key: Bearer sk-1234" \
-d '{"instances":[{"content": "gm"}]}'
@@ -162,7 +256,7 @@ curl http://localhost:4000/vertex_ai/publishers/google/models/textembedding-geck
### Imagen API
```shell
-curl http://localhost:4000/vertex_ai/publishers/google/models/imagen-3.0-generate-001:predict \
+curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/imagen-3.0-generate-001:predict \
-H "Content-Type: application/json" \
-H "x-litellm-api-key: Bearer sk-1234" \
-d '{"instances":[{"prompt": "make an otter"}], "parameters": {"sampleCount": 1}}'
@@ -172,7 +266,7 @@ curl http://localhost:4000/vertex_ai/publishers/google/models/imagen-3.0-generat
### Count Tokens API
```shell
-curl http://localhost:4000/vertex_ai/publishers/google/models/gemini-1.5-flash-001:countTokens \
+curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/gemini-1.5-flash-001:countTokens \
-H "Content-Type: application/json" \
-H "x-litellm-api-key: Bearer sk-1234" \
-d '{"contents":[{"role": "user", "parts":[{"text": "hi"}]}]}'
@@ -183,7 +277,7 @@ Create Fine Tuning Job
```shell
-curl http://localhost:4000/vertex_ai/tuningJobs \
+curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/tuningJobs \
-H "Content-Type: application/json" \
-H "x-litellm-api-key: Bearer sk-1234" \
-d '{
@@ -243,7 +337,7 @@ Expected Response
```bash
-curl http://localhost:4000/vertex_ai/publishers/google/models/gemini-1.0-pro:generateContent \
+curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/gemini-1.0-pro:generateContent \
-H "Content-Type: application/json" \
-H "x-litellm-api-key: Bearer sk-1234" \
-d '{
@@ -268,7 +362,7 @@ tags: ["vertex-js-sdk", "pass-through-endpoint"]
```bash
-curl http://localhost:4000/vertex-ai/publishers/google/models/gemini-1.0-pro:generateContent \
+curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/gemini-1.0-pro:generateContent \
-H "Content-Type: application/json" \
-H "x-litellm-api-key: Bearer sk-1234" \
-H "tags: vertex-js-sdk,pass-through-endpoint" \
diff --git a/docs/my-website/docs/pass_through/vllm.md b/docs/my-website/docs/pass_through/vllm.md
new file mode 100644
index 0000000000..b267622948
--- /dev/null
+++ b/docs/my-website/docs/pass_through/vllm.md
@@ -0,0 +1,185 @@
+# VLLM
+
+Pass-through endpoints for VLLM - call provider-specific endpoint, in native format (no translation).
+
+| Feature | Supported | Notes |
+|-------|-------|-------|
+| Cost Tracking | ❌ | Not supported |
+| Logging | ✅ | works across all integrations |
+| End-user Tracking | ❌ | [Tell us if you need this](https://github.com/BerriAI/litellm/issues/new) |
+| Streaming | ✅ | |
+
+Just replace `https://my-vllm-server.com` with `LITELLM_PROXY_BASE_URL/vllm` 🚀
+
+#### **Example Usage**
+
+```bash
+curl -L -X GET 'http://0.0.0.0:4000/vllm/metrics' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234'
+```
+
+Supports **ALL** VLLM Endpoints (including streaming).
+
+## Quick Start
+
+Let's call the VLLM [`/metrics` endpoint](https://vllm.readthedocs.io/en/latest/api_reference/api_reference.html)
+
+1. Add `HOSTED_VLLM_API_BASE` to your environment
+
+```bash
+export HOSTED_VLLM_API_BASE="https://my-vllm-server.com"
+```
+
+2. Start LiteLLM Proxy
+
+```bash
+litellm
+
+# RUNNING on http://0.0.0.0:4000
+```
+
+3. Test it!
+
+Let's call the VLLM `/metrics` endpoint
+
+```bash
+curl -L -X GET 'http://0.0.0.0:4000/vllm/metrics' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234'
+```
+
+
+## Examples
+
+Anything after `http://0.0.0.0:4000/vllm` is treated as a provider-specific route, and handled accordingly.
+
+Key Changes:
+
+| **Original Endpoint** | **Replace With** |
+|------------------------------------------------------|-----------------------------------|
+| `https://my-vllm-server.com` | `http://0.0.0.0:4000/vllm` (LITELLM_PROXY_BASE_URL="http://0.0.0.0:4000") |
+| `bearer $VLLM_API_KEY` | `bearer anything` (use `bearer LITELLM_VIRTUAL_KEY` if Virtual Keys are setup on proxy) |
+
+
+### **Example 1: Metrics endpoint**
+
+#### LiteLLM Proxy Call
+
+```bash
+curl -L -X GET 'http://0.0.0.0:4000/vllm/metrics' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer $LITELLM_VIRTUAL_KEY'
+```
+
+
+#### Direct VLLM API Call
+
+```bash
+curl -L -X GET 'https://my-vllm-server.com/metrics' \
+-H 'Content-Type: application/json'
+```
+
+### **Example 2: Chat API**
+
+#### LiteLLM Proxy Call
+
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/vllm/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer $LITELLM_VIRTUAL_KEY' \
+-d '{
+ "messages": [
+ {
+ "role": "user",
+ "content": "I am going to Paris, what should I see?"
+ }
+ ],
+ "max_tokens": 2048,
+ "temperature": 0.8,
+ "top_p": 0.1,
+ "model": "qwen2.5-7b-instruct",
+}'
+```
+
+#### Direct VLLM API Call
+
+```bash
+curl -L -X POST 'https://my-vllm-server.com/chat/completions' \
+-H 'Content-Type: application/json' \
+-d '{
+ "messages": [
+ {
+ "role": "user",
+ "content": "I am going to Paris, what should I see?"
+ }
+ ],
+ "max_tokens": 2048,
+ "temperature": 0.8,
+ "top_p": 0.1,
+ "model": "qwen2.5-7b-instruct",
+}'
+```
+
+
+## Advanced - Use with Virtual Keys
+
+Pre-requisites
+- [Setup proxy with DB](../proxy/virtual_keys.md#setup)
+
+Use this to avoid giving developers the raw VLLM API key, while still letting them use VLLM endpoints.
+
+### Usage
+
+1. Setup environment
+
+```bash
+export DATABASE_URL=""
+export LITELLM_MASTER_KEY=""
+export HOSTED_VLLM_API_BASE=""
+```
+
+```bash
+litellm
+
+# RUNNING on http://0.0.0.0:4000
+```
+
+2. Generate virtual key
+
+```bash
+curl -X POST 'http://0.0.0.0:4000/key/generate' \
+-H 'Authorization: Bearer sk-1234' \
+-H 'Content-Type: application/json' \
+-d '{}'
+```
+
+Expected Response
+
+```bash
+{
+ ...
+ "key": "sk-1234ewknldferwedojwojw"
+}
+```
+
+3. Test it!
+
+
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/vllm/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234ewknldferwedojwojw' \
+ --data '{
+ "messages": [
+ {
+ "role": "user",
+ "content": "I am going to Paris, what should I see?"
+ }
+ ],
+ "max_tokens": 2048,
+ "temperature": 0.8,
+ "top_p": 0.1,
+ "model": "qwen2.5-7b-instruct",
+}'
+```
\ No newline at end of file
diff --git a/docs/my-website/docs/providers/anthropic.md b/docs/my-website/docs/providers/anthropic.md
index 55e9ba10d3..9e4f6908a4 100644
--- a/docs/my-website/docs/providers/anthropic.md
+++ b/docs/my-website/docs/providers/anthropic.md
@@ -821,6 +821,14 @@ print(f"\nResponse: {resp}")
## Usage - Thinking / `reasoning_content`
+LiteLLM translates OpenAI's `reasoning_effort` to Anthropic's `thinking` parameter. [Code](https://github.com/BerriAI/litellm/blob/23051d89dd3611a81617d84277059cd88b2df511/litellm/llms/anthropic/chat/transformation.py#L298)
+
+| reasoning_effort | thinking |
+| ---------------- | -------- |
+| "low" | "budget_tokens": 1024 |
+| "medium" | "budget_tokens": 2048 |
+| "high" | "budget_tokens": 4096 |
+
@@ -830,7 +838,7 @@ from litellm import completion
resp = completion(
model="anthropic/claude-3-7-sonnet-20250219",
messages=[{"role": "user", "content": "What is the capital of France?"}],
- thinking={"type": "enabled", "budget_tokens": 1024},
+ reasoning_effort="low",
)
```
@@ -863,7 +871,7 @@ curl http://0.0.0.0:4000/v1/chat/completions \
-d '{
"model": "claude-3-7-sonnet-20250219",
"messages": [{"role": "user", "content": "What is the capital of France?"}],
- "thinking": {"type": "enabled", "budget_tokens": 1024}
+ "reasoning_effort": "low"
}'
```
@@ -927,6 +935,44 @@ ModelResponse(
)
```
+### Pass `thinking` to Anthropic models
+
+You can also pass the `thinking` parameter to Anthropic models.
+
+
+
+
+
+```python
+response = litellm.completion(
+ model="anthropic/claude-3-7-sonnet-20250219",
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
+ thinking={"type": "enabled", "budget_tokens": 1024},
+)
+```
+
+
+
+
+```bash
+curl http://0.0.0.0:4000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $LITELLM_KEY" \
+ -d '{
+ "model": "anthropic/claude-3-7-sonnet-20250219",
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
+ "thinking": {"type": "enabled", "budget_tokens": 1024}
+ }'
+```
+
+
+
+
+
+
+
## **Passing Extra Headers to Anthropic API**
Pass `extra_headers: dict` to `litellm.completion`
@@ -1035,8 +1081,10 @@ response = completion(
"content": [
{"type": "text", "text": "You are a very professional document summarization specialist. Please summarize the given document."},
{
- "type": "image_url",
- "image_url": f"data:application/pdf;base64,{encoded_file}", # 👈 PDF
+ "type": "file",
+ "file": {
+ "file_data": f"data:application/pdf;base64,{encoded_file}", # 👈 PDF
+ }
},
],
}
@@ -1081,8 +1129,10 @@ curl http://0.0.0.0:4000/v1/chat/completions \
"text": "You are a very professional document summarization specialist. Please summarize the given document"
},
{
- "type": "image_url",
- "image_url": "data:application/pdf;base64,{encoded_file}" # 👈 PDF
+ "type": "file",
+ "file": {
+ "file_data": f"data:application/pdf;base64,{encoded_file}", # 👈 PDF
+ }
}
}
]
diff --git a/docs/my-website/docs/providers/azure.md b/docs/my-website/docs/providers/azure.md
index 111738a449..e58d8a7b5d 100644
--- a/docs/my-website/docs/providers/azure.md
+++ b/docs/my-website/docs/providers/azure.md
@@ -291,14 +291,15 @@ response = completion(
)
```
-## Azure O1 Models
+## O-Series Models
-| Model Name | Function Call |
-|---------------------|----------------------------------------------------|
-| o1-mini | `response = completion(model="azure/", messages=messages)` |
-| o1-preview | `response = completion(model="azure/", messages=messages)` |
+Azure OpenAI O-Series models are supported on LiteLLM.
-Set `litellm.enable_preview_features = True` to use Azure O1 Models with streaming support.
+LiteLLM routes any deployment name with `o1` or `o3` in the model name to the O-Series [transformation](https://github.com/BerriAI/litellm/blob/91ed05df2962b8eee8492374b048d27cc144d08c/litellm/llms/azure/chat/o1_transformation.py#L4) logic.
+
+To set this explicitly, set `model` to `azure/o_series/`.
+
+**Automatic Routing**
@@ -306,60 +307,112 @@ Set `litellm.enable_preview_features = True` to use Azure O1 Models with streami
```python
import litellm
-litellm.enable_preview_features = True # 👈 KEY CHANGE
-
-response = litellm.completion(
- model="azure/",
- messages=[{"role": "user", "content": "What is the weather like in Boston?"}],
- stream=True
-)
-
-for chunk in response:
- print(chunk)
+litellm.completion(model="azure/my-o3-deployment", messages=[{"role": "user", "content": "Hello, world!"}]) # 👈 Note: 'o3' in the deployment name
```
-
+
-1. Setup config.yaml
```yaml
model_list:
- - model_name: o1-mini
+ - model_name: o3-mini
litellm_params:
- model: azure/o1-mini
- api_base: "os.environ/AZURE_API_BASE"
- api_key: "os.environ/AZURE_API_KEY"
- api_version: "os.environ/AZURE_API_VERSION"
-
-litellm_settings:
- enable_preview_features: true # 👈 KEY CHANGE
+ model: azure/o3-model
+ api_base: os.environ/AZURE_API_BASE
+ api_key: os.environ/AZURE_API_KEY
```
-2. Start proxy
+
+
+
+**Explicit Routing**
+
+
+
+
+```python
+import litellm
+
+litellm.completion(model="azure/o_series/my-random-deployment-name", messages=[{"role": "user", "content": "Hello, world!"}]) # 👈 Note: 'o_series/' in the deployment name
+```
+
+
+
+```yaml
+model_list:
+ - model_name: o3-mini
+ litellm_params:
+ model: azure/o_series/my-random-deployment-name
+ api_base: os.environ/AZURE_API_BASE
+ api_key: os.environ/AZURE_API_KEY
+```
+
+
+
+
+## Azure Audio Model
+
+
+
+
+```python
+from litellm import completion
+import os
+
+os.environ["AZURE_API_KEY"] = ""
+os.environ["AZURE_API_BASE"] = ""
+os.environ["AZURE_API_VERSION"] = ""
+
+response = completion(
+ model="azure/azure-openai-4o-audio",
+ messages=[
+ {
+ "role": "user",
+ "content": "I want to try out speech to speech"
+ }
+ ],
+ modalities=["text","audio"],
+ audio={"voice": "alloy", "format": "wav"}
+)
+
+print(response)
+```
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: azure-openai-4o-audio
+ litellm_params:
+ model: azure/azure-openai-4o-audio
+ api_base: os.environ/AZURE_API_BASE
+ api_key: os.environ/AZURE_API_KEY
+ api_version: os.environ/AZURE_API_VERSION
+```
+
+2. Start proxy
```bash
litellm --config /path/to/config.yaml
```
-3. Test it
+3. Test it!
-```python
-import openai
-client = openai.OpenAI(
- api_key="anything",
- base_url="http://0.0.0.0:4000"
-)
-response = client.chat.completions.create(model="o1-mini", messages = [
- {
- "role": "user",
- "content": "this is a test request, write a short poem"
- }
-],
-stream=True)
-
-for chunk in response:
- print(chunk)
+```bash
+curl http://localhost:4000/v1/chat/completions \
+ -H "Authorization: Bearer $LITELLM_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": "azure-openai-4o-audio",
+ "messages": [{"role": "user", "content": "I want to try out speech to speech"}],
+ "modalities": ["text","audio"],
+ "audio": {"voice": "alloy", "format": "wav"}
+ }'
```
+
+
@@ -425,7 +478,7 @@ response.stream_to_file(speech_file_path)
## **Authentication**
-### Entrata ID - use `azure_ad_token`
+### Entra ID - use `azure_ad_token`
This is a walkthrough on how to use Azure Active Directory Tokens - Microsoft Entra ID to make `litellm.completion()` calls
@@ -492,7 +545,7 @@ model_list:
-### Entrata ID - use tenant_id, client_id, client_secret
+### Entra ID - use tenant_id, client_id, client_secret
Here is an example of setting up `tenant_id`, `client_id`, `client_secret` in your litellm proxy `config.yaml`
```yaml
@@ -528,7 +581,7 @@ Example video of using `tenant_id`, `client_id`, `client_secret` with LiteLLM Pr
-### Entrata ID - use client_id, username, password
+### Entra ID - use client_id, username, password
Here is an example of setting up `client_id`, `azure_username`, `azure_password` in your litellm proxy `config.yaml`
```yaml
@@ -948,60 +1001,124 @@ Expected Response:
{"data":[{"id":"batch_R3V...}
```
-## O-Series Models
-Azure OpenAI O-Series models are supported on LiteLLM.
+## **Azure Responses API**
-LiteLLM routes any deployment name with `o1` or `o3` in the model name, to the O-Series [transformation](https://github.com/BerriAI/litellm/blob/91ed05df2962b8eee8492374b048d27cc144d08c/litellm/llms/azure/chat/o1_transformation.py#L4) logic.
+| Property | Details |
+|-------|-------|
+| Description | Azure OpenAI Responses API |
+| `custom_llm_provider` on LiteLLM | `azure/` |
+| Supported Operations | `/v1/responses`|
+| Azure OpenAI Responses API | [Azure OpenAI Responses API ↗](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/responses?tabs=python-secure) |
+| Cost Tracking, Logging Support | ✅ LiteLLM will log, track cost for Responses API Requests |
+| Supported OpenAI Params | ✅ All OpenAI params are supported, [See here](https://github.com/BerriAI/litellm/blob/0717369ae6969882d149933da48eeb8ab0e691bd/litellm/llms/openai/responses/transformation.py#L23) |
-To set this explicitly, set `model` to `azure/o_series/`.
+## Usage
-**Automatic Routing**
+## Create a model response
-
+
-```python
+#### Non-streaming
+
+```python showLineNumbers title="Azure Responses API"
import litellm
-litellm.completion(model="azure/my-o3-deployment", messages=[{"role": "user", "content": "Hello, world!"}]) # 👈 Note: 'o3' in the deployment name
-```
-
-
+import os
+
+# Non-streaming response
+response = litellm.responses(
+ model="azure/o1-pro",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ max_output_tokens=100,
+ api_key=os.getenv("AZURE_RESPONSES_OPENAI_API_KEY"),
+ api_base="https://litellm8397336933.openai.azure.com/",
+ api_version="2023-03-15-preview",
+)
-```yaml
-model_list:
- - model_name: o3-mini
- litellm_params:
- model: azure/o3-model
- api_base: os.environ/AZURE_API_BASE
- api_key: os.environ/AZURE_API_KEY
+print(response)
```
-
-
-
-**Explicit Routing**
-
-
-
-
-```python
+#### Streaming
+```python showLineNumbers title="Azure Responses API"
import litellm
-litellm.completion(model="azure/o_series/my-random-deployment-name", messages=[{"role": "user", "content": "Hello, world!"}]) # 👈 Note: 'o_series/' in the deployment name
-```
-
-
+import os
+
+# Streaming response
+response = litellm.responses(
+ model="azure/o1-pro",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ stream=True,
+ api_key=os.getenv("AZURE_RESPONSES_OPENAI_API_KEY"),
+ api_base="https://litellm8397336933.openai.azure.com/",
+ api_version="2023-03-15-preview",
+)
-```yaml
-model_list:
- - model_name: o3-mini
- litellm_params:
- model: azure/o_series/my-random-deployment-name
- api_base: os.environ/AZURE_API_BASE
- api_key: os.environ/AZURE_API_KEY
+for event in response:
+ print(event)
```
+
+
+
+
+First, add this to your litellm proxy config.yaml:
+```yaml showLineNumbers title="Azure Responses API"
+model_list:
+ - model_name: o1-pro
+ litellm_params:
+ model: azure/o1-pro
+ api_key: os.environ/AZURE_RESPONSES_OPENAI_API_KEY
+ api_base: https://litellm8397336933.openai.azure.com/
+ api_version: 2023-03-15-preview
+```
+
+Start your LiteLLM proxy:
+```bash
+litellm --config /path/to/config.yaml
+
+# RUNNING on http://0.0.0.0:4000
+```
+
+Then use the OpenAI SDK pointed to your proxy:
+
+#### Non-streaming
+```python showLineNumbers
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Non-streaming response
+response = client.responses.create(
+ model="o1-pro",
+ input="Tell me a three sentence bedtime story about a unicorn."
+)
+
+print(response)
+```
+
+#### Streaming
+```python showLineNumbers
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Streaming response
+response = client.responses.create(
+ model="o1-pro",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ stream=True
+)
+
+for event in response:
+ print(event)
+```
+
@@ -1076,32 +1193,24 @@ print(response)
```
-### Parallel Function calling
+### Tool Calling / Function Calling
+
See a detailed walkthrough of parallel function calling with litellm [here](https://docs.litellm.ai/docs/completion/function_call)
+
+
+
+
+
```python
# set Azure env variables
import os
+import litellm
+import json
+
os.environ['AZURE_API_KEY'] = "" # litellm reads AZURE_API_KEY from .env and sends the request
os.environ['AZURE_API_BASE'] = "https://openai-gpt-4-test-v-1.openai.azure.com/"
os.environ['AZURE_API_VERSION'] = "2023-07-01-preview"
-import litellm
-import json
-# Example dummy function hard coded to return the same weather
-# In production, this could be your backend API or an external API
-def get_current_weather(location, unit="fahrenheit"):
- """Get the current weather in a given location"""
- if "tokyo" in location.lower():
- return json.dumps({"location": "Tokyo", "temperature": "10", "unit": "celsius"})
- elif "san francisco" in location.lower():
- return json.dumps({"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"})
- elif "paris" in location.lower():
- return json.dumps({"location": "Paris", "temperature": "22", "unit": "celsius"})
- else:
- return json.dumps({"location": location, "temperature": "unknown"})
-
-## Step 1: send the conversation and available functions to the model
-messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
tools = [
{
"type": "function",
@@ -1125,7 +1234,7 @@ tools = [
response = litellm.completion(
model="azure/chatgpt-functioncalling", # model = azure/
- messages=messages,
+ messages=[{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}],
tools=tools,
tool_choice="auto", # auto is default, but we'll be explicit
)
@@ -1134,8 +1243,49 @@ response_message = response.choices[0].message
tool_calls = response.choices[0].message.tool_calls
print("\nTool Choice:\n", tool_calls)
```
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: azure-gpt-3.5
+ litellm_params:
+ model: azure/chatgpt-functioncalling
+ api_base: os.environ/AZURE_API_BASE
+ api_key: os.environ/AZURE_API_KEY
+ api_version: "2023-07-01-preview"
+```
+
+2. Start proxy
+
+```bash
+litellm --config config.yaml
+```
+
+3. Test it
+
+```bash
+curl -L -X POST 'http://localhost:4000/v1/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "azure-gpt-3.5",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Hey, how'\''s it going? Thinking long and hard before replying - what is the meaning of the world and life itself"
+ }
+ ]
+}'
+```
+
+
+
+
### Spend Tracking for Azure OpenAI Models (PROXY)
Set base model for cost tracking azure image-gen call
diff --git a/docs/my-website/docs/providers/bedrock.md b/docs/my-website/docs/providers/bedrock.md
index 19b3728882..2a9c528a65 100644
--- a/docs/my-website/docs/providers/bedrock.md
+++ b/docs/my-website/docs/providers/bedrock.md
@@ -79,6 +79,7 @@ aws_session_name: Optional[str],
aws_profile_name: Optional[str],
aws_role_name: Optional[str],
aws_web_identity_token: Optional[str],
+aws_bedrock_runtime_endpoint: Optional[str],
```
### 2. Start the proxy
@@ -475,7 +476,7 @@ os.environ["AWS_REGION_NAME"] = ""
resp = completion(
model="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
messages=[{"role": "user", "content": "What is the capital of France?"}],
- thinking={"type": "enabled", "budget_tokens": 1024},
+ reasoning_effort="low",
)
print(resp)
@@ -490,7 +491,7 @@ model_list:
- model_name: bedrock-claude-3-7
litellm_params:
model: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0
- thinking: {"type": "enabled", "budget_tokens": 1024} # 👈 EITHER HERE OR ON REQUEST
+ reasoning_effort: "low" # 👈 EITHER HERE OR ON REQUEST
```
2. Start proxy
@@ -508,7 +509,7 @@ curl http://0.0.0.0:4000/v1/chat/completions \
-d '{
"model": "bedrock-claude-3-7",
"messages": [{"role": "user", "content": "What is the capital of France?"}],
- "thinking": {"type": "enabled", "budget_tokens": 1024} # 👈 EITHER HERE OR ON CONFIG.YAML
+ "reasoning_effort": "low" # 👈 EITHER HERE OR ON CONFIG.YAML
}'
```
@@ -557,6 +558,10 @@ Same as [Anthropic API response](../providers/anthropic#usage---thinking--reason
}
```
+### Pass `thinking` to Anthropic models
+
+Same as [Anthropic API response](../providers/anthropic#usage---thinking--reasoning_content).
+
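+For example, a minimal SDK sketch (mirroring the Anthropic example, using the Bedrock model name from above):
+
+```python
+from litellm import completion
+
+# Pass the raw Anthropic `thinking` parameter through to the Bedrock model
+resp = completion(
+    model="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
+    messages=[{"role": "user", "content": "What is the capital of France?"}],
+    thinking={"type": "enabled", "budget_tokens": 1024},
+)
+
+print(resp)
+```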
## Usage - Structured Output / JSON mode
@@ -663,6 +668,58 @@ curl http://0.0.0.0:4000/v1/chat/completions \
+## Usage - Latency Optimized Inference
+
+Valid from v1.65.1+
+
+
+
+
+```python
+from litellm import completion
+
+response = completion(
+ model="bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0",
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
+ performanceConfig={"latency": "optimized"},
+)
+```
+
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: bedrock-claude-3-7
+ litellm_params:
+ model: bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0
+ performanceConfig: {"latency": "optimized"} # 👈 EITHER HERE OR ON REQUEST
+```
+
+2. Start proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+3. Test it!
+
+```bash
+curl http://0.0.0.0:4000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $LITELLM_KEY" \
+ -d '{
+ "model": "bedrock-claude-3-7",
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
+ "performanceConfig": {"latency": "optimized"} # 👈 EITHER HERE OR ON CONFIG.YAML
+ }'
+```
+
+
+
+
## Usage - Bedrock Guardrails
Example of using [Bedrock Guardrails with LiteLLM](https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-use-converse-api.html)
@@ -1115,14 +1172,22 @@ os.environ["AWS_REGION_NAME"] = ""
# pdf url
image_url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
+# Download the file
+response = requests.get(image_url)
+file_data = response.content
+
+encoded_file = base64.b64encode(file_data).decode("utf-8")
+
# model
model = "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0"
image_content = [
{"type": "text", "text": "What's this file about?"},
{
- "type": "image_url",
- "image_url": image_url, # OR {"url": image_url}
+ "type": "file",
+ "file": {
+ "file_data": f"data:application/pdf;base64,{encoded_file}", # 👈 PDF
+ }
},
]
@@ -1168,8 +1233,10 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
"messages": [
{"role": "user", "content": {"type": "text", "text": "What's this file about?"}},
{
- "type": "image_url",
- "image_url": "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf",
+ "type": "file",
+ "file": {
+ "file_data": f"data:application/pdf;base64,{encoded_file}", # 👈 PDF
+ }
}
]
}'
@@ -1427,10 +1494,14 @@ response = litellm.embedding(
## Supported AWS Bedrock Models
+
+LiteLLM supports ALL Bedrock models.
+
Here's an example of using a bedrock model with LiteLLM. For a complete list, refer to the [model cost map](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json)
| Model Name | Command |
|----------------------------|------------------------------------------------------------------|
+| Deepseek R1 | `completion(model='bedrock/us.deepseek.r1-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` |
| Anthropic Claude-V3.5 Sonnet | `completion(model='bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` |
| Anthropic Claude-V3 sonnet | `completion(model='bedrock/anthropic.claude-3-sonnet-20240229-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` |
| Anthropic Claude-V3 Haiku | `completion(model='bedrock/anthropic.claude-3-haiku-20240307-v1:0', messages=messages)` | `os.environ['AWS_ACCESS_KEY_ID']`, `os.environ['AWS_SECRET_ACCESS_KEY']` |
@@ -1771,6 +1842,7 @@ response = completion(
)
```
+
1. Setup config.yaml
@@ -1815,11 +1887,13 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
```
+
### SSO Login (AWS Profile)
- Set `AWS_PROFILE` environment variable
- Make bedrock completion call
+
```python
import os
from litellm import completion
@@ -1912,12 +1986,46 @@ model_list:
+Text to Image :
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/v1/images/generations' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer $LITELLM_VIRTUAL_KEY' \
+-d '{
+ "model": "amazon.nova-canvas-v1:0",
+ "prompt": "A cute baby sea otter"
+}'
+```
+Color Guided Generation:
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/v1/images/generations' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer $LITELLM_VIRTUAL_KEY' \
+-d '{
+ "model": "amazon.nova-canvas-v1:0",
+ "prompt": "A cute baby sea otter",
+ "taskType": "COLOR_GUIDED_GENERATION",
+ "colorGuidedGenerationParams":{"colors":["#FFFFFF"]}
+}'
+```
+
+| Model Name | Function Call |
+|-------------------------|---------------------------------------------|
+| Stable Diffusion 3 - v0 | `image_generation(model="bedrock/stability.stability.sd3-large-v1:0", prompt=prompt)` |
+| Stable Diffusion - v0 | `image_generation(model="bedrock/stability.stable-diffusion-xl-v0", prompt=prompt)` |
+| Stable Diffusion - v1 | `image_generation(model="bedrock/stability.stable-diffusion-xl-v1", prompt=prompt)` |
+| Amazon Nova Canvas - v0 | `image_generation(model="bedrock/amazon.nova-canvas-v1:0", prompt=prompt)` |
+
+
### Passing an external BedrockRuntime.Client as a parameter - Completion()
+
+This is a deprecated flow. Boto3 is not async, and boto3.client does not let us make the http call through httpx. Pass in your aws params through the method above 👆. [See Auth Code](https://github.com/BerriAI/litellm/blob/55a20c7cce99a93d36a82bf3ae90ba3baf9a7f89/litellm/llms/bedrock_httpx.py#L284) [Add new auth flow](https://github.com/BerriAI/litellm/issues)
:::warning
-This is a deprecated flow. Boto3 is not async. And boto3.client does not let us make the http call through httpx. Pass in your aws params through the method above 👆. [See Auth Code](https://github.com/BerriAI/litellm/blob/55a20c7cce99a93d36a82bf3ae90ba3baf9a7f89/litellm/llms/bedrock_httpx.py#L284) [Add new auth flow](https://github.com/BerriAI/litellm/issues)
+
+
Experimental - 2024-Jun-23:
diff --git a/docs/my-website/docs/providers/databricks.md b/docs/my-website/docs/providers/databricks.md
index 395a544db4..8631cbfdad 100644
--- a/docs/my-website/docs/providers/databricks.md
+++ b/docs/my-website/docs/providers/databricks.md
@@ -1,7 +1,7 @@
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-# 🆕 Databricks
+# Databricks
LiteLLM supports all models on Databricks
@@ -154,7 +154,205 @@ response = completion(
temperature: 0.5
```
-## Passings Databricks specific params - 'instruction'
+
+## Usage - Thinking / `reasoning_content`
+
+LiteLLM translates OpenAI's `reasoning_effort` to Anthropic's `thinking` parameter. [Code](https://github.com/BerriAI/litellm/blob/23051d89dd3611a81617d84277059cd88b2df511/litellm/llms/anthropic/chat/transformation.py#L298)
+
+| reasoning_effort | thinking |
+| ---------------- | -------- |
+| "low" | "budget_tokens": 1024 |
+| "medium" | "budget_tokens": 2048 |
+| "high" | "budget_tokens": 4096 |
+
+
+Known Limitations:
+- Passing `thinking` blocks back to Claude is not yet supported ([Issue](https://github.com/BerriAI/litellm/issues/9790))
+
+
+
+
+
+```python
+from litellm import completion
+import os
+
+# set ENV variables (can also be passed in to .completion() - e.g. `api_base`, `api_key`)
+os.environ["DATABRICKS_API_KEY"] = "databricks key"
+os.environ["DATABRICKS_API_BASE"] = "databricks base url"
+
+resp = completion(
+ model="databricks/databricks-claude-3-7-sonnet",
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
+ reasoning_effort="low",
+)
+
+```
+
+
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+  - model_name: claude-3-7-sonnet
+    litellm_params:
+      model: databricks/databricks-claude-3-7-sonnet
+      api_key: os.environ/DATABRICKS_API_KEY
+      api_base: os.environ/DATABRICKS_API_BASE
+```
+
+2. Start proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+3. Test it!
+
+```bash
+curl http://0.0.0.0:4000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+  -H "Authorization: Bearer $LITELLM_KEY" \
+ -d '{
+ "model": "claude-3-7-sonnet",
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
+ "reasoning_effort": "low"
+ }'
+```
+
+
+
+
+
+**Expected Response**
+
+```python
+ModelResponse(
+ id='chatcmpl-c542d76d-f675-4e87-8e5f-05855f5d0f5e',
+ created=1740470510,
+ model='claude-3-7-sonnet-20250219',
+ object='chat.completion',
+ system_fingerprint=None,
+ choices=[
+ Choices(
+ finish_reason='stop',
+ index=0,
+ message=Message(
+ content="The capital of France is Paris.",
+ role='assistant',
+ tool_calls=None,
+ function_call=None,
+ provider_specific_fields={
+ 'citations': None,
+ 'thinking_blocks': [
+ {
+ 'type': 'thinking',
+ 'thinking': 'The capital of France is Paris. This is a very straightforward factual question.',
+ 'signature': 'EuYBCkQYAiJAy6...'
+ }
+ ]
+ }
+ ),
+ thinking_blocks=[
+ {
+ 'type': 'thinking',
+ 'thinking': 'The capital of France is Paris. This is a very straightforward factual question.',
+ 'signature': 'EuYBCkQYAiJAy6AGB...'
+ }
+ ],
+ reasoning_content='The capital of France is Paris. This is a very straightforward factual question.'
+ )
+ ],
+ usage=Usage(
+ completion_tokens=68,
+ prompt_tokens=42,
+ total_tokens=110,
+ completion_tokens_details=None,
+ prompt_tokens_details=PromptTokensDetailsWrapper(
+ audio_tokens=None,
+ cached_tokens=0,
+ text_tokens=None,
+ image_tokens=None
+ ),
+ cache_creation_input_tokens=0,
+ cache_read_input_tokens=0
+ )
+)
+```
+
+### Pass `thinking` to Anthropic models
+
+You can also pass the `thinking` parameter to Anthropic models.
+
+
+
+
+
+```python
+from litellm import completion
+import os
+
+# set ENV variables (can also be passed in to .completion() - e.g. `api_base`, `api_key`)
+os.environ["DATABRICKS_API_KEY"] = "databricks key"
+os.environ["DATABRICKS_API_BASE"] = "databricks base url"
+
+response = completion(
+ model="databricks/databricks-claude-3-7-sonnet",
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
+ thinking={"type": "enabled", "budget_tokens": 1024},
+)
+```
+
+
+
+
+```bash
+curl http://0.0.0.0:4000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $LITELLM_KEY" \
+ -d '{
+ "model": "databricks/databricks-claude-3-7-sonnet",
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
+ "thinking": {"type": "enabled", "budget_tokens": 1024}
+ }'
+```
+
+
+
+
+
+
+
+
+## Supported Databricks Chat Completion Models
+
+:::tip
+
+**We support ALL Databricks models, just set `model=databricks/` as a prefix when sending litellm requests**
+
+:::
+
+
+| Model Name | Command |
+|----------------------------|------------------------------------------------------------------|
+| databricks/databricks-claude-3-7-sonnet | `completion(model='databricks/databricks-claude-3-7-sonnet', messages=messages)` |
+| databricks-meta-llama-3-1-70b-instruct | `completion(model='databricks/databricks-meta-llama-3-1-70b-instruct', messages=messages)` |
+| databricks-meta-llama-3-1-405b-instruct | `completion(model='databricks/databricks-meta-llama-3-1-405b-instruct', messages=messages)` |
+| databricks-dbrx-instruct | `completion(model='databricks/databricks-dbrx-instruct', messages=messages)` |
+| databricks-meta-llama-3-70b-instruct | `completion(model='databricks/databricks-meta-llama-3-70b-instruct', messages=messages)` |
+| databricks-llama-2-70b-chat | `completion(model='databricks/databricks-llama-2-70b-chat', messages=messages)` |
+| databricks-mixtral-8x7b-instruct | `completion(model='databricks/databricks-mixtral-8x7b-instruct', messages=messages)` |
+| databricks-mpt-30b-instruct | `completion(model='databricks/databricks-mpt-30b-instruct', messages=messages)` |
+| databricks-mpt-7b-instruct | `completion(model='databricks/databricks-mpt-7b-instruct', messages=messages)` |
+
+
+## Embedding Models
+
+### Passing Databricks specific params - 'instruction'
For embedding models, databricks lets you pass in an additional param 'instruction'. [Full Spec](https://github.com/BerriAI/litellm/blob/43353c28b341df0d9992b45c6ce464222ebd7984/litellm/llms/databricks.py#L164)
@@ -187,27 +385,6 @@ response = litellm.embedding(
instruction: "Represent this sentence for searching relevant passages:"
```
-
-## Supported Databricks Chat Completion Models
-
-:::tip
-
-**We support ALL Databricks models, just set `model=databricks/` as a prefix when sending litellm requests**
-
-:::
-
-
-| Model Name | Command |
-|----------------------------|------------------------------------------------------------------|
-| databricks-meta-llama-3-1-70b-instruct | `completion(model='databricks/databricks-meta-llama-3-1-70b-instruct', messages=messages)` |
-| databricks-meta-llama-3-1-405b-instruct | `completion(model='databricks/databricks-meta-llama-3-1-405b-instruct', messages=messages)` |
-| databricks-dbrx-instruct | `completion(model='databricks/databricks-dbrx-instruct', messages=messages)` |
-| databricks-meta-llama-3-70b-instruct | `completion(model='databricks/databricks-meta-llama-3-70b-instruct', messages=messages)` |
-| databricks-llama-2-70b-chat | `completion(model='databricks/databricks-llama-2-70b-chat', messages=messages)` |
-| databricks-mixtral-8x7b-instruct | `completion(model='databricks/databricks-mixtral-8x7b-instruct', messages=messages)` |
-| databricks-mpt-30b-instruct | `completion(model='databricks/databricks-mpt-30b-instruct', messages=messages)` |
-| databricks-mpt-7b-instruct | `completion(model='databricks/databricks-mpt-7b-instruct', messages=messages)` |
-
## Supported Databricks Embedding Models
:::tip
diff --git a/docs/my-website/docs/providers/gemini.md b/docs/my-website/docs/providers/gemini.md
index 4a6cfdf1a3..434df6a7c9 100644
--- a/docs/my-website/docs/providers/gemini.md
+++ b/docs/my-website/docs/providers/gemini.md
@@ -39,14 +39,164 @@ response = completion(
- temperature
- top_p
- max_tokens
+- max_completion_tokens
- stream
- tools
- tool_choice
+- functions
- response_format
- n
- stop
+- logprobs
+- frequency_penalty
+- modalities
+- reasoning_content
+
+**Anthropic Params**
+- thinking (used to set max budget tokens across anthropic/gemini models)
+
+[**See Updated List**](https://github.com/BerriAI/litellm/blob/main/litellm/llms/gemini/chat/transformation.py#L70)
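+
+For example, a quick sketch combining several of the params above in one Gemini call (the API key value is a placeholder):
+
+```python
+import os
+from litellm import completion
+
+os.environ["GEMINI_API_KEY"] = ""  # placeholder
+
+response = completion(
+    model="gemini/gemini-2.0-flash",
+    messages=[{"role": "user", "content": "Name three fruits."}],
+    temperature=0.2,
+    max_completion_tokens=100,
+    n=1,
+    stop=["\n\n"],
+)
+print(response.choices[0].message.content)
+```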
+
+
+
+## Usage - Thinking / `reasoning_content`
+
+LiteLLM translates OpenAI's `reasoning_effort` to Gemini's `thinking` parameter. [Code](https://github.com/BerriAI/litellm/blob/620664921902d7a9bfb29897a7b27c1a7ef4ddfb/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py#L362)
+
+**Mapping**
+
+| reasoning_effort | thinking |
+| ---------------- | -------- |
+| "low" | "budget_tokens": 1024 |
+| "medium" | "budget_tokens": 2048 |
+| "high" | "budget_tokens": 4096 |
+
+
+
+
+```python
+from litellm import completion
+
+resp = completion(
+ model="gemini/gemini-2.5-flash-preview-04-17",
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
+ reasoning_effort="low",
+)
+
+```
+
+
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+  - model_name: gemini-2.5-flash
+    litellm_params:
+      model: gemini/gemini-2.5-flash-preview-04-17
+      api_key: os.environ/GEMINI_API_KEY
+```
+
+2. Start proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+3. Test it!
+
+```bash
+curl http://0.0.0.0:4000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+  -H "Authorization: Bearer $LITELLM_KEY" \
+ -d '{
+ "model": "gemini-2.5-flash",
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
+ "reasoning_effort": "low"
+ }'
+```
+
+
+
+
+
+**Expected Response**
+
+```python
+ModelResponse(
+ id='chatcmpl-c542d76d-f675-4e87-8e5f-05855f5d0f5e',
+ created=1740470510,
+    model='gemini-2.5-flash-preview-04-17',
+ object='chat.completion',
+ system_fingerprint=None,
+ choices=[
+ Choices(
+ finish_reason='stop',
+ index=0,
+ message=Message(
+ content="The capital of France is Paris.",
+ role='assistant',
+ tool_calls=None,
+ function_call=None,
+ reasoning_content='The capital of France is Paris. This is a very straightforward factual question.'
+ ),
+ )
+ ],
+ usage=Usage(
+ completion_tokens=68,
+ prompt_tokens=42,
+ total_tokens=110,
+ completion_tokens_details=None,
+ prompt_tokens_details=PromptTokensDetailsWrapper(
+ audio_tokens=None,
+ cached_tokens=0,
+ text_tokens=None,
+ image_tokens=None
+ ),
+ cache_creation_input_tokens=0,
+ cache_read_input_tokens=0
+ )
+)
+```
+
+### Pass `thinking` to Gemini models
+
+You can also pass the `thinking` parameter to Gemini models.
+
+This is translated to Gemini's [`thinkingConfig` parameter](https://ai.google.dev/gemini-api/docs/thinking#set-budget).
+
+
+
+
+```python
+import litellm
+
+response = litellm.completion(
+ model="gemini/gemini-2.5-flash-preview-04-17",
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
+ thinking={"type": "enabled", "budget_tokens": 1024},
+)
+```
+
+
+
+
+```bash
+curl http://0.0.0.0:4000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $LITELLM_KEY" \
+ -d '{
+ "model": "gemini/gemini-2.5-flash-preview-04-17",
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
+ "thinking": {"type": "enabled", "budget_tokens": 1024}
+ }'
+```
+
+
+
+
+
+
-[**See Updated List**](https://github.com/BerriAI/litellm/blob/1c747f3ad372399c5b95cc5696b06a5fbe53186b/litellm/llms/vertex_httpx.py#L122)
## Passing Gemini Specific Params
### Response schema
@@ -365,7 +515,7 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
## Specifying Safety Settings
-In certain use-cases you may need to make calls to the models and pass [safety settigns](https://ai.google.dev/docs/safety_setting_gemini) different from the defaults. To do so, simple pass the `safety_settings` argument to `completion` or `acompletion`. For example:
+In certain use cases you may need to make calls to the models and pass [safety settings](https://ai.google.dev/docs/safety_setting_gemini) different from the defaults. To do so, simply pass the `safety_settings` argument to `completion` or `acompletion`. For example:
```python
response = completion(
@@ -438,6 +588,179 @@ assert isinstance(
```
+### Google Search Tool
+
+
+
+
+```python
+from litellm import completion
+import os
+
+os.environ["GEMINI_API_KEY"] = ".."
+
+tools = [{"googleSearch": {}}] # 👈 ADD GOOGLE SEARCH
+
+response = completion(
+ model="gemini/gemini-2.0-flash",
+ messages=[{"role": "user", "content": "What is the weather in San Francisco?"}],
+ tools=tools,
+)
+
+print(response)
+```
+
+
+
+
+1. Setup config.yaml
+```yaml
+model_list:
+ - model_name: gemini-2.0-flash
+ litellm_params:
+ model: gemini/gemini-2.0-flash
+ api_key: os.environ/GEMINI_API_KEY
+```
+
+2. Start Proxy
+```bash
+$ litellm --config /path/to/config.yaml
+```
+
+3. Make Request!
+```bash
+curl -X POST 'http://0.0.0.0:4000/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "gemini-2.0-flash",
+ "messages": [{"role": "user", "content": "What is the weather in San Francisco?"}],
+ "tools": [{"googleSearch": {}}]
+}
+'
+```
+
+
+
+
+### Google Search Retrieval
+
+
+
+
+
+```python
+from litellm import completion
+import os
+
+os.environ["GEMINI_API_KEY"] = ".."
+
+tools = [{"googleSearchRetrieval": {}}] # 👈 ADD GOOGLE SEARCH
+
+response = completion(
+ model="gemini/gemini-2.0-flash",
+ messages=[{"role": "user", "content": "What is the weather in San Francisco?"}],
+ tools=tools,
+)
+
+print(response)
+```
+
+
+
+
+1. Setup config.yaml
+```yaml
+model_list:
+ - model_name: gemini-2.0-flash
+ litellm_params:
+ model: gemini/gemini-2.0-flash
+ api_key: os.environ/GEMINI_API_KEY
+```
+
+2. Start Proxy
+```bash
+$ litellm --config /path/to/config.yaml
+```
+
+3. Make Request!
+```bash
+curl -X POST 'http://0.0.0.0:4000/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "gemini-2.0-flash",
+ "messages": [{"role": "user", "content": "What is the weather in San Francisco?"}],
+ "tools": [{"googleSearchRetrieval": {}}]
+}
+'
+```
+
+
+
+
+
+### Code Execution Tool
+
+
+
+
+
+```python
+from litellm import completion
+import os
+
+os.environ["GEMINI_API_KEY"] = ".."
+
+tools = [{"codeExecution": {}}] # 👈 ADD GOOGLE SEARCH
+
+response = completion(
+ model="gemini/gemini-2.0-flash",
+ messages=[{"role": "user", "content": "What is the weather in San Francisco?"}],
+ tools=tools,
+)
+
+print(response)
+```
+
+
+
+
+1. Setup config.yaml
+```yaml
+model_list:
+ - model_name: gemini-2.0-flash
+ litellm_params:
+ model: gemini/gemini-2.0-flash
+ api_key: os.environ/GEMINI_API_KEY
+```
+
+2. Start Proxy
+```bash
+$ litellm --config /path/to/config.yaml
+```
+
+3. Make Request!
+```bash
+curl -X POST 'http://0.0.0.0:4000/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "gemini-2.0-flash",
+ "messages": [{"role": "user", "content": "What is the weather in San Francisco?"}],
+ "tools": [{"codeExecution": {}}]
+}
+'
+```
+
+
+
+
+
+
+
+
+
## JSON Mode
@@ -589,8 +912,10 @@ response = litellm.completion(
"content": [
{"type": "text", "text": "Please summarize the audio."},
{
- "type": "image_url",
- "image_url": "data:audio/mp3;base64,{}".format(encoded_data), # 👈 SET MIME_TYPE + DATA
+ "type": "file",
+ "file": {
+ "file_data": "data:audio/mp3;base64,{}".format(encoded_data), # 👈 SET MIME_TYPE + DATA
+ }
},
],
}
@@ -640,8 +965,11 @@ response = litellm.completion(
"content": [
{"type": "text", "text": "Please summarize the file."},
{
- "type": "image_url",
- "image_url": "https://storage..." # 👈 SET THE IMG URL
+ "type": "file",
+ "file": {
+ "file_id": "https://storage...", # 👈 SET THE IMG URL
+ "format": "application/pdf" # OPTIONAL
+ }
},
],
}
@@ -668,8 +996,11 @@ response = litellm.completion(
"content": [
{"type": "text", "text": "Please summarize the file."},
{
- "type": "image_url",
- "image_url": "gs://..." # 👈 SET THE cloud storage bucket url
+ "type": "file",
+ "file": {
+ "file_id": "gs://storage...", # 👈 SET THE IMG URL
+ "format": "application/pdf" # OPTIONAL
+ }
},
],
}
@@ -879,3 +1210,54 @@ response = await client.chat.completions.create(
+
+## Image Generation
+
+
+
+
+```python
+from litellm import completion
+
+response = completion(
+ model="gemini/gemini-2.0-flash-exp-image-generation",
+ messages=[{"role": "user", "content": "Generate an image of a cat"}],
+ modalities=["image", "text"],
+)
+assert response.choices[0].message.content is not None # "data:image/png;base64,e4rr.."
+```
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: gemini-2.0-flash-exp-image-generation
+ litellm_params:
+ model: gemini/gemini-2.0-flash-exp-image-generation
+ api_key: os.environ/GEMINI_API_KEY
+```
+
+2. Start proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+3. Test it!
+
+```bash
+curl -L -X POST 'http://localhost:4000/v1/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "gemini-2.0-flash-exp-image-generation",
+ "messages": [{"role": "user", "content": "Generate an image of a cat"}],
+ "modalities": ["image", "text"]
+}'
+```
+
+
+
+
diff --git a/docs/my-website/docs/providers/google_ai_studio/files.md b/docs/my-website/docs/providers/google_ai_studio/files.md
new file mode 100644
index 0000000000..500f1d5718
--- /dev/null
+++ b/docs/my-website/docs/providers/google_ai_studio/files.md
@@ -0,0 +1,161 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# [BETA] Google AI Studio (Gemini) Files API
+
+Use this to upload files to Google AI Studio (Gemini).
+
+Useful to pass in large media files to Gemini's `/generateContent` endpoint.
+
+| Action | Supported |
+|----------|-----------|
+| `create` | Yes |
+| `delete` | No |
+| `retrieve` | No |
+| `list` | No |
+
+## Usage
+
+
+
+
+```python
+import base64
+import requests
+from litellm import completion, create_file
+import os
+
+
+### UPLOAD FILE ###
+
+# Fetch the audio file and convert it to a base64 encoded string
+url = "https://cdn.openai.com/API/docs/audio/alloy.wav"
+response = requests.get(url)
+response.raise_for_status()
+wav_data = response.content
+encoded_string = base64.b64encode(wav_data).decode('utf-8')
+
+
+file = create_file(
+ file=wav_data,
+ purpose="user_data",
+ extra_body={"custom_llm_provider": "gemini"},
+ api_key=os.getenv("GEMINI_API_KEY"),
+)
+
+print(f"file: {file}")
+
+assert file is not None
+
+
+### GENERATE CONTENT ###
+completion_response = completion(
+ model="gemini-2.0-flash",
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "What is in this recording?"
+ },
+ {
+ "type": "file",
+ "file": {
+ "file_id": file.id,
+ "filename": "my-test-name",
+ "format": "audio/wav"
+ }
+ }
+ ]
+ },
+ ]
+)
+
+print(completion_response.choices[0].message)
+```
+
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: "gemini-2.0-flash"
+ litellm_params:
+ model: gemini/gemini-2.0-flash
+ api_key: os.environ/GEMINI_API_KEY
+```
+
+2. Start proxy
+
+```bash
+litellm --config config.yaml
+```
+
+3. Test it
+
+```python
+import base64
+import requests
+from openai import OpenAI
+
+client = OpenAI(
+ base_url="http://0.0.0.0:4000",
+ api_key="sk-1234"
+)
+
+# Fetch the audio file and convert it to a base64 encoded string
+url = "https://cdn.openai.com/API/docs/audio/alloy.wav"
+response = requests.get(url)
+response.raise_for_status()
+wav_data = response.content
+encoded_string = base64.b64encode(wav_data).decode('utf-8')
+
+
+file = client.files.create(
+ file=wav_data,
+ purpose="user_data",
+ extra_body={"target_model_names": "gemini-2.0-flash"}
+)
+
+print(f"file: {file}")
+
+assert file is not None
+
+completion = client.chat.completions.create(
+ model="gemini-2.0-flash",
+ modalities=["text", "audio"],
+ audio={"voice": "alloy", "format": "wav"},
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "What is in this recording?"
+ },
+ {
+ "type": "file",
+ "file": {
+ "file_id": file.id,
+ "filename": "my-test-name",
+ "format": "audio/wav"
+ }
+ }
+ ]
+ },
+ ],
+ extra_body={"drop_params": True}
+)
+
+print(completion.choices[0].message)
+```
+
+
+
+
+
+
+
diff --git a/docs/my-website/docs/providers/huggingface.md b/docs/my-website/docs/providers/huggingface.md
index 5297a688ba..399d49b5f4 100644
--- a/docs/my-website/docs/providers/huggingface.md
+++ b/docs/my-website/docs/providers/huggingface.md
@@ -2,466 +2,392 @@ import Image from '@theme/IdealImage';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-# Huggingface
+# Hugging Face
+LiteLLM supports running inference across multiple services for models hosted on the Hugging Face Hub.
-LiteLLM supports the following types of Hugging Face models:
+- **Serverless Inference Providers** - Hugging Face offers easy and unified access to serverless AI inference through multiple inference providers, like [Together AI](https://together.ai) and [Sambanova](https://sambanova.ai). This is the fastest way to integrate AI in your products with a maintenance-free and scalable solution. More details in the [Inference Providers documentation](https://huggingface.co/docs/inference-providers/index).
+- **Dedicated Inference Endpoints** - a product to easily deploy models to production. Inference is run by Hugging Face in a dedicated, fully managed infrastructure on a cloud provider of your choice. You can deploy your model on Hugging Face Inference Endpoints by following [these steps](https://huggingface.co/docs/inference-endpoints/guides/create_endpoint).
-- Serverless Inference API (free) - loaded and ready to use: https://huggingface.co/models?inference=warm&pipeline_tag=text-generation
-- Dedicated Inference Endpoints (paid) - manual deployment: https://ui.endpoints.huggingface.co/
-- All LLMs served via Hugging Face's Inference use [Text-generation-inference](https://huggingface.co/docs/text-generation-inference).
+
+## Supported Models
+
+### Serverless Inference Providers
+You can check available models for an inference provider by going to [huggingface.co/models](https://huggingface.co/models), clicking the "Other" filter tab, and selecting your desired provider:
+
+
+
+For example, you can find all Fireworks supported models [here](https://huggingface.co/models?inference_provider=fireworks-ai&sort=trending).
+
+
+### Dedicated Inference Endpoints
+Refer to the [Inference Endpoints catalog](https://endpoints.huggingface.co/catalog) for a list of available models.
## Usage
+
+
+
+### Authentication
+With a single Hugging Face token, you can access inference through multiple providers. Your calls are routed through Hugging Face and the usage is billed directly to your Hugging Face account at the standard provider API rates.
+
+Simply set the `HF_TOKEN` environment variable with your Hugging Face token. You can create one here: https://huggingface.co/settings/tokens.
+
+```bash
+export HF_TOKEN="hf_xxxxxx"
+```
+or alternatively, you can pass your Hugging Face token as a parameter:
+```python
+completion(..., api_key="hf_xxxxxx")
+```
+
+### Getting Started
+
+To use a Hugging Face model, specify both the provider and model you want to use in the following format:
+```
+huggingface/<provider>/<hf_org_or_user>/<hf_model>
+```
+Where `<hf_org_or_user>/<hf_model>` is the Hugging Face model ID and `<provider>` is the inference provider.
+By default, if you don't specify a provider, LiteLLM will use the [HF Inference API](https://huggingface.co/docs/api-inference/en/index).
+
+Examples:
+
+```python
+# Run DeepSeek-R1 inference through Together AI
+completion(model="huggingface/together/deepseek-ai/DeepSeek-R1",...)
+
+# Run Qwen2.5-72B-Instruct inference through Sambanova
+completion(model="huggingface/sambanova/Qwen/Qwen2.5-72B-Instruct",...)
+
+# Run Llama-3.3-70B-Instruct inference through HF Inference API
+completion(model="huggingface/meta-llama/Llama-3.3-70B-Instruct",...)
+```
+
+
-You need to tell LiteLLM when you're calling Huggingface.
-This is done by adding the "huggingface/" prefix to `model`, example `completion(model="huggingface/",...)`.
-
-
-
-
-By default, LiteLLM will assume a Hugging Face call follows the [Messages API](https://huggingface.co/docs/text-generation-inference/messages_api), which is fully compatible with the OpenAI Chat Completion API.
-
-
-
+### Basic Completion
+Here's an example of chat completion using the DeepSeek-R1 model through Together AI:
```python
import os
from litellm import completion
-# [OPTIONAL] set env var
-os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
+os.environ["HF_TOKEN"] = "hf_xxxxxx"
-messages = [{ "content": "There's a llama in my garden 😱 What should I do?","role": "user"}]
-
-# e.g. Call 'https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct' from Serverless Inference API
response = completion(
- model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct",
- messages=[{ "content": "Hello, how are you?","role": "user"}],
+ model="huggingface/together/deepseek-ai/DeepSeek-R1",
+ messages=[
+ {
+ "role": "user",
+ "content": "How many r's are in the word 'strawberry'?",
+ }
+ ],
+)
+print(response)
+```
+
+### Streaming
+Now, let's see what a streaming request looks like.
+
+```python
+import os
+from litellm import completion
+
+os.environ["HF_TOKEN"] = "hf_xxxxxx"
+
+response = completion(
+ model="huggingface/together/deepseek-ai/DeepSeek-R1",
+ messages=[
+ {
+ "role": "user",
+ "content": "How many r's are in the word `strawberry`?",
+
+ }
+ ],
+ stream=True,
+)
+
+for chunk in response:
+ print(chunk)
+```
+
+### Image Input
+You can also pass images when the model supports it. Here is an example using the [Llama-3.2-11B-Vision-Instruct](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct) model through Sambanova.
+
+```python
+from litellm import completion
+import os
+
+# Set your Hugging Face Token
+os.environ["HF_TOKEN"] = "hf_xxxxxx"
+
+messages=[
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": "What's in this image?"},
+ {
+ "type": "image_url",
+ "image_url": {
+ "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
+ }
+ },
+ ],
+ }
+ ]
+
+response = completion(
+ model="huggingface/sambanova/meta-llama/Llama-3.2-11B-Vision-Instruct",
+ messages=messages,
+)
+print(response.choices[0])
+```
+
+### Function Calling
+You can extend the model's capabilities by giving it access to tools. Here is an example of function calling using the [Qwen2.5-72B-Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct) model through Sambanova.
+
+```python
+import os
+from litellm import completion
+
+# Set your Hugging Face Token
+os.environ["HF_TOKEN"] = "hf_xxxxxx"
+
+tools = [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA",
+ },
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+ },
+ "required": ["location"],
+ },
+ }
+ }
+]
+messages = [
+ {
+ "role": "user",
+ "content": "What's the weather like in Boston today?",
+ }
+]
+
+response = completion(
+    model="huggingface/sambanova/Qwen/Qwen2.5-72B-Instruct",
+ messages=messages,
+ tools=tools,
+ tool_choice="auto"
+)
+print(response)
+```
+
+
+
+
+
+
+
+
+
+### Basic Completion
+After you have [deployed your Hugging Face Inference Endpoint](https://endpoints.huggingface.co/new) on dedicated infrastructure, you can run inference on it by providing the endpoint base URL in `api_base`, and indicating `huggingface/tgi` as the model name.
+
+```python
+import os
+from litellm import completion
+
+os.environ["HF_TOKEN"] = "hf_xxxxxx"
+
+response = completion(
+ model="huggingface/tgi",
+ messages=[{"content": "Hello, how are you?", "role": "user"}],
+ api_base="https://my-endpoint.endpoints.huggingface.cloud/v1/"
+)
+print(response)
+```
+
+### Streaming
+
+```python
+import os
+from litellm import completion
+
+os.environ["HF_TOKEN"] = "hf_xxxxxx"
+
+response = completion(
+ model="huggingface/tgi",
+ messages=[{"content": "Hello, how are you?", "role": "user"}],
+ api_base="https://my-endpoint.endpoints.huggingface.cloud/v1/",
stream=True
)
-print(response)
-```
-
-
-
-
-1. Add models to your config.yaml
-
-```yaml
-model_list:
- - model_name: llama-3.1-8B-instruct
- litellm_params:
- model: huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct
- api_key: os.environ/HUGGINGFACE_API_KEY
-```
-
-2. Start the proxy
-
-```bash
-$ litellm --config /path/to/config.yaml --debug
-```
-
-3. Test it!
-
-```shell
-curl --location 'http://0.0.0.0:4000/chat/completions' \
- --header 'Authorization: Bearer sk-1234' \
- --header 'Content-Type: application/json' \
- --data '{
- "model": "llama-3.1-8B-instruct",
- "messages": [
- {
- "role": "user",
- "content": "I like you!"
- }
- ],
-}'
-```
-
-
-
-
-
-
-Append `text-classification` to the model name
-
-e.g. `huggingface/text-classification/`
-
-
-
-
-```python
-import os
-from litellm import completion
-
-# [OPTIONAL] set env var
-os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
-
-messages = [{ "content": "I like you, I love you!","role": "user"}]
-
-# e.g. Call 'shahrukhx01/question-vs-statement-classifier' hosted on HF Inference endpoints
-response = completion(
- model="huggingface/text-classification/shahrukhx01/question-vs-statement-classifier",
- messages=messages,
- api_base="https://my-endpoint.endpoints.huggingface.cloud",
-)
-
-print(response)
-```
-
-
-
-
-1. Add models to your config.yaml
-
-```yaml
-model_list:
- - model_name: bert-classifier
- litellm_params:
- model: huggingface/text-classification/shahrukhx01/question-vs-statement-classifier
- api_key: os.environ/HUGGINGFACE_API_KEY
- api_base: "https://my-endpoint.endpoints.huggingface.cloud"
-```
-
-2. Start the proxy
-
-```bash
-$ litellm --config /path/to/config.yaml --debug
-```
-
-3. Test it!
-
-```shell
-curl --location 'http://0.0.0.0:4000/chat/completions' \
- --header 'Authorization: Bearer sk-1234' \
- --header 'Content-Type: application/json' \
- --data '{
- "model": "bert-classifier",
- "messages": [
- {
- "role": "user",
- "content": "I like you!"
- }
- ],
-}'
-```
-
-
-
-
-
-
-Steps to use
-* Create your own Hugging Face dedicated endpoint here: https://ui.endpoints.huggingface.co/
-* Set `api_base` to your deployed api base
-* Add the `huggingface/` prefix to your model so litellm knows it's a huggingface Deployed Inference Endpoint
-
-
-
-
-```python
-import os
-from litellm import completion
-
-os.environ["HUGGINGFACE_API_KEY"] = ""
-
-# TGI model: Call https://huggingface.co/glaiveai/glaive-coder-7b
-# add the 'huggingface/' prefix to the model to set huggingface as the provider
-# set api base to your deployed api endpoint from hugging face
-response = completion(
- model="huggingface/glaiveai/glaive-coder-7b",
- messages=[{ "content": "Hello, how are you?","role": "user"}],
- api_base="https://wjiegasee9bmqke2.us-east-1.aws.endpoints.huggingface.cloud"
-)
-print(response)
-```
-
-
-
-
-1. Add models to your config.yaml
-
-```yaml
-model_list:
- - model_name: glaive-coder
- litellm_params:
- model: huggingface/glaiveai/glaive-coder-7b
- api_key: os.environ/HUGGINGFACE_API_KEY
- api_base: "https://wjiegasee9bmqke2.us-east-1.aws.endpoints.huggingface.cloud"
-```
-
-2. Start the proxy
-
-```bash
-$ litellm --config /path/to/config.yaml --debug
-```
-
-3. Test it!
-
-```shell
-curl --location 'http://0.0.0.0:4000/chat/completions' \
- --header 'Authorization: Bearer sk-1234' \
- --header 'Content-Type: application/json' \
- --data '{
- "model": "glaive-coder",
- "messages": [
- {
- "role": "user",
- "content": "I like you!"
- }
- ],
-}'
-```
-
-
-
-
-
-
-
-## Streaming
-
-
-
-
-
-You need to tell LiteLLM when you're calling Huggingface.
-This is done by adding the "huggingface/" prefix to `model`, example `completion(model="huggingface/",...)`.
-
-```python
-import os
-from litellm import completion
-
-# [OPTIONAL] set env var
-os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
-
-messages = [{ "content": "There's a llama in my garden 😱 What should I do?","role": "user"}]
-
-# e.g. Call 'facebook/blenderbot-400M-distill' hosted on HF Inference endpoints
-response = completion(
- model="huggingface/facebook/blenderbot-400M-distill",
- messages=messages,
- api_base="https://my-endpoint.huggingface.cloud",
- stream=True
-)
-
-print(response)
for chunk in response:
- print(chunk)
+ print(chunk)
```
+### Image Input
+
+```python
+import os
+from litellm import completion
+
+os.environ["HF_TOKEN"] = "hf_xxxxxx"
+
+messages=[
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": "What's in this image?"},
+ {
+ "type": "image_url",
+ "image_url": {
+ "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
+ }
+ },
+ ],
+ }
+ ]
+response = completion(
+ model="huggingface/tgi",
+ messages=messages,
+    api_base="https://my-endpoint.endpoints.huggingface.cloud/v1/"
+)
+print(response.choices[0])
+```
+
+### Function Calling
+
+```python
+import os
+from litellm import completion
+
+os.environ["HF_TOKEN"] = "hf_xxxxxx"
+
+functions = [{
+ "name": "get_weather",
+ "description": "Get the weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The location to get weather for"
+ }
+ },
+ "required": ["location"]
+ }
+}]
+
+response = completion(
+ model="huggingface/tgi",
+ messages=[{"content": "What's the weather like in San Francisco?", "role": "user"}],
+ api_base="https://my-endpoint.endpoints.huggingface.cloud/v1/",
+ functions=functions
+)
+print(response)
+```
+
+
+
+
+## LiteLLM Proxy Server with Hugging Face models
+You can set up a [LiteLLM Proxy Server](https://docs.litellm.ai/#litellm-proxy-server-llm-gateway) to serve Hugging Face models through any of the supported Inference Providers. Here's how to do it:
+
+### Step 1. Setup the config file
+
+In this case, we are configuring a proxy to serve `DeepSeek R1` from Hugging Face, using Together AI as the backend Inference Provider.
+
+```yaml
+model_list:
+ - model_name: my-r1-model
+ litellm_params:
+ model: huggingface/together/deepseek-ai/DeepSeek-R1
+ api_key: os.environ/HF_TOKEN # ensure you have `HF_TOKEN` in your .env
+```
+
+### Step 2. Start the server
+```bash
+litellm --config /path/to/config.yaml
+```
+
+### Step 3. Make a request to the server
+
+
+
+```shell
+curl --location 'http://0.0.0.0:4000/chat/completions' \
+ --header 'Content-Type: application/json' \
+ --data '{
+ "model": "my-r1-model",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Hello, how are you?"
+ }
+ ]
+}'
+```
+
+
+
+
+```python
+# pip install openai
+from openai import OpenAI
+
+client = OpenAI(
+ base_url="http://0.0.0.0:4000",
+ api_key="anything",
+)
+
+response = client.chat.completions.create(
+ model="my-r1-model",
+ messages=[
+ {"role": "user", "content": "Hello, how are you?"}
+ ]
+)
+print(response)
+```
+
+
+
+
+
## Embedding
-LiteLLM supports Hugging Face's [text-embedding-inference](https://github.com/huggingface/text-embeddings-inference) format.
+LiteLLM supports Hugging Face's [text-embedding-inference](https://github.com/huggingface/text-embeddings-inference) models as well.
```python
from litellm import embedding
import os
-os.environ['HUGGINGFACE_API_KEY'] = ""
+os.environ['HF_TOKEN'] = "hf_xxxxxx"
response = embedding(
model='huggingface/microsoft/codebert-base',
input=["good morning from litellm"]
)
```
-## Advanced
-
-### Setting API KEYS + API BASE
-
-If required, you can set the api key + api base, set it in your os environment. [Code for how it's sent](https://github.com/BerriAI/litellm/blob/0100ab2382a0e720c7978fbf662cc6e6920e7e03/litellm/llms/huggingface_restapi.py#L25)
-
-```python
-import os
-os.environ["HUGGINGFACE_API_KEY"] = ""
-os.environ["HUGGINGFACE_API_BASE"] = ""
-```
-
-### Viewing Log probs
-
-#### Using `decoder_input_details` - OpenAI `echo`
-
-The `echo` param is supported by OpenAI Completions - Use `litellm.text_completion()` for this
-
-```python
-from litellm import text_completion
-response = text_completion(
- model="huggingface/bigcode/starcoder",
- prompt="good morning",
- max_tokens=10, logprobs=10,
- echo=True
-)
-```
-
-#### Output
-
-```json
-{
- "id": "chatcmpl-3fc71792-c442-4ba1-a611-19dd0ac371ad",
- "object": "text_completion",
- "created": 1698801125.936519,
- "model": "bigcode/starcoder",
- "choices": [
- {
- "text": ", I'm going to make you a sand",
- "index": 0,
- "logprobs": {
- "tokens": [
- "good",
- " morning",
- ",",
- " I",
- "'m",
- " going",
- " to",
- " make",
- " you",
- " a",
- " s",
- "and"
- ],
- "token_logprobs": [
- "None",
- -14.96875,
- -2.2285156,
- -2.734375,
- -2.0957031,
- -2.0917969,
- -0.09429932,
- -3.1132812,
- -1.3203125,
- -1.2304688,
- -1.6201172,
- -0.010292053
- ]
- },
- "finish_reason": "length"
- }
- ],
- "usage": {
- "completion_tokens": 9,
- "prompt_tokens": 2,
- "total_tokens": 11
- }
-}
-```
-
-### Models with Prompt Formatting
-
-For models with special prompt templates (e.g. Llama2), we format the prompt to fit their template.
-
-#### Models with natively Supported Prompt Templates
-
-| Model Name | Works for Models | Function Call | Required OS Variables |
-| ------------------------------------ | ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------- |
-| mistralai/Mistral-7B-Instruct-v0.1 | mistralai/Mistral-7B-Instruct-v0.1 | `completion(model='huggingface/mistralai/Mistral-7B-Instruct-v0.1', messages=messages, api_base="your_api_endpoint")` | `os.environ['HUGGINGFACE_API_KEY']` |
-| meta-llama/Llama-2-7b-chat | All meta-llama llama2 chat models | `completion(model='huggingface/meta-llama/Llama-2-7b', messages=messages, api_base="your_api_endpoint")` | `os.environ['HUGGINGFACE_API_KEY']` |
-| tiiuae/falcon-7b-instruct | All falcon instruct models | `completion(model='huggingface/tiiuae/falcon-7b-instruct', messages=messages, api_base="your_api_endpoint")` | `os.environ['HUGGINGFACE_API_KEY']` |
-| mosaicml/mpt-7b-chat | All mpt chat models | `completion(model='huggingface/mosaicml/mpt-7b-chat', messages=messages, api_base="your_api_endpoint")` | `os.environ['HUGGINGFACE_API_KEY']` |
-| codellama/CodeLlama-34b-Instruct-hf | All codellama instruct models | `completion(model='huggingface/codellama/CodeLlama-34b-Instruct-hf', messages=messages, api_base="your_api_endpoint")` | `os.environ['HUGGINGFACE_API_KEY']` |
-| WizardLM/WizardCoder-Python-34B-V1.0 | All wizardcoder models | `completion(model='huggingface/WizardLM/WizardCoder-Python-34B-V1.0', messages=messages, api_base="your_api_endpoint")` | `os.environ['HUGGINGFACE_API_KEY']` |
-| Phind/Phind-CodeLlama-34B-v2 | All phind-codellama models | `completion(model='huggingface/Phind/Phind-CodeLlama-34B-v2', messages=messages, api_base="your_api_endpoint")` | `os.environ['HUGGINGFACE_API_KEY']` |
-
-**What if we don't support a model you need?**
-You can also specify you're own custom prompt formatting, in case we don't have your model covered yet.
-
-**Does this mean you have to specify a prompt for all models?**
-No. By default we'll concatenate your message content to make a prompt.
-
-**Default Prompt Template**
-
-```python
-def default_pt(messages):
- return " ".join(message["content"] for message in messages)
-```
-
-[Code for how prompt formats work in LiteLLM](https://github.com/BerriAI/litellm/blob/main/litellm/llms/prompt_templates/factory.py)
-
-#### Custom prompt templates
-
-```python
-import litellm
-
-# Create your own custom prompt template works
-litellm.register_prompt_template(
- model="togethercomputer/LLaMA-2-7B-32K",
- roles={
- "system": {
- "pre_message": "[INST] <>\n",
- "post_message": "\n<>\n [/INST]\n"
- },
- "user": {
- "pre_message": "[INST] ",
- "post_message": " [/INST]\n"
- },
- "assistant": {
- "post_message": "\n"
- }
- }
- )
-
-def test_huggingface_custom_model():
- model = "huggingface/togethercomputer/LLaMA-2-7B-32K"
- response = completion(model=model, messages=messages, api_base="https://ecd4sb5n09bo4ei2.us-east-1.aws.endpoints.huggingface.cloud")
- print(response['choices'][0]['message']['content'])
- return response
-
-test_huggingface_custom_model()
-```
-
-[Implementation Code](https://github.com/BerriAI/litellm/blob/c0b3da2c14c791a0b755f0b1e5a9ef065951ecbf/litellm/llms/huggingface_restapi.py#L52)
-
-### Deploying a model on huggingface
-
-You can use any chat/text model from Hugging Face with the following steps:
-
-- Copy your model id/url from Huggingface Inference Endpoints
- - [ ] Go to https://ui.endpoints.huggingface.co/
- - [ ] Copy the url of the specific model you'd like to use
-
-- Set it as your model name
-- Set your HUGGINGFACE_API_KEY as an environment variable
-
-Need help deploying a model on huggingface? [Check out this guide.](https://huggingface.co/docs/inference-endpoints/guides/create_endpoint)
-
-# output
-
-Same as the OpenAI format, but also includes logprobs. [See the code](https://github.com/BerriAI/litellm/blob/b4b2dbf005142e0a483d46a07a88a19814899403/litellm/llms/huggingface_restapi.py#L115)
-
-```json
-{
- "choices": [
- {
- "finish_reason": "stop",
- "index": 0,
- "message": {
- "content": "\ud83d\ude31\n\nComment: @SarahSzabo I'm",
- "role": "assistant",
- "logprobs": -22.697942825499993
- }
- }
- ],
- "created": 1693436637.38206,
- "model": "https://ji16r2iys9a8rjk2.us-east-1.aws.endpoints.huggingface.cloud",
- "usage": {
- "prompt_tokens": 14,
- "completion_tokens": 11,
- "total_tokens": 25
- }
-}
-```
-
# FAQ
-**Does this support stop sequences?**
+**How does billing work with Hugging Face Inference Providers?**
-Yes, we support stop sequences - and you can pass as many as allowed by Hugging Face (or any provider!)
+> Billing is centralized on your Hugging Face account, no matter which providers you are using. You are billed the standard provider API rates with no additional markup - Hugging Face simply passes through the provider costs. Note that [Hugging Face PRO](https://huggingface.co/subscribe/pro) users get $2 worth of Inference credits every month that can be used across providers.
-**How do you deal with repetition penalty?**
+**Do I need to create an account for each Inference Provider?**
-We map the presence penalty parameter in openai to the repetition penalty parameter on Hugging Face. [See code](https://github.com/BerriAI/litellm/blob/b4b2dbf005142e0a483d46a07a88a19814899403/litellm/utils.py#L757).
+> No, you don't need to create separate accounts. All requests are routed through Hugging Face, so you only need your HF token. This allows you to easily benchmark different providers and choose the one that best fits your needs.
-We welcome any suggestions for improving our Hugging Face integration - Create an [issue](https://github.com/BerriAI/litellm/issues/new/choose)/[Join the Discord](https://discord.com/invite/wuPM9dRgDw)!
+**Will more inference providers be supported by Hugging Face in the future?**
+
+> Yes! New inference providers (and models) are being added gradually.
+
+We welcome any suggestions for improving our Hugging Face integration - Create an [issue](https://github.com/BerriAI/litellm/issues/new/choose)/[Join the Discord](https://discord.com/invite/wuPM9dRgDw)!
\ No newline at end of file
diff --git a/docs/my-website/docs/providers/infinity.md b/docs/my-website/docs/providers/infinity.md
index 091503bf18..7900d5adb4 100644
--- a/docs/my-website/docs/providers/infinity.md
+++ b/docs/my-website/docs/providers/infinity.md
@@ -3,18 +3,17 @@ import TabItem from '@theme/TabItem';
# Infinity
-| Property | Details |
-|-------|-------|
-| Description | Infinity is a high-throughput, low-latency REST API for serving text-embeddings, reranking models and clip|
-| Provider Route on LiteLLM | `infinity/` |
-| Supported Operations | `/rerank` |
-| Link to Provider Doc | [Infinity ↗](https://github.com/michaelfeil/infinity) |
-
+| Property | Details |
+| ------------------------- | ---------------------------------------------------------------------------------------------------------- |
+| Description | Infinity is a high-throughput, low-latency REST API for serving text-embeddings, reranking models and clip |
+| Provider Route on LiteLLM | `infinity/` |
+| Supported Operations | `/rerank`, `/embeddings` |
+| Link to Provider Doc | [Infinity ↗](https://github.com/michaelfeil/infinity) |
## **Usage - LiteLLM Python SDK**
```python
-from litellm import rerank
+from litellm import rerank, embedding
import os
os.environ["INFINITY_API_BASE"] = "http://localhost:8080"
@@ -39,8 +38,8 @@ model_list:
- model_name: custom-infinity-rerank
litellm_params:
model: infinity/rerank
- api_key: os.environ/INFINITY_API_KEY
api_base: https://localhost:8080
+ api_key: os.environ/INFINITY_API_KEY
```
Start litellm
@@ -51,7 +50,9 @@ litellm --config /path/to/config.yaml
# RUNNING on http://0.0.0.0:4000
```
-Test request
+## Test request:
+
+### Rerank
```bash
curl http://0.0.0.0:4000/rerank \
@@ -70,15 +71,14 @@ curl http://0.0.0.0:4000/rerank \
}'
```
+#### Supported Cohere Rerank API Params
-## Supported Cohere Rerank API Params
-
-| Param | Type | Description |
-|-------|-------|-------|
-| `query` | `str` | The query to rerank the documents against |
-| `documents` | `list[str]` | The documents to rerank |
-| `top_n` | `int` | The number of documents to return |
-| `return_documents` | `bool` | Whether to return the documents in the response |
+| Param | Type | Description |
+| ------------------ | ----------- | ----------------------------------------------- |
+| `query` | `str` | The query to rerank the documents against |
+| `documents` | `list[str]` | The documents to rerank |
+| `top_n` | `int` | The number of documents to return |
+| `return_documents` | `bool` | Whether to return the documents in the response |
### Usage - Return Documents
@@ -138,6 +138,7 @@ response = rerank(
raw_scores=True, # 👈 PROVIDER-SPECIFIC PARAM
)
```
+
@@ -161,7 +162,7 @@ litellm --config /path/to/config.yaml
# RUNNING on http://0.0.0.0:4000
```
-3. Test it!
+3. Test it!
```bash
curl http://0.0.0.0:4000/rerank \
@@ -179,6 +180,121 @@ curl http://0.0.0.0:4000/rerank \
"raw_scores": True # 👈 PROVIDER-SPECIFIC PARAM
}'
```
+
+
+## Embeddings
+
+LiteLLM provides an OpenAI API-compatible `/embeddings` endpoint for embedding calls.
+
+**Setup**
+
+Add this to your litellm proxy config.yaml
+
+```yaml
+model_list:
+ - model_name: custom-infinity-embedding
+ litellm_params:
+ model: infinity/provider/custom-embedding-v1
+ api_base: http://localhost:8080
+ api_key: os.environ/INFINITY_API_KEY
+```
+
+### Test request:
+
+```bash
+curl http://0.0.0.0:4000/embeddings \
+ -H "Authorization: Bearer sk-1234" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": "custom-infinity-embedding",
+ "input": ["hello"]
+ }'
+```
+
+#### Supported Embedding API Params
+
+| Param | Type | Description |
+| ----------------- | ----------- | ----------------------------------------------------------- |
+| `model` | `str` | The embedding model to use |
+| `input` | `list[str]` | The text inputs to generate embeddings for |
+| `encoding_format` | `str` | The format to return embeddings in (e.g. "float", "base64") |
+| `modality` | `str` | The type of input (e.g. "text", "image", "audio") |
+
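+For example, a minimal sketch passing `modality` alongside `encoding_format` (this assumes `modality` is forwarded to Infinity as-is; the model name is a placeholder):
+
+```python
+import os
+from litellm import embedding
+
+os.environ["INFINITY_API_BASE"] = "http://localhost:8080"
+
+response = embedding(
+    model="infinity/bge-small",
+    input=["good morning from litellm"],
+    encoding_format="float",
+    modality="text",  # assumption: passed through to Infinity
+)
+print(response.data[0]["embedding"])
+```
+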
+### Usage - Basic Examples
+
+
+
+
+```python
+from litellm import embedding
+import os
+
+os.environ["INFINITY_API_BASE"] = "http://localhost:8080"
+
+response = embedding(
+ model="infinity/bge-small",
+ input=["good morning from litellm"]
+)
+
+print(response.data[0]['embedding'])
+```
+
+
+
+
+
+```bash
+curl http://0.0.0.0:4000/embeddings \
+ -H "Authorization: Bearer sk-1234" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": "custom-infinity-embedding",
+ "input": ["hello"]
+ }'
+```
+
+
+
+
+### Usage - OpenAI Client
+
+
+
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="",
+ base_url=""
+)
+
+response = client.embeddings.create(
+ model="bge-small",
+ input=["The food was delicious and the waiter..."],
+ encoding_format="float"
+)
+
+print(response.data[0].embedding)
+```
+
+
+
+
+
+```bash
+curl http://0.0.0.0:4000/embeddings \
+ -H "Authorization: Bearer sk-1234" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": "bge-small",
+ "input": ["The food was delicious and the waiter..."],
+ "encoding_format": "float"
+ }'
+```
+
+
+
diff --git a/docs/my-website/docs/providers/litellm_proxy.md b/docs/my-website/docs/providers/litellm_proxy.md
index e204caba0a..a66423dac5 100644
--- a/docs/my-website/docs/providers/litellm_proxy.md
+++ b/docs/my-website/docs/providers/litellm_proxy.md
@@ -57,7 +57,7 @@ messages = [{ "content": "Hello, how are you?","role": "user"}]
# litellm proxy call
response = completion(
model="litellm_proxy/your-model-name",
- messages,
+ messages=messages,
api_base = "your-litellm-proxy-url",
api_key = "your-litellm-proxy-api-key"
)
@@ -76,7 +76,7 @@ messages = [{ "content": "Hello, how are you?","role": "user"}]
# openai call
response = completion(
model="litellm_proxy/your-model-name",
- messages,
+ messages=messages,
api_base = "your-litellm-proxy-url",
stream=True
)
diff --git a/docs/my-website/docs/providers/ollama.md b/docs/my-website/docs/providers/ollama.md
index 848be2beb7..d59d9dd0ce 100644
--- a/docs/my-website/docs/providers/ollama.md
+++ b/docs/my-website/docs/providers/ollama.md
@@ -202,6 +202,67 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
+
+## Using Ollama FIM on `/v1/completions`
+
+LiteLLM supports calling Ollama's `/api/generate` endpoint on `/v1/completions` requests.
+
+
+
+
+```python
+import litellm
+litellm._turn_on_debug() # turn on debug to see the request
+from litellm import text_completion
+
+response = text_completion(
+ model="ollama/llama3.1",
+ prompt="Hello, world!",
+ api_base="http://localhost:11434"
+)
+print(response)
+```
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: "llama3.1"
+ litellm_params:
+ model: "ollama/llama3.1"
+ api_base: "http://localhost:11434"
+```
+
+2. Start proxy
+
+```bash
+litellm --config /path/to/config.yaml --detailed_debug
+
+# RUNNING ON http://0.0.0.0:4000
+```
+
+3. Test it!
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="anything", # 👈 PROXY KEY (can be anything, if master_key not set)
+ base_url="http://0.0.0.0:4000" # 👈 PROXY BASE URL
+)
+
+response = client.completions.create(
+    model="llama3.1",
+    prompt="Hello, world!"
+)
+print(response)
+```
+
+
+
## Using ollama `api/chat`
In order to send ollama requests to `POST /api/chat` on your ollama server, set the model prefix to `ollama_chat`
diff --git a/docs/my-website/docs/providers/openai.md b/docs/my-website/docs/providers/openai.md
index 15661f6521..a4aee5dbf7 100644
--- a/docs/my-website/docs/providers/openai.md
+++ b/docs/my-website/docs/providers/openai.md
@@ -163,6 +163,12 @@ os.environ["OPENAI_API_BASE"] = "openaiai-api-base" # OPTIONAL
| Model Name | Function Call |
|-----------------------|-----------------------------------------------------------------|
+| gpt-4.1 | `response = completion(model="gpt-4.1", messages=messages)` |
+| gpt-4.1-mini | `response = completion(model="gpt-4.1-mini", messages=messages)` |
+| gpt-4.1-nano | `response = completion(model="gpt-4.1-nano", messages=messages)` |
+| o4-mini | `response = completion(model="o4-mini", messages=messages)` |
+| o3-mini | `response = completion(model="o3-mini", messages=messages)` |
+| o3 | `response = completion(model="o3", messages=messages)` |
| o1-mini | `response = completion(model="o1-mini", messages=messages)` |
| o1-preview | `response = completion(model="o1-preview", messages=messages)` |
| gpt-4o-mini | `response = completion(model="gpt-4o-mini", messages=messages)` |
@@ -228,6 +234,92 @@ response = completion(
```
+## PDF File Parsing
+
+OpenAI has a new `file` message type that allows you to pass in a PDF file and have it parsed into a structured output. [Read more](https://platform.openai.com/docs/guides/pdf-files?api-mode=chat&lang=python)
+
+
+
+
+```python
+import base64
+from litellm import completion
+
+with open("draconomicon.pdf", "rb") as f:
+ data = f.read()
+
+base64_string = base64.b64encode(data).decode("utf-8")
+
+response = completion(
+ model="gpt-4o",
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "file",
+ "file": {
+ "filename": "draconomicon.pdf",
+ "file_data": f"data:application/pdf;base64,{base64_string}",
+ }
+ },
+ {
+ "type": "text",
+ "text": "What is the first dragon in the book?",
+ }
+ ],
+ },
+ ],
+)
+
+print(response.choices[0].message.content)
+```
+
+
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: openai-model
+ litellm_params:
+ model: gpt-4o
+ api_key: os.environ/OPENAI_API_KEY
+```
+
+2. Start the proxy
+
+```bash
+litellm --config config.yaml
+```
+
+3. Test it!
+
+```bash
+curl -X POST 'http://0.0.0.0:4000/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "openai-model",
+ "messages": [
+ {"role": "user", "content": [
+ {
+ "type": "file",
+ "file": {
+ "filename": "draconomicon.pdf",
+                "file_data": "data:application/pdf;base64,<base64_encoded_pdf>"
+ }
+ }
+ ]}
+ ]
+}'
+```
+
+
+
+
## OpenAI Fine Tuned Models
| Model Name | Function Call |
@@ -239,6 +331,74 @@ response = completion(
| fine tuned `gpt-3.5-turbo-0613` | `response = completion(model="ft:gpt-3.5-turbo-0613", messages=messages)` |
+## OpenAI Audio Transcription
+
+LiteLLM supports the OpenAI Audio Transcription endpoint.
+
+Supported models:
+
+| Model Name | Function Call |
+|---------------------------|-----------------------------------------------------------------|
+| `whisper-1` | `response = transcription(model="whisper-1", file=audio_file)` |
+| `gpt-4o-transcribe` | `response = transcription(model="gpt-4o-transcribe", file=audio_file)` |
+| `gpt-4o-mini-transcribe` | `response = transcription(model="gpt-4o-mini-transcribe", file=audio_file)` |
+
+
+
+
+```python
+from litellm import transcription
+import os
+
+# set api keys
+os.environ["OPENAI_API_KEY"] = ""
+audio_file = open("/path/to/audio.mp3", "rb")
+
+response = transcription(model="gpt-4o-transcribe", file=audio_file)
+
+print(f"response: {response}")
+```
+
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+- model_name: gpt-4o-transcribe
+ litellm_params:
+ model: gpt-4o-transcribe
+ api_key: os.environ/OPENAI_API_KEY
+ model_info:
+ mode: audio_transcription
+
+general_settings:
+ master_key: sk-1234
+```
+
+2. Start the proxy
+
+```bash
+litellm --config config.yaml
+```
+
+3. Test it!
+
+```bash
+curl --location 'http://0.0.0.0:4000/v1/audio/transcriptions' \
+--header 'Authorization: Bearer sk-1234' \
+--form 'file=@"/Users/krrishdholakia/Downloads/gettysburg.wav"' \
+--form 'model="gpt-4o-transcribe"'
+```
+
+
+
+
+
+
+
+
## Advanced
### Getting OpenAI API Response Headers
@@ -449,26 +609,6 @@ response = litellm.acompletion(
)
```
-### Using Helicone Proxy with LiteLLM
-```python
-import os
-import litellm
-from litellm import completion
-
-os.environ["OPENAI_API_KEY"] = ""
-
-# os.environ["OPENAI_API_BASE"] = ""
-litellm.api_base = "https://oai.hconeai.com/v1"
-litellm.headers = {
- "Helicone-Auth": f"Bearer {os.getenv('HELICONE_API_KEY')}",
- "Helicone-Cache-Enabled": "true",
-}
-
-messages = [{ "content": "Hello, how are you?","role": "user"}]
-
-# openai call
-response = completion("gpt-3.5-turbo", messages)
-```
### Using OpenAI Proxy with LiteLLM
```python
diff --git a/docs/my-website/docs/providers/openrouter.md b/docs/my-website/docs/providers/openrouter.md
index 09669c9f9e..58a87f6849 100644
--- a/docs/my-website/docs/providers/openrouter.md
+++ b/docs/my-website/docs/providers/openrouter.md
@@ -10,9 +10,11 @@ LiteLLM supports all the text / chat / vision models from [OpenRouter](https://o
import os
from litellm import completion
os.environ["OPENROUTER_API_KEY"] = ""
+os.environ["OPENROUTER_API_BASE"] = "" # [OPTIONAL] defaults to https://openrouter.ai/api/v1
-os.environ["OR_SITE_URL"] = "" # optional
-os.environ["OR_APP_NAME"] = "" # optional
+
+os.environ["OR_SITE_URL"] = "" # [OPTIONAL]
+os.environ["OR_APP_NAME"] = "" # [OPTIONAL]
response = completion(
model="openrouter/google/palm-2-chat-bison",
diff --git a/docs/my-website/docs/providers/perplexity.md b/docs/my-website/docs/providers/perplexity.md
index 620a7640ad..5ef1f8861a 100644
--- a/docs/my-website/docs/providers/perplexity.md
+++ b/docs/my-website/docs/providers/perplexity.md
@@ -17,7 +17,7 @@ import os
os.environ['PERPLEXITYAI_API_KEY'] = ""
response = completion(
- model="perplexity/mistral-7b-instruct",
+ model="perplexity/sonar-pro",
messages=messages
)
print(response)
@@ -30,7 +30,7 @@ import os
os.environ['PERPLEXITYAI_API_KEY'] = ""
response = completion(
- model="perplexity/mistral-7b-instruct",
+ model="perplexity/sonar-pro",
messages=messages,
stream=True
)
@@ -45,19 +45,12 @@ All models listed here https://docs.perplexity.ai/docs/model-cards are supported
| Model Name | Function Call |
|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| pplx-7b-chat | `completion(model="perplexity/pplx-7b-chat", messages)` |
-| pplx-70b-chat | `completion(model="perplexity/pplx-70b-chat", messages)` |
-| pplx-7b-online | `completion(model="perplexity/pplx-7b-online", messages)` |
-| pplx-70b-online | `completion(model="perplexity/pplx-70b-online", messages)` |
-| codellama-34b-instruct | `completion(model="perplexity/codellama-34b-instruct", messages)` |
-| llama-2-13b-chat | `completion(model="perplexity/llama-2-13b-chat", messages)` |
-| llama-2-70b-chat | `completion(model="perplexity/llama-2-70b-chat", messages)` |
-| mistral-7b-instruct | `completion(model="perplexity/mistral-7b-instruct", messages)` |
-| openhermes-2-mistral-7b | `completion(model="perplexity/openhermes-2-mistral-7b", messages)` |
-| openhermes-2.5-mistral-7b | `completion(model="perplexity/openhermes-2.5-mistral-7b", messages)` |
-| pplx-7b-chat-alpha | `completion(model="perplexity/pplx-7b-chat-alpha", messages)` |
-| pplx-70b-chat-alpha | `completion(model="perplexity/pplx-70b-chat-alpha", messages)` |
-
+| sonar-deep-research | `completion(model="perplexity/sonar-deep-research", messages)` |
+| sonar-reasoning-pro | `completion(model="perplexity/sonar-reasoning-pro", messages)` |
+| sonar-reasoning | `completion(model="perplexity/sonar-reasoning", messages)` |
+| sonar-pro | `completion(model="perplexity/sonar-pro", messages)` |
+| sonar | `completion(model="perplexity/sonar", messages)` |
+| r1-1776 | `completion(model="perplexity/r1-1776", messages)` |
diff --git a/docs/my-website/docs/providers/predibase.md b/docs/my-website/docs/providers/predibase.md
index 31713aef1e..9f25309c19 100644
--- a/docs/my-website/docs/providers/predibase.md
+++ b/docs/my-website/docs/providers/predibase.md
@@ -230,7 +230,7 @@ response = completion(
model="predibase/llama-3-8b-instruct",
messages = [{ "content": "Hello, how are you?","role": "user"}],
adapter_id="my_repo/3",
- adapter_soruce="pbase",
+ adapter_source="pbase",
)
```
diff --git a/docs/my-website/docs/providers/vertex.md b/docs/my-website/docs/providers/vertex.md
index 10ac13ecaf..762bd5f332 100644
--- a/docs/my-website/docs/providers/vertex.md
+++ b/docs/my-website/docs/providers/vertex.md
@@ -347,7 +347,7 @@ Return a `list[Recipe]`
completion(model="vertex_ai/gemini-1.5-flash-preview-0514", messages=messages, response_format={ "type": "json_object" })
```
-### **Grounding**
+### **Grounding - Web Search**
Add Google Search Result grounding to vertex ai calls.
@@ -358,7 +358,7 @@ See the grounding metadata with `response_obj._hidden_params["vertex_ai_groundin
-```python
+```python showLineNumbers
from litellm import completion
## SETUP ENVIRONMENT
@@ -377,14 +377,36 @@ print(resp)
-```bash
+
+
+
+```python showLineNumbers
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="sk-1234", # pass litellm proxy key, if you're using virtual keys
+ base_url="http://0.0.0.0:4000/v1/" # point to litellm proxy
+)
+
+response = client.chat.completions.create(
+ model="gemini-pro",
+ messages=[{"role": "user", "content": "Who won the world cup?"}],
+ tools=[{"googleSearchRetrieval": {}}],
+)
+
+print(response)
+```
+
+
+
+```bash showLineNumbers
curl http://localhost:4000/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer sk-1234" \
-d '{
"model": "gemini-pro",
"messages": [
- {"role": "user", "content": "Hello, Claude!"}
+ {"role": "user", "content": "Who won the world cup?"}
],
"tools": [
{
@@ -394,10 +416,82 @@ curl http://localhost:4000/v1/chat/completions \
}'
```
+
+
+You can also use the `enterpriseWebSearch` tool for an [enterprise compliant search](https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/web-grounding-enterprise).
+
+
+
+
+```python showLineNumbers
+from litellm import completion
+
+## SETUP ENVIRONMENT
+# !gcloud auth application-default login - run this to add vertex credentials to your env
+
+tools = [{"enterpriseWebSearch": {}}] # 👈 ADD GOOGLE ENTERPRISE SEARCH
+
+resp = completion(
+ model="vertex_ai/gemini-1.0-pro-001",
+ messages=[{"role": "user", "content": "Who won the world cup?"}],
+ tools=tools,
+ )
+
+print(resp)
+```
+
+
+
+
+
+
+```python showLineNumbers
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="sk-1234", # pass litellm proxy key, if you're using virtual keys
+ base_url="http://0.0.0.0:4000/v1/" # point to litellm proxy
+)
+
+response = client.chat.completions.create(
+ model="gemini-pro",
+ messages=[{"role": "user", "content": "Who won the world cup?"}],
+ tools=[{"enterpriseWebSearch": {}}],
+)
+
+print(response)
+```
+
+
+
+```bash showLineNumbers
+curl http://localhost:4000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer sk-1234" \
+ -d '{
+ "model": "gemini-pro",
+ "messages": [
+ {"role": "user", "content": "Who won the world cup?"}
+ ],
+ "tools": [
+ {
+ "enterpriseWebSearch": {}
+ }
+ ]
+ }'
+
+```
+
+
+
+
+
+
+
#### **Moving from Vertex AI SDK to LiteLLM (GROUNDING)**
@@ -448,6 +542,154 @@ print(resp)
```
+### **Thinking / `reasoning_content`**
+
+LiteLLM translates OpenAI's `reasoning_effort` to Gemini's `thinking` parameter. [Code](https://github.com/BerriAI/litellm/blob/620664921902d7a9bfb29897a7b27c1a7ef4ddfb/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py#L362)
+
+**Mapping**
+
+| reasoning_effort | thinking |
+| ---------------- | -------- |
+| "low" | "budget_tokens": 1024 |
+| "medium" | "budget_tokens": 2048 |
+| "high" | "budget_tokens": 4096 |
+
+
+
+
+```python
+from litellm import completion
+
+# !gcloud auth application-default login - run this to add vertex credentials to your env
+
+resp = completion(
+ model="vertex_ai/gemini-2.5-flash-preview-04-17",
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
+ reasoning_effort="low",
+ vertex_project="project-id",
+ vertex_location="us-central1"
+)
+
+```
+
+
+
+
+
+1. Setup config.yaml
+
+```yaml
+- model_name: gemini-2.5-flash
+ litellm_params:
+ model: vertex_ai/gemini-2.5-flash-preview-04-17
+ vertex_credentials: {"project_id": "project-id", "location": "us-central1", "project_key": "project-key"}
+ vertex_project: "project-id"
+ vertex_location: "us-central1"
+```
+
+2. Start proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+3. Test it!
+
+```bash
+curl http://0.0.0.0:4000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer " \
+ -d '{
+ "model": "gemini-2.5-flash",
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
+ "reasoning_effort": "low"
+ }'
+```
+
+
+
+
+
+**Expected Response**
+
+```python
+ModelResponse(
+ id='chatcmpl-c542d76d-f675-4e87-8e5f-05855f5d0f5e',
+ created=1740470510,
+ model='claude-3-7-sonnet-20250219',
+ object='chat.completion',
+ system_fingerprint=None,
+ choices=[
+ Choices(
+ finish_reason='stop',
+ index=0,
+ message=Message(
+ content="The capital of France is Paris.",
+ role='assistant',
+ tool_calls=None,
+ function_call=None,
+ reasoning_content='The capital of France is Paris. This is a very straightforward factual question.'
+ ),
+ )
+ ],
+ usage=Usage(
+ completion_tokens=68,
+ prompt_tokens=42,
+ total_tokens=110,
+ completion_tokens_details=None,
+ prompt_tokens_details=PromptTokensDetailsWrapper(
+ audio_tokens=None,
+ cached_tokens=0,
+ text_tokens=None,
+ image_tokens=None
+ ),
+ cache_creation_input_tokens=0,
+ cache_read_input_tokens=0
+ )
+)
+```
+
+#### Pass `thinking` to Gemini models
+
+You can also pass the `thinking` parameter to Gemini models.
+
+This is translated to Gemini's [`thinkingConfig` parameter](https://ai.google.dev/gemini-api/docs/thinking#set-budget).
+
+
+
+
+```python
+from litellm import completion
+
+# !gcloud auth application-default login - run this to add vertex credentials to your env
+
+response = completion(
+ model="vertex_ai/gemini-2.5-flash-preview-04-17",
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
+ thinking={"type": "enabled", "budget_tokens": 1024},
+ vertex_project="project-id",
+ vertex_location="us-central1"
+)
+```
+
+
+
+
+```bash
+curl http://0.0.0.0:4000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $LITELLM_KEY" \
+ -d '{
+ "model": "vertex_ai/gemini-2.5-flash-preview-04-17",
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
+ "thinking": {"type": "enabled", "budget_tokens": 1024}
+ }'
+```
+
+
+
+
+
### **Context Caching**
Vertex AI context caching is supported by calling the provider API directly. (Unified endpoint support coming soon.)
@@ -1369,6 +1611,103 @@ curl --location 'http://0.0.0.0:4000/chat/completions' \
+## Gemini Pro
+| Model Name | Function Call |
+|------------------|--------------------------------------|
+| gemini-pro | `completion('gemini-pro', messages)`, `completion('vertex_ai/gemini-pro', messages)` |
+
+## Fine-tuned Models
+
+You can call fine-tuned Vertex AI Gemini models through LiteLLM
+
+| Property | Details |
+|----------|---------|
+| Provider Route | `vertex_ai/gemini/{MODEL_ID}` |
+| Vertex Documentation | [Vertex AI - Fine-tuned Gemini Models](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-use-supervised-tuning#test_the_tuned_model_with_a_prompt)|
+| Supported Operations | `/chat/completions`, `/completions`, `/embeddings`, `/images` |
+
+To use a model that follows the `/gemini` request/response format, simply set the model parameter as
+
+```python title="Model parameter for calling fine-tuned gemini models"
+model="vertex_ai/gemini/"
+```
+
+
+
+
+```python showLineNumbers title="Example"
+import litellm
+import os
+
+## set ENV variables
+os.environ["VERTEXAI_PROJECT"] = "hardy-device-38811"
+os.environ["VERTEXAI_LOCATION"] = "us-central1"
+
+response = litellm.completion(
+ model="vertex_ai/gemini/", # e.g. vertex_ai/gemini/4965075652664360960
+ messages=[{ "content": "Hello, how are you?","role": "user"}],
+)
+```
+
+
+
+
+1. Add Vertex Credentials to your env
+
+```bash title="Authenticate to Vertex AI"
+!gcloud auth application-default login
+```
+
+2. Setup config.yaml
+
+```yaml showLineNumbers title="Add to litellm config"
+- model_name: finetuned-gemini
+ litellm_params:
+ model: vertex_ai/gemini/
+ vertex_project:
+ vertex_location:
+```
+
+3. Test it!
+
+
+
+
+```python showLineNumbers title="Example request"
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="your-litellm-key",
+ base_url="http://0.0.0.0:4000"
+)
+
+response = client.chat.completions.create(
+ model="finetuned-gemini",
+ messages=[
+ {"role": "user", "content": "hi"}
+ ]
+)
+print(response)
+```
+
+
+
+
+```bash showLineNumbers title="Example request"
+curl --location 'http://0.0.0.0:4000/v1/chat/completions' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: ' \
+--data '{"model": "finetuned-gemini" ,"messages":[{"role": "user", "content":[{"type": "text", "text": "hi"}]}]}'
+```
+
+
+
+
+
+
+
+
+
## Model Garden
:::tip
@@ -1479,67 +1818,6 @@ response = completion(
-## Gemini Pro
-| Model Name | Function Call |
-|------------------|--------------------------------------|
-| gemini-pro | `completion('gemini-pro', messages)`, `completion('vertex_ai/gemini-pro', messages)` |
-
-## Fine-tuned Models
-
-Fine tuned models on vertex have a numerical model/endpoint id.
-
-
-
-
-```python
-from litellm import completion
-import os
-
-## set ENV variables
-os.environ["VERTEXAI_PROJECT"] = "hardy-device-38811"
-os.environ["VERTEXAI_LOCATION"] = "us-central1"
-
-response = completion(
- model="vertex_ai/", # e.g. vertex_ai/4965075652664360960
- messages=[{ "content": "Hello, how are you?","role": "user"}],
- base_model="vertex_ai/gemini-1.5-pro" # the base model - used for routing
-)
-```
-
-
-
-
-1. Add Vertex Credentials to your env
-
-```bash
-!gcloud auth application-default login
-```
-
-2. Setup config.yaml
-
-```yaml
-- model_name: finetuned-gemini
- litellm_params:
- model: vertex_ai/
- vertex_project:
- vertex_location:
- model_info:
- base_model: vertex_ai/gemini-1.5-pro # IMPORTANT
-```
-
-3. Test it!
-
-```bash
-curl --location 'https://0.0.0.0:4000/v1/chat/completions' \
---header 'Content-Type: application/json' \
---header 'Authorization: ' \
---data '{"model": "finetuned-gemini" ,"messages":[{"role": "user", "content":[{"type": "text", "text": "hi"}]}]}'
-```
-
-
-
-
-
## Gemini Pro Vision
| Model Name | Function Call |
@@ -1684,23 +1962,25 @@ assert isinstance(
```
-## Usage - PDF / Videos / etc. Files
+## Usage - PDF / Videos / Audio etc. Files
Pass any file supported by Vertex AI, through LiteLLM.
-LiteLLM Supports the following image types passed in url
+LiteLLM supports the following file types passed in as a URL.
+
+The `file` message type for Vertex AI is supported from v1.65.1+.
```
-Images with Cloud Storage URIs - gs://cloud-samples-data/generative-ai/image/boats.jpeg
-Images with direct links - https://storage.googleapis.com/github-repo/img/gemini/intro/landmark3.jpg
+Files with Cloud Storage URIs - gs://cloud-samples-data/generative-ai/image/boats.jpeg
+Files with direct links - https://storage.googleapis.com/github-repo/img/gemini/intro/landmark3.jpg
Videos with Cloud Storage URIs - https://storage.googleapis.com/github-repo/img/gemini/multimodality_usecases_overview/pixel8.mp4
-Base64 Encoded Local Images
+Base64 Encoded Local Files
```
-### **Using `gs://`**
+### **Using `gs://` or any URL**
```python
from litellm import completion
@@ -1712,8 +1992,11 @@ response = completion(
"content": [
{"type": "text", "text": "You are a very professional document summarization specialist. Please summarize the given document."},
{
- "type": "image_url",
- "image_url": "gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf", # 👈 PDF
+ "type": "file",
+ "file": {
+ "file_id": "gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf",
+ "format": "application/pdf" # OPTIONAL - specify mime-type
+ }
},
],
}
@@ -1747,8 +2030,16 @@ response = completion(
"content": [
{"type": "text", "text": "You are a very professional document summarization specialist. Please summarize the given document."},
{
- "type": "image_url",
- "image_url": f"data:application/pdf;base64,{encoded_file}", # 👈 PDF
+ "type": "file",
+ "file": {
+ "file_data": f"data:application/pdf;base64,{encoded_file}", # 👈 PDF
+ }
+ },
+ {
+ "type": "audio_input",
+                    "audio_input": {
+                        "audio_input": f"data:audio/mp3;base64,{encoded_file}", # 👈 AUDIO file (the 'file' message type works too)
+ }
},
],
}
@@ -1794,8 +2085,11 @@ curl http://0.0.0.0:4000/v1/chat/completions \
"text": "You are a very professional document summarization specialist. Please summarize the given document"
},
{
- "type": "image_url",
- "image_url": "gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf" # 👈 PDF
+ "type": "file",
+ "file": {
+ "file_id": "gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf",
+ "format": "application/pdf" # OPTIONAL
+ }
}
}
]
@@ -1822,11 +2116,18 @@ curl http://0.0.0.0:4000/v1/chat/completions \
"text": "You are a very professional document summarization specialist. Please summarize the given document"
},
{
- "type": "image_url",
- "image_url": "data:application/pdf;base64,{encoded_file}" # 👈 PDF
- }
- }
- ]
+ "type": "file",
+ "file": {
+ "file_data": f"data:application/pdf;base64,{encoded_file}", # 👈 PDF
+ },
+ },
+ {
+ "type": "audio_input",
+            "audio_input": {
+                "audio_input": f"data:audio/mp3;base64,{encoded_file}", # 👈 AUDIO file (the 'file' message type works too)
+ }
+ },
+ ]
}
],
"max_tokens": 300
@@ -1836,6 +2137,7 @@ curl http://0.0.0.0:4000/v1/chat/completions \
+
## Chat Models
| Model Name | Function Call |
|------------------|--------------------------------------|
@@ -2044,7 +2346,12 @@ print(response)
## **Multi-Modal Embeddings**
-Usage
+
+Known Limitations:
+- Only supports 1 image / video per request
+- Only supports GCS or base64 encoded images / videos
+
+### Usage
@@ -2260,6 +2567,115 @@ print(f"Text Embedding: {embeddings.text_embedding}")
+### Text + Image + Video Embeddings
+
+
+
+
+Text + Image
+
+```python
+response = await litellm.aembedding(
+ model="vertex_ai/multimodalembedding@001",
+ input=["hey", "gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"] # will be sent as a gcs image
+)
+```
+
+Text + Video
+
+```python
+response = await litellm.aembedding(
+ model="vertex_ai/multimodalembedding@001",
+    input=["hey", "gs://my-bucket/embeddings/supermarket-video.mp4"] # will be sent as a gcs video
+)
+```
+
+Image + Video
+
+```python
+response = await litellm.aembedding(
+ model="vertex_ai/multimodalembedding@001",
+    input=["gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png", "gs://my-bucket/embeddings/supermarket-video.mp4"] # will be sent as gcs objects (image + video)
+)
+```
+
+
+
+
+
+1. Add model to config.yaml
+```yaml
+model_list:
+ - model_name: multimodalembedding@001
+ litellm_params:
+ model: vertex_ai/multimodalembedding@001
+ vertex_project: "adroit-crow-413218"
+ vertex_location: "us-central1"
+ vertex_credentials: adroit-crow-413218-a956eef1a2a8.json
+
+litellm_settings:
+ drop_params: True
+```
+
+2. Start Proxy
+
+```
+$ litellm --config /path/to/config.yaml
+```
+
+3. Make a request using the OpenAI Python SDK or Langchain Python SDK
+
+
+Text + Image
+
+```python
+import openai
+
+client = openai.OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")
+
+# # request sent to model set on litellm proxy, `litellm --model`
+response = client.embeddings.create(
+ model="multimodalembedding@001",
+ input = ["hey", "gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"],
+)
+
+print(response)
+```
+
+Text + Video
+```python
+import openai
+
+client = openai.OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")
+
+# # request sent to model set on litellm proxy, `litellm --model`
+response = client.embeddings.create(
+ model="multimodalembedding@001",
+ input = ["hey", "gs://my-bucket/embeddings/supermarket-video.mp4"],
+)
+
+print(response)
+```
+
+Image + Video
+```python
+import openai
+
+client = openai.OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")
+
+# # request sent to model set on litellm proxy, `litellm --model`
+response = client.embeddings.create(
+ model="multimodalembedding@001",
+ input = ["gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png", "gs://my-bucket/embeddings/supermarket-video.mp4"],
+)
+
+print(response)
+```
+
+
+
+
+
## **Image Generation Models**
Usage
diff --git a/docs/my-website/docs/providers/vllm.md b/docs/my-website/docs/providers/vllm.md
index b5987167ec..5c8233b056 100644
--- a/docs/my-website/docs/providers/vllm.md
+++ b/docs/my-website/docs/providers/vllm.md
@@ -161,6 +161,120 @@ curl -L -X POST 'http://0.0.0.0:4000/embeddings' \
Example Implementation from VLLM [here](https://github.com/vllm-project/vllm/pull/10020)
+
+
+
+Use this to send a video URL to VLLM + Gemini in the same format, using OpenAI's `file` message type.
+
+There are two ways to send a video url to VLLM:
+
+1. Pass the video url directly
+
+```
+{"type": "file", "file": {"file_id": video_url}},
+```
+
+2. Pass the video data as base64
+
+```
+{"type": "file", "file": {"file_data": f"data:video/mp4;base64,{video_data_base64}"}}
+```
+
+
+
+
+```python
+from litellm import completion
+import os
+
+messages=[
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "Summarize the following video"
+ },
+ {
+ "type": "file",
+ "file": {
+ "file_id": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
+ }
+ }
+ ]
+ }
+]
+
+# call vllm
+os.environ["HOSTED_VLLM_API_BASE"] = "https://hosted-vllm-api.co"
+os.environ["HOSTED_VLLM_API_KEY"] = "" # [optional], if your VLLM server requires an API key
+response = completion(
+ model="hosted_vllm/qwen", # pass the vllm model name
+ messages=messages,
+)
+
+# call gemini
+os.environ["GEMINI_API_KEY"] = "your-gemini-api-key"
+response = completion(
+ model="gemini/gemini-1.5-flash", # pass the gemini model name
+ messages=messages,
+)
+
+print(response)
+```
+
+
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: my-model
+ litellm_params:
+ model: hosted_vllm/qwen # add hosted_vllm/ prefix to route as OpenAI provider
+ api_base: https://hosted-vllm-api.co # add api base for OpenAI compatible provider
+ - model_name: my-gemini-model
+ litellm_params:
+ model: gemini/gemini-1.5-flash # add gemini/ prefix to route as Google AI Studio provider
+ api_key: os.environ/GEMINI_API_KEY
+```
+
+2. Start the proxy
+
+```bash
+$ litellm --config /path/to/config.yaml
+
+# RUNNING on http://0.0.0.0:4000
+```
+
+3. Test it!
+
+```bash
+curl -X POST http://0.0.0.0:4000/chat/completions \
+-H "Authorization: Bearer sk-1234" \
+-H "Content-Type: application/json" \
+-d '{
+ "model": "my-model",
+ "messages": [
+ {"role": "user", "content":
+ [
+ {"type": "text", "text": "Summarize the following video"},
+ {"type": "file", "file": {"file_id": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"}}
+ ]
+ }
+ ]
+}'
+```
+
+
+
+
+
+
+
+
+Use this to send a video URL to VLLM in its native message format (`video_url`).
+
There are two ways to send a video url to VLLM:
1. Pass the video url directly
@@ -249,6 +363,10 @@ curl -X POST http://0.0.0.0:4000/chat/completions \
+
+
+
+
## (Deprecated) for `vllm pip package`
### Using - `litellm.completion`
diff --git a/docs/my-website/docs/providers/xai.md b/docs/my-website/docs/providers/xai.md
index 3faf7d1052..49a3640991 100644
--- a/docs/my-website/docs/providers/xai.md
+++ b/docs/my-website/docs/providers/xai.md
@@ -18,13 +18,14 @@ os.environ['XAI_API_KEY']
```
## Sample Usage
-```python
+
+```python showLineNumbers title="LiteLLM python sdk usage - Non-streaming"
from litellm import completion
import os
os.environ['XAI_API_KEY'] = ""
response = completion(
- model="xai/grok-2-latest",
+ model="xai/grok-3-mini-beta",
messages=[
{
"role": "user",
@@ -45,13 +46,14 @@ print(response)
```
## Sample Usage - Streaming
-```python
+
+```python showLineNumbers title="LiteLLM python sdk usage - Streaming"
from litellm import completion
import os
os.environ['XAI_API_KEY'] = ""
response = completion(
- model="xai/grok-2-latest",
+ model="xai/grok-3-mini-beta",
messages=[
{
"role": "user",
@@ -75,14 +77,15 @@ for chunk in response:
```
## Sample Usage - Vision
-```python
+
+```python showLineNumbers title="LiteLLM python sdk usage - Vision"
import os
from litellm import completion
os.environ["XAI_API_KEY"] = "your-api-key"
response = completion(
- model="xai/grok-2-latest",
+ model="xai/grok-2-vision-latest",
messages=[
{
"role": "user",
@@ -110,7 +113,7 @@ Here's how to call a XAI model with the LiteLLM Proxy Server
1. Modify the config.yaml
- ```yaml
+ ```yaml showLineNumbers
model_list:
- model_name: my-model
litellm_params:
@@ -131,7 +134,7 @@ Here's how to call a XAI model with the LiteLLM Proxy Server
- ```python
+ ```python showLineNumbers
import openai
client = openai.OpenAI(
api_key="sk-1234", # pass litellm proxy key, if you're using virtual keys
@@ -173,3 +176,81 @@ Here's how to call a XAI model with the LiteLLM Proxy Server
+## Reasoning Usage
+
+LiteLLM supports reasoning usage for xAI models.
+
+
+
+
+
+```python showLineNumbers title="reasoning with xai/grok-3-mini-beta"
+import litellm
+response = litellm.completion(
+ model="xai/grok-3-mini-beta",
+ messages=[{"role": "user", "content": "What is 101*3?"}],
+ reasoning_effort="low",
+)
+
+print("Reasoning Content:")
+print(response.choices[0].message.reasoning_content)
+
+print("\nFinal Response:")
+print(completion.choices[0].message.content)
+
+print("\nNumber of completion tokens (input):")
+print(completion.usage.completion_tokens)
+
+print("\nNumber of reasoning tokens (input):")
+print(completion.usage.completion_tokens_details.reasoning_tokens)
+```
+
+
+
+
+```python showLineNumbers title="reasoning with xai/grok-3-mini-beta"
+import openai
+client = openai.OpenAI(
+ api_key="sk-1234", # pass litellm proxy key, if you're using virtual keys
+ base_url="http://0.0.0.0:4000" # litellm-proxy-base url
+)
+
+response = client.chat.completions.create(
+ model="xai/grok-3-mini-beta",
+ messages=[{"role": "user", "content": "What is 101*3?"}],
+ reasoning_effort="low",
+)
+
+print("Reasoning Content:")
+print(response.choices[0].message.reasoning_content)
+
+print("\nFinal Response:")
+print(completion.choices[0].message.content)
+
+print("\nNumber of completion tokens (input):")
+print(completion.usage.completion_tokens)
+
+print("\nNumber of reasoning tokens (input):")
+print(completion.usage.completion_tokens_details.reasoning_tokens)
+```
+
+
+
+
+**Example Response:**
+
+```shell
+Reasoning Content:
+Let me calculate 101 multiplied by 3:
+101 * 3 = 303.
+I can double-check that: 100 * 3 is 300, and 1 * 3 is 3, so 300 + 3 = 303. Yes, that's correct.
+
+Final Response:
+The result of 101 multiplied by 3 is 303.
+
+Number of completion tokens (output):
+14
+
+Number of reasoning tokens (output):
+310
+```
diff --git a/docs/my-website/docs/proxy/admin_ui_sso.md b/docs/my-website/docs/proxy/admin_ui_sso.md
index b7f8ddd585..0bbba57fd9 100644
--- a/docs/my-website/docs/proxy/admin_ui_sso.md
+++ b/docs/my-website/docs/proxy/admin_ui_sso.md
@@ -147,11 +147,16 @@ Some SSO providers require a specific redirect url for login and logout. You can
- Login: `/sso/key/generate`
- Logout: ``
+Here's the env var to set the logout URL on the proxy:
+```bash
+PROXY_LOGOUT_URL="https://www.google.com"
+```
+
#### Step 3. Set `PROXY_BASE_URL` in your .env
Set this in your .env (so the proxy can set the correct redirect url)
```shell
-PROXY_BASE_URL=https://litellm-api.up.railway.app/
+PROXY_BASE_URL=https://litellm-api.up.railway.app
```
#### Step 4. Test flow
diff --git a/docs/my-website/docs/proxy/call_hooks.md b/docs/my-website/docs/proxy/call_hooks.md
index 8ea220cfa1..a7b0afcc18 100644
--- a/docs/my-website/docs/proxy/call_hooks.md
+++ b/docs/my-website/docs/proxy/call_hooks.md
@@ -70,6 +70,21 @@ class MyCustomHandler(CustomLogger): # https://docs.litellm.ai/docs/observabilit
response: str,
):
pass
+
+    async def async_post_call_streaming_iterator_hook(
+ self,
+ user_api_key_dict: UserAPIKeyAuth,
+ response: Any,
+ request_data: dict,
+ ) -> AsyncGenerator[ModelResponseStream, None]:
+ """
+ Passes the entire stream to the guardrail
+
+ This is useful for plugins that need to see the entire stream.
+ """
+ async for item in response:
+ yield item
+
proxy_handler_instance = MyCustomHandler()
```
diff --git a/docs/my-website/docs/proxy/config_settings.md b/docs/my-website/docs/proxy/config_settings.md
index 9e24437449..1e3c800b03 100644
--- a/docs/my-website/docs/proxy/config_settings.md
+++ b/docs/my-website/docs/proxy/config_settings.md
@@ -147,6 +147,7 @@ general_settings:
|------|------|-------------|
| completion_model | string | The default model to use for completions when `model` is not specified in the request |
| disable_spend_logs | boolean | If true, turns off writing each transaction to the database |
+| disable_spend_updates | boolean | If true, turns off all spend updates to the DB. Including key/user/team spend updates. |
| disable_master_key_return | boolean | If true, turns off returning master key on UI. (checked on '/user/info' endpoint) |
| disable_retry_on_max_parallel_request_limit_error | boolean | If true, turns off retries when max parallel request limit is reached |
| disable_reset_budget | boolean | If true, turns off reset budget scheduled task |
@@ -159,7 +160,7 @@ general_settings:
| database_url | string | The URL for the database connection [Set up Virtual Keys](virtual_keys) |
| database_connection_pool_limit | integer | The limit for database connection pool [Setting DB Connection Pool limit](#configure-db-pool-limits--connection-timeouts) |
| database_connection_timeout | integer | The timeout for database connections in seconds [Setting DB Connection Pool limit, timeout](#configure-db-pool-limits--connection-timeouts) |
-| allow_requests_on_db_unavailable | boolean | If true, allows requests to succeed even if DB is unreachable. **Only use this if running LiteLLM in your VPC** This will allow requests to work even when LiteLLM cannot connect to the DB to verify a Virtual Key |
+| allow_requests_on_db_unavailable | boolean | If true, allows requests to succeed even if DB is unreachable. **Only use this if running LiteLLM in your VPC** This will allow requests to work even when LiteLLM cannot connect to the DB to verify a Virtual Key [Doc on graceful db unavailability](prod#5-if-running-litellm-on-vpc-gracefully-handle-db-unavailability) |
| custom_auth | string | Write your own custom authentication logic [Doc Custom Auth](virtual_keys#custom-auth) |
| max_parallel_requests | integer | The max parallel requests allowed per deployment |
| global_max_parallel_requests | integer | The max parallel requests allowed on the proxy overall |
@@ -177,7 +178,7 @@ general_settings:
| use_x_forwarded_for | str | If true, uses the X-Forwarded-For header to get the client IP address |
| service_account_settings | List[Dict[str, Any]] | Set `service_account_settings` if you want to create settings that only apply to service account keys (Doc on service accounts)[./service_accounts.md] |
| image_generation_model | str | The default model to use for image generation - ignores model set in request |
-| store_model_in_db | boolean | If true, allows `/model/new` endpoint to store model information in db. Endpoint disabled by default. [Doc on `/model/new` endpoint](./model_management.md#create-a-new-model) |
+| store_model_in_db | boolean | If true, enables storing model + credential information in the DB. |
| store_prompts_in_spend_logs | boolean | If true, allows prompts and responses to be stored in the spend logs table. |
| max_request_size_mb | int | The maximum size for requests in MB. Requests above this size will be rejected. |
| max_response_size_mb | int | The maximum size for responses in MB. LLM Responses above this size will not be sent. |
@@ -298,6 +299,9 @@ router_settings:
|------|-------------|
| ACTIONS_ID_TOKEN_REQUEST_TOKEN | Token for requesting ID in GitHub Actions
| ACTIONS_ID_TOKEN_REQUEST_URL | URL for requesting ID token in GitHub Actions
+| AGENTOPS_ENVIRONMENT | Environment for AgentOps logging integration
+| AGENTOPS_API_KEY | API Key for AgentOps logging integration
+| AGENTOPS_SERVICE_NAME | Service Name for AgentOps logging integration
| AISPEND_ACCOUNT_ID | Account ID for AI Spend
| AISPEND_API_KEY | API Key for AI Spend
| ALLOWED_EMAIL_DOMAINS | List of email domains allowed for access
@@ -322,6 +326,9 @@ router_settings:
| AZURE_AUTHORITY_HOST | Azure authority host URL
| AZURE_CLIENT_ID | Client ID for Azure services
| AZURE_CLIENT_SECRET | Client secret for Azure services
+| AZURE_TENANT_ID | Tenant ID for Azure Active Directory
+| AZURE_USERNAME | Username for Azure services, use in conjunction with AZURE_PASSWORD for azure ad token with basic username/password workflow
+| AZURE_PASSWORD | Password for Azure services, use in conjunction with AZURE_USERNAME for azure ad token with basic username/password workflow
| AZURE_FEDERATED_TOKEN_FILE | File path to Azure federated token
| AZURE_KEY_VAULT_URI | URI for Azure Key Vault
| AZURE_STORAGE_ACCOUNT_KEY | The Azure Storage Account Key to use for Authentication to Azure Blob Storage logging
@@ -330,7 +337,6 @@ router_settings:
| AZURE_STORAGE_TENANT_ID | The Application Tenant ID to use for Authentication to Azure Blob Storage logging
| AZURE_STORAGE_CLIENT_ID | The Application Client ID to use for Authentication to Azure Blob Storage logging
| AZURE_STORAGE_CLIENT_SECRET | The Application Client Secret to use for Authentication to Azure Blob Storage logging
-| AZURE_TENANT_ID | Tenant ID for Azure Active Directory
| BERRISPEND_ACCOUNT_ID | Account ID for BerriSpend service
| BRAINTRUST_API_KEY | API key for Braintrust integration
| CIRCLE_OIDC_TOKEN | OpenID Connect token for CircleCI
@@ -405,6 +411,7 @@ router_settings:
| HELICONE_API_KEY | API key for Helicone service
| HOSTNAME | Hostname for the server, this will be [emitted to `datadog` logs](https://docs.litellm.ai/docs/proxy/logging#datadog)
| HUGGINGFACE_API_BASE | Base URL for Hugging Face API
+| HUGGINGFACE_API_KEY | API key for Hugging Face API
| IAM_TOKEN_DB_AUTH | IAM token for database authentication
| JSON_LOGS | Enable JSON formatted logging
| JWT_AUDIENCE | Expected audience for JWT tokens
@@ -431,6 +438,7 @@ router_settings:
| LITERAL_BATCH_SIZE | Batch size for Literal operations
| LITELLM_DONT_SHOW_FEEDBACK_BOX | Flag to hide feedback box in LiteLLM UI
| LITELLM_DROP_PARAMS | Parameters to drop in LiteLLM requests
+| LITELLM_MODIFY_PARAMS | Parameters to modify in LiteLLM requests
| LITELLM_EMAIL | Email associated with LiteLLM account
| LITELLM_GLOBAL_MAX_PARALLEL_REQUEST_RETRIES | Maximum retries for parallel requests in LiteLLM
| LITELLM_GLOBAL_MAX_PARALLEL_REQUEST_RETRY_TIMEOUT | Timeout for retries of parallel requests in LiteLLM
@@ -444,9 +452,12 @@ router_settings:
| LITELLM_TOKEN | Access token for LiteLLM integration
| LITELLM_PRINT_STANDARD_LOGGING_PAYLOAD | If true, prints the standard logging payload to the console - useful for debugging
| LOGFIRE_TOKEN | Token for Logfire logging service
+| MISTRAL_API_BASE | Base URL for Mistral API
+| MISTRAL_API_KEY | API key for Mistral API
| MICROSOFT_CLIENT_ID | Client ID for Microsoft services
| MICROSOFT_CLIENT_SECRET | Client secret for Microsoft services
| MICROSOFT_TENANT | Tenant ID for Microsoft Azure
+| MICROSOFT_SERVICE_PRINCIPAL_ID | Service Principal ID for Microsoft Enterprise Application. (This is an advanced feature if you want litellm to auto-assign members to Litellm Teams based on their Microsoft Entra ID Groups)
| NO_DOCS | Flag to disable documentation generation
| NO_PROXY | List of addresses to bypass proxy
| OAUTH_TOKEN_INFO_ENDPOINT | Endpoint for OAuth token info retrieval
@@ -478,7 +489,7 @@ router_settings:
| PROXY_ADMIN_ID | Admin identifier for proxy server
| PROXY_BASE_URL | Base URL for proxy service
| PROXY_LOGOUT_URL | URL for logging out of the proxy service
-| PROXY_MASTER_KEY | Master key for proxy authentication
+| LITELLM_MASTER_KEY | Master key for proxy authentication
| QDRANT_API_BASE | Base URL for Qdrant API
| QDRANT_API_KEY | API key for Qdrant service
| QDRANT_URL | Connection URL for Qdrant database
@@ -499,9 +510,11 @@ router_settings:
| SMTP_USERNAME | Username for SMTP authentication (do not set if SMTP does not require auth)
| SPEND_LOGS_URL | URL for retrieving spend logs
| SSL_CERTIFICATE | Path to the SSL certificate file
+| SSL_SECURITY_LEVEL | [BETA] Security level for SSL/TLS connections. E.g. `DEFAULT@SECLEVEL=1`
| SSL_VERIFY | Flag to enable or disable SSL certificate verification
| SUPABASE_KEY | API key for Supabase service
| SUPABASE_URL | Base URL for Supabase instance
+| STORE_MODEL_IN_DB | If true, enables storing model + credential information in the DB.
| TEST_EMAIL_ADDRESS | Email address used for testing purposes
| UI_LOGO_PATH | Path to the logo image used in the UI
| UI_PASSWORD | Password for accessing the UI
@@ -512,5 +525,5 @@ router_settings:
| UPSTREAM_LANGFUSE_RELEASE | Release version identifier for upstream Langfuse
| UPSTREAM_LANGFUSE_SECRET_KEY | Secret key for upstream Langfuse authentication
| USE_AWS_KMS | Flag to enable AWS Key Management Service for encryption
+| USE_PRISMA_MIGRATE | Flag to use prisma migrate instead of prisma db push. Recommended for production environments.
| WEBHOOK_URL | URL for receiving webhooks from external services
-
diff --git a/docs/my-website/docs/proxy/cost_tracking.md b/docs/my-website/docs/proxy/cost_tracking.md
index 7f90273c39..5b17e565a5 100644
--- a/docs/my-website/docs/proxy/cost_tracking.md
+++ b/docs/my-website/docs/proxy/cost_tracking.md
@@ -6,6 +6,8 @@ import Image from '@theme/IdealImage';
Track spend for keys, users, and teams across 100+ LLMs.
+LiteLLM automatically tracks spend for all known models. See our [model cost map](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json)
+
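+You can also inspect and use the cost map directly from the Python SDK. A minimal sketch (the `gpt-4o-mini` entry is just an example of a mapped model):
+
+```python
+import litellm
+from litellm import completion, completion_cost
+
+# per-token pricing comes from the bundled model cost map
+print(litellm.model_cost["gpt-4o-mini"]["input_cost_per_token"])
+
+# cost for a single completion, calculated from the same cost map
+response = completion(
+    model="gpt-4o-mini",
+    messages=[{"role": "user", "content": "hi"}],
+)
+print(completion_cost(completion_response=response))
+```
+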
### How to Track Spend with LiteLLM
**Step 1**
@@ -35,10 +37,10 @@ response = client.chat.completions.create(
"content": "this is a test request, write a short poem"
}
],
- user="palantir",
- extra_body={
+ user="palantir", # OPTIONAL: pass user to track spend by user
+ extra_body={
"metadata": {
- "tags": ["jobID:214590dsff09fds", "taskName:run_page_classification"]
+ "tags": ["jobID:214590dsff09fds", "taskName:run_page_classification"] # ENTERPRISE: pass tags to track spend by tags
}
}
)
@@ -63,9 +65,9 @@ curl --location 'http://0.0.0.0:4000/chat/completions' \
"content": "what llm are you"
}
],
- "user": "palantir",
+ "user": "palantir", # OPTIONAL: pass user to track spend by user
"metadata": {
- "tags": ["jobID:214590dsff09fds", "taskName:run_page_classification"]
+ "tags": ["jobID:214590dsff09fds", "taskName:run_page_classification"] # ENTERPRISE: pass tags to track spend by tags
}
}'
```
@@ -90,7 +92,7 @@ chat = ChatOpenAI(
user="palantir",
extra_body={
"metadata": {
- "tags": ["jobID:214590dsff09fds", "taskName:run_page_classification"]
+ "tags": ["jobID:214590dsff09fds", "taskName:run_page_classification"] # ENTERPRISE: pass tags to track spend by tags
}
}
)
@@ -150,8 +152,112 @@ Navigate to the Usage Tab on the LiteLLM UI (found on https://your-proxy-endpoin
-## ✨ (Enterprise) API Endpoints to get Spend
-### Getting Spend Reports - To Charge Other Teams, Customers, Users
+### Allowing Non-Proxy Admins to access `/spend` endpoints
+
+Use this when you want non-proxy admins to access `/spend` endpoints
+
+:::info
+
+Schedule a [meeting with us to get your Enterprise License](https://calendly.com/d/4mp-gd3-k5k/litellm-1-1-onboarding-chat)
+
+:::
+
+##### Create Key
+Create a key with `permissions={"get_spend_routes": true}`
+```shell
+curl --location 'http://0.0.0.0:4000/key/generate' \
+ --header 'Authorization: Bearer sk-1234' \
+ --header 'Content-Type: application/json' \
+ --data '{
+ "permissions": {"get_spend_routes": true}
+ }'
+```
+
+##### Use generated key on `/spend` endpoints
+
+Access spend routes with the newly generated key
+```shell
+curl -X GET 'http://localhost:4000/global/spend/report?start_date=2024-04-01&end_date=2024-06-30' \
+ -H 'Authorization: Bearer sk-H16BKvrSNConSsBYLGc_7A'
+```
+
+
+
+#### Reset Team, API Key Spend - MASTER KEY ONLY
+
+Use `/global/spend/reset` if you want to:
+- Reset the Spend for all API Keys, Teams. The `spend` for ALL Teams and Keys in `LiteLLM_TeamTable` and `LiteLLM_VerificationToken` will be set to `spend=0`
+
+- LiteLLM will maintain all the logs in `LiteLLMSpendLogs` for Auditing Purposes
+
+##### Request
+Only the `LITELLM_MASTER_KEY` you set can access this route
+```shell
+curl -X POST \
+ 'http://localhost:4000/global/spend/reset' \
+ -H 'Authorization: Bearer sk-1234' \
+ -H 'Content-Type: application/json'
+```
+
+##### Expected Responses
+
+```shell
+{"message":"Spend for all API Keys and Teams reset successfully","status":"success"}
+```
+
+## Daily Spend Breakdown API
+
+Retrieve granular daily usage data for a user (by model, provider, and API key) with a single endpoint.
+
+Example Request:
+
+```shell title="Daily Spend Breakdown API" showLineNumbers
+curl -L -X GET 'http://localhost:4000/user/daily/activity?start_date=2025-03-20&end_date=2025-03-27' \
+-H 'Authorization: Bearer sk-...'
+```
+
+```json title="Daily Spend Breakdown API Response" showLineNumbers
+{
+ "results": [
+ {
+ "date": "2025-03-27",
+ "metrics": {
+ "spend": 0.0177072,
+ "prompt_tokens": 111,
+ "completion_tokens": 1711,
+ "total_tokens": 1822,
+ "api_requests": 11
+ },
+      "breakdown": {
+        "models": {
+          "gpt-4o-mini": {
+            "spend": 1.095e-05,
+            "prompt_tokens": 37,
+            "completion_tokens": 9,
+            "total_tokens": 46,
+            "api_requests": 1
+          }
+        },
+        "providers": { "openai": { ... }, "azure_ai": { ... } },
+        "api_keys": { "3126b6eaf1...": { ... } }
+      }
+    }
+  ],
+ "metadata": {
+ "total_spend": 0.7274667,
+ "total_prompt_tokens": 280990,
+ "total_completion_tokens": 376674,
+ "total_api_requests": 14
+ }
+}
+```
+
+### API Reference
+
+See our [Swagger API](https://litellm-api.up.railway.app/#/Budget%20%26%20Spend%20Tracking/get_user_daily_activity_user_daily_activity_get) for more details on the `/user/daily/activity` endpoint
+
+## ✨ (Enterprise) Generate Spend Reports
+
+Use this to charge other teams, customers, and users.
Use the `/global/spend/report` endpoint to get spend reports
@@ -470,105 +576,6 @@ curl -X GET 'http://localhost:4000/global/spend/report?start_date=2024-04-01&end
-### Allowing Non-Proxy Admins to access `/spend` endpoints
-
-Use this when you want non-proxy admins to access `/spend` endpoints
-
-:::info
-
-Schedule a [meeting with us to get your Enterprise License](https://calendly.com/d/4mp-gd3-k5k/litellm-1-1-onboarding-chat)
-
-:::
-
-##### Create Key
-Create Key with with `permissions={"get_spend_routes": true}`
-```shell
-curl --location 'http://0.0.0.0:4000/key/generate' \
- --header 'Authorization: Bearer sk-1234' \
- --header 'Content-Type: application/json' \
- --data '{
- "permissions": {"get_spend_routes": true}
- }'
-```
-
-##### Use generated key on `/spend` endpoints
-
-Access spend Routes with newly generate keys
-```shell
-curl -X GET 'http://localhost:4000/global/spend/report?start_date=2024-04-01&end_date=2024-06-30' \
- -H 'Authorization: Bearer sk-H16BKvrSNConSsBYLGc_7A'
-```
-
-
-
-#### Reset Team, API Key Spend - MASTER KEY ONLY
-
-Use `/global/spend/reset` if you want to:
-- Reset the Spend for all API Keys, Teams. The `spend` for ALL Teams and Keys in `LiteLLM_TeamTable` and `LiteLLM_VerificationToken` will be set to `spend=0`
-
-- LiteLLM will maintain all the logs in `LiteLLMSpendLogs` for Auditing Purposes
-
-##### Request
-Only the `LITELLM_MASTER_KEY` you set can access this route
-```shell
-curl -X POST \
- 'http://localhost:4000/global/spend/reset' \
- -H 'Authorization: Bearer sk-1234' \
- -H 'Content-Type: application/json'
-```
-
-##### Expected Responses
-
-```shell
-{"message":"Spend for all API Keys and Teams reset successfully","status":"success"}
-```
-
-
-
-
-## Spend Tracking for Azure OpenAI Models
-
-Set base model for cost tracking azure image-gen call
-
-#### Image Generation
-
-```yaml
-model_list:
- - model_name: dall-e-3
- litellm_params:
- model: azure/dall-e-3-test
- api_version: 2023-06-01-preview
- api_base: https://openai-gpt-4-test-v-1.openai.azure.com/
- api_key: os.environ/AZURE_API_KEY
- base_model: dall-e-3 # 👈 set dall-e-3 as base model
- model_info:
- mode: image_generation
-```
-
-#### Chat Completions / Embeddings
-
-**Problem**: Azure returns `gpt-4` in the response when `azure/gpt-4-1106-preview` is used. This leads to inaccurate cost tracking
-
-**Solution** ✅ : Set `base_model` on your config so litellm uses the correct model for calculating azure cost
-
-Get the base model name from [here](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json)
-
-Example config with `base_model`
-```yaml
-model_list:
- - model_name: azure-gpt-3.5
- litellm_params:
- model: azure/chatgpt-v-2
- api_base: os.environ/AZURE_API_BASE
- api_key: os.environ/AZURE_API_KEY
- api_version: "2023-07-01-preview"
- model_info:
- base_model: azure/gpt-4-1106-preview
-```
-
-## Custom Input/Output Pricing
-
-👉 Head to [Custom Input/Output Pricing](https://docs.litellm.ai/docs/proxy/custom_pricing) to setup custom pricing or your models
## ✨ Custom Spend Log metadata
@@ -587,4 +594,5 @@ Logging specific key,value pairs in spend logs metadata is an enterprise feature
Tracking spend with Custom tags is an enterprise feature. [See here](./enterprise.md#tracking-spend-for-custom-tags)
-:::
\ No newline at end of file
+:::
+
diff --git a/docs/my-website/docs/proxy/custom_pricing.md b/docs/my-website/docs/proxy/custom_pricing.md
index 16d634dee4..792d5c26dd 100644
--- a/docs/my-website/docs/proxy/custom_pricing.md
+++ b/docs/my-website/docs/proxy/custom_pricing.md
@@ -26,10 +26,12 @@ model_list:
- model_name: sagemaker-completion-model
litellm_params:
model: sagemaker/berri-benchmarking-Llama-2-70b-chat-hf-4
+ model_info:
input_cost_per_second: 0.000420
- model_name: sagemaker-embedding-model
litellm_params:
model: sagemaker/berri-benchmarking-gpt-j-6b-fp16
+ model_info:
input_cost_per_second: 0.000420
```
@@ -55,11 +57,55 @@ model_list:
api_key: os.environ/AZURE_API_KEY
api_base: os.environ/AZURE_API_BASE
      api_version: os.environ/AZURE_API_VERSION
+ model_info:
input_cost_per_token: 0.000421 # 👈 ONLY to track cost per token
output_cost_per_token: 0.000520 # 👈 ONLY to track cost per token
```
-### Debugging
+## Override Model Cost Map
+
+You can override [our model cost map](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json) with your own custom pricing for a mapped model.
+
+Just add a `model_info` key to your model in the config, and override the desired keys.
+
+Example: Override Anthropic's model cost map for the `prod/claude-3-5-sonnet-20241022` model.
+
+```yaml
+model_list:
+ - model_name: "prod/claude-3-5-sonnet-20241022"
+ litellm_params:
+ model: "anthropic/claude-3-5-sonnet-20241022"
+ api_key: os.environ/ANTHROPIC_PROD_API_KEY
+ model_info:
+ input_cost_per_token: 0.000006
+ output_cost_per_token: 0.00003
+ cache_creation_input_token_cost: 0.0000075
+ cache_read_input_token_cost: 0.0000006
+```
+
+## Set 'base_model' for Cost Tracking (e.g. Azure deployments)
+
+**Problem**: Azure returns `gpt-4` in the response when `azure/gpt-4-1106-preview` is used. This leads to inaccurate cost tracking
+
+**Solution** ✅ : Set `base_model` on your config so litellm uses the correct model for calculating azure cost
+
+Get the base model name from [here](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json)
+
+Example config with `base_model`
+```yaml
+model_list:
+ - model_name: azure-gpt-3.5
+ litellm_params:
+ model: azure/chatgpt-v-2
+ api_base: os.environ/AZURE_API_BASE
+ api_key: os.environ/AZURE_API_KEY
+ api_version: "2023-07-01-preview"
+ model_info:
+ base_model: azure/gpt-4-1106-preview
+```
+
+
+## Debugging
If your custom pricing is not being used or you're seeing errors, please check the following:
diff --git a/docs/my-website/docs/proxy/custom_prompt_management.md b/docs/my-website/docs/proxy/custom_prompt_management.md
new file mode 100644
index 0000000000..72a7333276
--- /dev/null
+++ b/docs/my-website/docs/proxy/custom_prompt_management.md
@@ -0,0 +1,194 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Custom Prompt Management
+
+Connect LiteLLM to your prompt management system with custom hooks.
+
+## Overview
+
+
+
+
+
+## How it works
+
+## Quick Start
+
+### 1. Create Your Custom Prompt Manager
+
+Create a class that inherits from `CustomPromptManagement` to handle prompt retrieval and formatting:
+
+**Example Implementation**
+
+Create a new file called `custom_prompt.py` and add this code. The key method here is `get_chat_completion_prompt`, where you can implement custom logic to retrieve and format prompts based on the `prompt_id` and `prompt_variables`.
+
+```python
+from typing import List, Tuple, Optional
+from litellm.integrations.custom_prompt_management import CustomPromptManagement
+from litellm.types.llms.openai import AllMessageValues
+from litellm.types.utils import StandardCallbackDynamicParams
+
+class MyCustomPromptManagement(CustomPromptManagement):
+ def get_chat_completion_prompt(
+ self,
+ model: str,
+ messages: List[AllMessageValues],
+ non_default_params: dict,
+ prompt_id: str,
+ prompt_variables: Optional[dict],
+ dynamic_callback_params: StandardCallbackDynamicParams,
+ ) -> Tuple[str, List[AllMessageValues], dict]:
+ """
+ Retrieve and format prompts based on prompt_id.
+
+ Returns:
+ - model: The model to use
+ - messages: The formatted messages
+ - non_default_params: Optional parameters like temperature
+ """
+ # Example matching the diagram: Add system message for prompt_id "1234"
+ if prompt_id == "1234":
+ # Prepend system message while preserving existing messages
+ new_messages = [
+ {"role": "system", "content": "Be a good Bot!"},
+ ] + messages
+ return model, new_messages, non_default_params
+
+ # Default: Return original messages if no prompt_id match
+ return model, messages, non_default_params
+
+prompt_management = MyCustomPromptManagement()
+```
+
+### 2. Configure Your Prompt Manager in LiteLLM `config.yaml`
+
+```yaml
+model_list:
+ - model_name: gpt-4
+ litellm_params:
+ model: openai/gpt-4
+ api_key: os.environ/OPENAI_API_KEY
+
+litellm_settings:
+ callbacks: custom_prompt.prompt_management # sets litellm.callbacks = [prompt_management]
+```
+
+### 3. Start LiteLLM Gateway
+
+
+
+
+Mount your `custom_prompt.py` on the LiteLLM Docker container.
+
+```shell
+docker run -d \
+ -p 4000:4000 \
+ -e OPENAI_API_KEY=$OPENAI_API_KEY \
+ --name my-app \
+ -v $(pwd)/my_config.yaml:/app/config.yaml \
+  -v $(pwd)/custom_prompt.py:/app/custom_prompt.py \
+ my-app:latest \
+ --config /app/config.yaml \
+ --port 4000 \
+ --detailed_debug \
+  --detailed_debug
+
+
+
+
+
+```shell
+litellm --config config.yaml --detailed_debug
+```
+
+
+
+
+### 4. Test Your Custom Prompt Manager
+
+When you pass `prompt_id="1234"`, the custom prompt manager will add a system message "Be a good Bot!" to your conversation:
+
+
+
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="sk-1234",
+ base_url="http://0.0.0.0:4000"
+)
+
+response = client.chat.completions.create(
+    model="gemini-1.5-pro",
+    messages=[{"role": "user", "content": "hi"}],
+    extra_body={"prompt_id": "1234"}  # the OpenAI SDK forwards custom params via extra_body
+)
+
+print(response.choices[0].message.content)
+```
+
+
+
+
+```python
+from langchain.chat_models import ChatOpenAI
+from langchain.schema import HumanMessage
+
+chat = ChatOpenAI(
+ model="gpt-4",
+ openai_api_key="sk-1234",
+ openai_api_base="http://0.0.0.0:4000",
+ extra_body={
+ "prompt_id": "1234"
+ }
+)
+
+messages = [HumanMessage(content="hi")]
+response = chat(messages)
+
+print(response.content)
+```
+
+
+
+
+```shell
+curl -X POST http://0.0.0.0:4000/v1/chat/completions \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer sk-1234" \
+-d '{
+ "model": "gemini-1.5-pro",
+ "messages": [{"role": "user", "content": "hi"}],
+ "prompt_id": "1234"
+}'
+```
+
+
+
+The request will be transformed from:
+```json
+{
+ "model": "gemini-1.5-pro",
+ "messages": [{"role": "user", "content": "hi"}],
+ "prompt_id": "1234"
+}
+```
+
+To:
+```json
+{
+ "model": "gemini-1.5-pro",
+ "messages": [
+ {"role": "system", "content": "Be a good Bot!"},
+ {"role": "user", "content": "hi"}
+ ]
+}
+```
+
+
diff --git a/docs/my-website/docs/proxy/db_deadlocks.md b/docs/my-website/docs/proxy/db_deadlocks.md
new file mode 100644
index 0000000000..332374995d
--- /dev/null
+++ b/docs/my-website/docs/proxy/db_deadlocks.md
@@ -0,0 +1,86 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# High Availability Setup (Resolve DB Deadlocks)
+
+Resolve any Database Deadlocks you see in high traffic by using this setup
+
+## What causes the problem?
+
+LiteLLM writes `UPDATE` and `UPSERT` queries to the DB. When using 10+ instances of LiteLLM, these queries can cause deadlocks since each instance could simultaneously attempt to update the same `user_id`, `team_id`, `key` etc.
+
+## How the high availability setup fixes the problem
+- All instances will write to a Redis queue instead of the DB.
+- A single instance will acquire a lock on the DB and flush the redis queue to the DB.
+
+
+## How it works
+
+### Stage 1. Each instance writes updates to redis
+
+Each instance accumulates the spend updates for a key, user, team, etc. and writes the updates to a redis queue.
+
+
+
+Each instance writes updates to redis
+
+
+
+### Stage 2. A single instance flushes the redis queue to the DB
+
+A single instance will acquire a lock on the DB and flush all elements in the redis queue to the DB.
+
+- 1 instance will attempt to acquire the lock for the DB update job
+- The status of the lock is stored in redis
+- If the instance acquires the lock to write to DB
+ - It will read all updates from redis
+ - Aggregate all updates into 1 transaction
+ - Write updates to DB
+ - Release the lock
+- Note: Only one instance can acquire the lock at a time; this limits the number of instances that can write to the DB at once. A sketch of this pattern is shown below.
+
+
+
+
+A single instance flushes the redis queue to the DB
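+
+A minimal sketch of the lock-and-flush pattern described above (illustrative only - the queue name, lock name, and DB helper are assumptions, not LiteLLM's internal code):
+
+```python
+import json
+import redis
+
+r = redis.Redis(host="localhost", port=6379)
+
+def enqueue_spend_update(key_id: str, spend: float):
+    # Stage 1: every instance pushes its spend delta onto a shared redis queue
+    r.rpush("spend_update_queue", json.dumps({"key_id": key_id, "spend": spend}))
+
+def flush_queue_to_db():
+    # Stage 2: only the instance that acquires the lock flushes the queue
+    lock = r.lock("db_spend_update_lock", timeout=60)
+    if not lock.acquire(blocking=False):
+        return  # another instance holds the lock
+    try:
+        updates: dict[str, float] = {}
+        while (item := r.lpop("spend_update_queue")) is not None:
+            update = json.loads(item)
+            # aggregate all deltas for the same key into a single value
+            updates[update["key_id"]] = updates.get(update["key_id"], 0.0) + update["spend"]
+        write_updates_in_one_transaction(updates)  # hypothetical DB helper: one aggregated transaction
+    finally:
+        lock.release()
+```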
+
+
+
+## Usage
+
+### Required components
+
+- Redis
+- Postgres
+
+### Setup on LiteLLM config
+
+You can enable using the redis buffer by setting `use_redis_transaction_buffer: true` in the `general_settings` section of your `proxy_config.yaml` file.
+
+Note: This setup requires litellm to be connected to a redis instance.
+
+```yaml showLineNumbers title="litellm proxy_config.yaml"
+general_settings:
+ use_redis_transaction_buffer: true
+
+litellm_settings:
+ cache: True
+ cache_params:
+ type: redis
+ supported_call_types: [] # Optional: Set cache for proxy, but not on the actual llm api call
+```
+
+## Monitoring
+
+LiteLLM emits the following prometheus metrics to monitor the health/status of the in-memory buffer and redis buffer.
+
+
+| Metric Name | Description | Storage Type |
+|-----------------------------------------------------|-----------------------------------------------------------------------------|--------------|
+| `litellm_pod_lock_manager_size` | Indicates which pod has the lock to write updates to the database. | Redis |
+| `litellm_in_memory_daily_spend_update_queue_size` | Number of items in the in-memory daily spend update queue. These are the aggregate spend logs for each user. | In-Memory |
+| `litellm_redis_daily_spend_update_queue_size` | Number of items in the Redis daily spend update queue. These are the aggregate spend logs for each user. | Redis |
+| `litellm_in_memory_spend_update_queue_size` | In-memory aggregate spend values for keys, users, teams, team members, etc.| In-Memory |
+| `litellm_redis_spend_update_queue_size` | Redis aggregate spend values for keys, users, teams, etc. | Redis |
+
diff --git a/docs/my-website/docs/proxy/guardrails/aim_security.md b/docs/my-website/docs/proxy/guardrails/aim_security.md
index 8f612b9dbe..d76c4e0c1c 100644
--- a/docs/my-website/docs/proxy/guardrails/aim_security.md
+++ b/docs/my-website/docs/proxy/guardrails/aim_security.md
@@ -23,6 +23,12 @@ In the newly created guard's page, you can find a reference to the prompt policy
You can decide which detections will be enabled, and set the threshold for each detection.
+:::info
+When using LiteLLM with virtual keys, key-specific policies can be set directly in Aim's guards page by specifying the virtual key alias when creating the guard.
+
+Only the aliases of your virtual keys (and not the actual key secrets) will be sent to Aim.
+:::
+
### 3. Add Aim Guardrail on your LiteLLM config.yaml
Define your guardrails under the `guardrails` section
@@ -134,7 +140,7 @@ The above request should not be blocked, and you should receive a regular LLM re
-# Advanced
+## Advanced
Aim Guard provides user-specific Guardrail policies, enabling you to apply tailored policies to individual users.
To utilize this feature, include the end-user's email in the request payload by setting the `x-aim-user-email` header of your request.
diff --git a/docs/my-website/docs/proxy/guardrails/custom_guardrail.md b/docs/my-website/docs/proxy/guardrails/custom_guardrail.md
index 50deac511f..657ccab68e 100644
--- a/docs/my-website/docs/proxy/guardrails/custom_guardrail.md
+++ b/docs/my-website/docs/proxy/guardrails/custom_guardrail.md
Use this if you want to write code to run a custom guardrail
### 1. Write a `CustomGuardrail` Class
-A CustomGuardrail has 3 methods to enforce guardrails
+A CustomGuardrail has 4 methods to enforce guardrails
- `async_pre_call_hook` - (Optional) modify input or reject request before making LLM API call
- `async_moderation_hook` - (Optional) reject request, runs while making LLM API call (help to lower latency)
- `async_post_call_success_hook`- (Optional) apply guardrail on input/output, runs after making LLM API call
+- `async_post_call_streaming_iterator_hook` - (Optional) pass the entire stream to the guardrail
+
**[See detailed spec of methods here](#customguardrail-methods)**
@@ -128,6 +130,23 @@ class myCustomGuardrail(CustomGuardrail):
):
raise ValueError("Guardrail failed Coffee Detected")
+ async def async_post_call_streaming_iterator_hook(
+ self,
+ user_api_key_dict: UserAPIKeyAuth,
+ response: Any,
+ request_data: dict,
+ ) -> AsyncGenerator[ModelResponseStream, None]:
+ """
+ Passes the entire stream to the guardrail
+
+ This is useful for guardrails that need to see the entire response, such as PII masking.
+
+ See Aim guardrail implementation for an example - https://github.com/BerriAI/litellm/blob/d0e022cfacb8e9ebc5409bb652059b6fd97b45c0/litellm/proxy/guardrails/guardrail_hooks/aim.py#L168
+
+ Triggered by mode: 'post_call'
+ """
+ async for item in response:
+ yield item
```
diff --git a/docs/my-website/docs/prompt_injection.md b/docs/my-website/docs/proxy/guardrails/prompt_injection.md
similarity index 100%
rename from docs/my-website/docs/prompt_injection.md
rename to docs/my-website/docs/proxy/guardrails/prompt_injection.md
diff --git a/docs/my-website/docs/proxy/guardrails/quick_start.md b/docs/my-website/docs/proxy/guardrails/quick_start.md
index 6744dc6578..aeac507e0a 100644
--- a/docs/my-website/docs/proxy/guardrails/quick_start.md
+++ b/docs/my-website/docs/proxy/guardrails/quick_start.md
@@ -17,6 +17,14 @@ model_list:
api_key: os.environ/OPENAI_API_KEY
guardrails:
+ - guardrail_name: general-guard
+ litellm_params:
+ guardrail: aim
+ mode: [pre_call, post_call]
+ api_key: os.environ/AIM_API_KEY
+ api_base: os.environ/AIM_API_BASE
+ default_on: true # Optional
+
- guardrail_name: "aporia-pre-guard"
litellm_params:
guardrail: aporia # supported values: "aporia", "lakera"
@@ -45,6 +53,7 @@ guardrails:
- `pre_call` Run **before** LLM call, on **input**
- `post_call` Run **after** LLM call, on **input & output**
- `during_call` Run **during** LLM call, on **input** Same as `pre_call` but runs in parallel as LLM call. Response not returned until guardrail check completes
+- A list of the above values to run multiple modes, e.g. `mode: [pre_call, post_call]`
## 2. Start LiteLLM Gateway
@@ -569,4 +578,4 @@ guardrails: Union[
class DynamicGuardrailParams:
extra_body: Dict[str, Any] # Additional parameters for the guardrail
-```
\ No newline at end of file
+```
diff --git a/docs/my-website/docs/proxy/image_handling.md b/docs/my-website/docs/proxy/image_handling.md
new file mode 100644
index 0000000000..300ab0bc38
--- /dev/null
+++ b/docs/my-website/docs/proxy/image_handling.md
@@ -0,0 +1,21 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Image URL Handling
+
+
+
+Some LLM APIs don't support URLs for images, but do support base64-encoded strings.
+
+For those, LiteLLM will:
+
+1. Detect that a URL is being passed
+2. Check if the LLM API supports URLs
+3. If not, download the image and convert it to a base64 string
+4. Send the provider the base64 string
+
+
+LiteLLM also caches this result in-memory to reduce latency for subsequent calls.
+
+The in-memory cache has a 1MB limit.
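+
+Below is a minimal, conceptual sketch of this fallback (not LiteLLM's actual implementation). It assumes a boolean flag for provider URL support and uses `requests` to fetch the image:
+
+```python showLineNumbers title="Conceptual sketch of the URL to base64 fallback"
+import base64
+
+import requests
+
+# Toy in-memory cache: image URL -> base64 string (the real cache is size-limited)
+_image_cache = {}
+
+
+def image_content_for_provider(image_url: str, provider_supports_urls: bool) -> str:
+    """Return something the provider accepts: the URL itself, or a base64 string."""
+    if provider_supports_urls:
+        return image_url
+    if image_url in _image_cache:
+        return _image_cache[image_url]
+    resp = requests.get(image_url)
+    resp.raise_for_status()
+    b64 = base64.b64encode(resp.content).decode("utf-8")
+    _image_cache[image_url] = b64
+    return b64
+```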
\ No newline at end of file
diff --git a/docs/my-website/docs/proxy/litellm_managed_files.md b/docs/my-website/docs/proxy/litellm_managed_files.md
new file mode 100644
index 0000000000..6e40c6dd44
--- /dev/null
+++ b/docs/my-website/docs/proxy/litellm_managed_files.md
@@ -0,0 +1,279 @@
+import TabItem from '@theme/TabItem';
+import Tabs from '@theme/Tabs';
+import Image from '@theme/IdealImage';
+
+# [BETA] Unified File ID
+
+Reuse the same 'file id' across different providers.
+
+| Feature | Description | Comments |
+| --- | --- | --- |
+| Proxy | ✅ | |
+| SDK | ❌ | Requires postgres DB for storing file ids |
+| Available across all providers | ✅ | |
+
+
+
+Limitations of LiteLLM Managed Files:
+- Only works for `/chat/completions` requests.
+- Assumes just 1 model configured per model_name.
+
+Follow [here](https://github.com/BerriAI/litellm/discussions/9632) for updates on multiple-model and batches support.
+
+### 1. Setup config.yaml
+
+```yaml showLineNumbers title="config.yaml"
+model_list:
+ - model_name: "gemini-2.0-flash"
+ litellm_params:
+ model: vertex_ai/gemini-2.0-flash
+ vertex_project: my-project-id
+ vertex_location: us-central1
+ - model_name: "gpt-4o-mini-openai"
+ litellm_params:
+ model: gpt-4o-mini
+ api_key: os.environ/OPENAI_API_KEY
+```
+
+### 2. Start proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+### 3. Test it!
+
+Specify `target_model_names` to use the same file id across different providers. This is the list of model_names set via config.yaml (or 'public_model_names' on UI).
+
+```python
+target_model_names="gpt-4o-mini-openai, gemini-2.0-flash" # 👈 Specify model_names
+```
+
+Check `/v1/models` to see the list of available model names for a key.
+
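+For example, with the OpenAI SDK pointed at your proxy, you can quickly list the model names available to your key (this assumes the proxy from step 2 is running locally):
+
+```python showLineNumbers title="List model names available to your key"
+from openai import OpenAI
+
+client = OpenAI(base_url="http://0.0.0.0:4000", api_key="sk-1234")
+
+# Calls /v1/models - these are the model_names this key can use
+for model in client.models.list():
+    print(model.id)
+```
+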
+#### **Store a PDF file**
+
+```python
+import requests
+from openai import OpenAI
+
+client = OpenAI(base_url="http://0.0.0.0:4000", api_key="sk-1234", max_retries=0)
+
+
+# Download and save the PDF locally
+url = (
+ "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/2403.05530.pdf"
+)
+response = requests.get(url)
+response.raise_for_status()
+
+# Save the PDF locally
+with open("2403.05530.pdf", "wb") as f:
+ f.write(response.content)
+
+file = client.files.create(
+ file=open("2403.05530.pdf", "rb"),
+ purpose="user_data", # can be any openai 'purpose' value
+ extra_body={"target_model_names": "gpt-4o-mini-openai, gemini-2.0-flash"}, # 👈 Specify model_names
+)
+
+print(f"file id={file.id}")
+```
+
+#### **Use the same file id across different providers**
+
+
+
+
+```python
+completion = client.chat.completions.create(
+ model="gpt-4o-mini-openai",
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": "What is in this recording?"},
+ {
+ "type": "file",
+ "file": {
+ "file_id": file.id,
+ },
+ },
+ ],
+ },
+ ]
+)
+
+print(completion.choices[0].message)
+```
+
+
+
+
+
+```python
+completion = client.chat.completions.create(
+ model="gemini-2.0-flash",
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": "What is in this recording?"},
+ {
+ "type": "file",
+ "file": {
+ "file_id": file.id,
+ },
+ },
+ ],
+ },
+ ]
+)
+
+print(completion.choices[0].message)
+
+```
+
+
+
+
+### Complete Example
+
+```python
+import base64
+import requests
+from openai import OpenAI
+
+client = OpenAI(base_url="http://0.0.0.0:4000", api_key="sk-1234", max_retries=0)
+
+
+# Download and save the PDF locally
+url = (
+ "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/2403.05530.pdf"
+)
+response = requests.get(url)
+response.raise_for_status()
+
+# Save the PDF locally
+with open("2403.05530.pdf", "wb") as f:
+ f.write(response.content)
+
+# Read the local PDF file
+file = client.files.create(
+ file=open("2403.05530.pdf", "rb"),
+ purpose="user_data", # can be any openai 'purpose' value
+    extra_body={"target_model_names": "gpt-4o-mini-openai, gemini-2.0-flash"},
+)
+
+print(f"file.id: {file.id}") # 👈 Unified file id
+
+## GEMINI CALL ###
+completion = client.chat.completions.create(
+ model="gemini-2.0-flash",
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": "What is in this recording?"},
+ {
+ "type": "file",
+ "file": {
+ "file_id": file.id,
+ },
+ },
+ ],
+ },
+ ]
+)
+
+print(completion.choices[0].message)
+
+
+### OPENAI CALL ###
+completion = client.chat.completions.create(
+ model="gpt-4o-mini-openai",
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": "What is in this recording?"},
+ {
+ "type": "file",
+ "file": {
+ "file_id": file.id,
+ },
+ },
+ ],
+ },
+ ],
+)
+
+print(completion.choices[0].message)
+
+```
+
+
+### Supported Endpoints
+
+#### Create a file - `/files`
+
+```python
+import requests
+from openai import OpenAI
+
+client = OpenAI(base_url="http://0.0.0.0:4000", api_key="sk-1234", max_retries=0)
+
+# Download and save the PDF locally
+url = (
+ "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/2403.05530.pdf"
+)
+response = requests.get(url)
+response.raise_for_status()
+
+# Save the PDF locally
+with open("2403.05530.pdf", "wb") as f:
+ f.write(response.content)
+
+# Read the local PDF file
+file = client.files.create(
+ file=open("2403.05530.pdf", "rb"),
+ purpose="user_data", # can be any openai 'purpose' value
+    extra_body={"target_model_names": "gpt-4o-mini-openai, gemini-2.0-flash"},
+)
+```
+
+#### Retrieve a file - `/files/{file_id}`
+
+```python
+client = OpenAI(base_url="http://0.0.0.0:4000", api_key="sk-1234", max_retries=0)
+
+file = client.files.retrieve(file_id=file.id)
+```
+
+#### Delete a file - `/files/{file_id}/delete`
+
+```python
+client = OpenAI(base_url="http://0.0.0.0:4000", api_key="sk-1234", max_retries=0)
+
+file = client.files.delete(file_id=file.id)
+```
+
+### FAQ
+
+**1. Does LiteLLM store the file?**
+
+No, LiteLLM does not store the file. It only stores the file IDs in the Postgres DB.
+
+**2. How does LiteLLM know which file to use for a given file id?**
+
+LiteLLM stores a mapping of the LiteLLM file id to the model-specific file id in the Postgres DB. When a request comes in, LiteLLM looks up the model-specific file id and uses it in the request to the provider.
+
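+Conceptually, the stored mapping looks like the following. This is a simplified illustration, not the actual DB schema; all names below are hypothetical:
+
+```python showLineNumbers title="Conceptual file id mapping"
+# Unified (LiteLLM) file id -> provider-specific file ids, keyed by model_name.
+file_id_mapping = {
+    "litellm-file-abc123": {
+        "gpt-4o-mini-openai": "file-openai-xyz789",
+        "gemini-2.0-flash": "files/gemini-def456",
+    }
+}
+
+
+def resolve_file_id(unified_file_id: str, model_name: str) -> str:
+    """Look up the provider-specific file id for the deployment handling this request."""
+    return file_id_mapping[unified_file_id][model_name]
+```
+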
+**3. How do file deletions work?**
+
+When a file is deleted, LiteLLM deletes the mapping from the Postgres DB and deletes the file on each provider.
+
+### Architecture
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/my-website/docs/proxy/logging.md b/docs/my-website/docs/proxy/logging.md
index e13a403634..c8731dd270 100644
--- a/docs/my-website/docs/proxy/logging.md
+++ b/docs/my-website/docs/proxy/logging.md
@@ -862,7 +862,7 @@ Add the following to your env
```shell
OTEL_EXPORTER="otlp_http"
-OTEL_ENDPOINT="http:/0.0.0.0:4317"
+OTEL_ENDPOINT="http://0.0.0.0:4317"
OTEL_HEADERS="x-honeycomb-team=" # Optional
```
@@ -2501,4 +2501,4 @@ litellm_settings:
:::info
`thresholds` are not required by default, but you can tune the values to your needs.
Default values is `4` for all categories
-::: -->
\ No newline at end of file
+::: -->
diff --git a/docs/my-website/docs/proxy/logging_spec.md b/docs/my-website/docs/proxy/logging_spec.md
index 7da937e565..b314dd350b 100644
--- a/docs/my-website/docs/proxy/logging_spec.md
+++ b/docs/my-website/docs/proxy/logging_spec.md
@@ -79,6 +79,7 @@ Inherits from `StandardLoggingUserAPIKeyMetadata` and adds:
| `response_cost` | `Optional[str]` | Optional response cost |
| `additional_headers` | `Optional[StandardLoggingAdditionalHeaders]` | Additional headers |
| `batch_models` | `Optional[List[str]]` | Only set for Batches API. Lists the models used for cost calculation |
+| `litellm_model_name` | `Optional[str]` | Model name sent in request |
## StandardLoggingModelInformation
diff --git a/docs/my-website/docs/proxy/model_discovery.md b/docs/my-website/docs/proxy/model_discovery.md
new file mode 100644
index 0000000000..5790dfc520
--- /dev/null
+++ b/docs/my-website/docs/proxy/model_discovery.md
@@ -0,0 +1,108 @@
+# Model Discovery
+
+Use this to give users an accurate list of the models available behind a provider's endpoint when calling `/v1/models` for wildcard models.
+
+## Supported Models
+
+- Fireworks AI
+- OpenAI
+- Gemini
+- LiteLLM Proxy
+- Topaz
+- Anthropic
+- XAI
+- VLLM
+- Vertex AI
+
+### Usage
+
+**1. Setup config.yaml**
+
+```yaml
+model_list:
+ - model_name: xai/*
+ litellm_params:
+ model: xai/*
+ api_key: os.environ/XAI_API_KEY
+
+litellm_settings:
+ check_provider_endpoint: true # 👈 Enable checking provider endpoint for wildcard models
+```
+
+**2. Start proxy**
+
+```bash
+litellm --config /path/to/config.yaml
+
+# RUNNING on http://0.0.0.0:4000
+```
+
+**3. Call `/v1/models`**
+
+```bash
+curl -X GET "http://localhost:4000/v1/models" -H "Authorization: Bearer $LITELLM_KEY"
+```
+
+Expected response
+
+```json
+{
+ "data": [
+ {
+ "id": "xai/grok-2-1212",
+ "object": "model",
+ "created": 1677610602,
+ "owned_by": "openai"
+ },
+ {
+ "id": "xai/grok-2-vision-1212",
+ "object": "model",
+ "created": 1677610602,
+ "owned_by": "openai"
+ },
+ {
+ "id": "xai/grok-3-beta",
+ "object": "model",
+ "created": 1677610602,
+ "owned_by": "openai"
+ },
+ {
+ "id": "xai/grok-3-fast-beta",
+ "object": "model",
+ "created": 1677610602,
+ "owned_by": "openai"
+ },
+ {
+ "id": "xai/grok-3-mini-beta",
+ "object": "model",
+ "created": 1677610602,
+ "owned_by": "openai"
+ },
+ {
+ "id": "xai/grok-3-mini-fast-beta",
+ "object": "model",
+ "created": 1677610602,
+ "owned_by": "openai"
+ },
+ {
+ "id": "xai/grok-beta",
+ "object": "model",
+ "created": 1677610602,
+ "owned_by": "openai"
+ },
+ {
+ "id": "xai/grok-vision-beta",
+ "object": "model",
+ "created": 1677610602,
+ "owned_by": "openai"
+ },
+ {
+ "id": "xai/grok-2-image-1212",
+ "object": "model",
+ "created": 1677610602,
+ "owned_by": "openai"
+ }
+ ],
+ "object": "list"
+}
+```
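+
+If you're using the LiteLLM Python SDK directly (without the proxy), a similar provider-endpoint check is available via `get_valid_models`. A quick sketch, assuming `XAI_API_KEY` is set in your environment:
+
+```python showLineNumbers title="Check wildcard models via the SDK"
+from litellm import get_valid_models
+
+# Queries the provider's models endpoint instead of the static model list
+valid_models = get_valid_models(check_provider_endpoint=True, custom_llm_provider="xai")
+print(valid_models)
+```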
\ No newline at end of file
diff --git a/docs/my-website/docs/proxy/prod.md b/docs/my-website/docs/proxy/prod.md
index d3ba2d6224..2d09502d52 100644
--- a/docs/my-website/docs/proxy/prod.md
+++ b/docs/my-website/docs/proxy/prod.md
@@ -94,15 +94,31 @@ This disables the load_dotenv() functionality, which will automatically load you
## 5. If running LiteLLM on VPC, gracefully handle DB unavailability
-This will allow LiteLLM to continue to process requests even if the DB is unavailable. This is better handling for DB unavailability.
+When running LiteLLM on a VPC (and inaccessible from the public internet), you can enable graceful degradation so that request processing continues even if the database is temporarily unavailable.
+
**WARNING: Only do this if you're running LiteLLM on VPC, that cannot be accessed from the public internet.**
-```yaml
+#### Configuration
+
+```yaml showLineNumbers title="litellm config.yaml"
general_settings:
allow_requests_on_db_unavailable: True
```
+#### Expected Behavior
+
+When `allow_requests_on_db_unavailable` is set to `true`, LiteLLM will handle errors as follows:
+
+| Type of Error | Expected Behavior | Details |
+|---------------|-------------------|----------------|
+| Prisma Errors | ✅ Request will be allowed | Covers issues like DB connection resets or rejections from the DB via Prisma, the ORM used by LiteLLM. |
+| Httpx Errors | ✅ Request will be allowed | Occurs when the database is unreachable, allowing the request to proceed despite the DB outage. |
+| Pod Startup Behavior | ✅ Pods start regardless | LiteLLM Pods will start even if the database is down or unreachable, ensuring higher uptime guarantees for deployments. |
+| Health/Readiness Check | ✅ Always returns 200 OK | The `/health/readiness` endpoint returns a 200 OK status to ensure that pods remain operational even when the database is unavailable. |
+| LiteLLM Budget Errors or Model Errors | ❌ Request will be blocked | Triggered when the DB is reachable but the authentication token is invalid, lacks access, or exceeds budget limits. |
+
+
## 6. Disable spend_logs & error_logs if not using the LiteLLM UI
By default, LiteLLM writes several types of logs to the database:
@@ -161,6 +177,50 @@ export LITELLM_SALT_KEY="sk-1234"
[**See Code**](https://github.com/BerriAI/litellm/blob/036a6821d588bd36d170713dcf5a72791a694178/litellm/proxy/common_utils/encrypt_decrypt_utils.py#L15)
+
+## 9. Use `prisma migrate deploy`
+
+Use this to handle DB migrations across LiteLLM versions in production.
+
+
+
+
+```bash
+USE_PRISMA_MIGRATE="True"
+```
+
+
+
+
+
+```bash
+litellm --use_prisma_migrate
+```
+
+
+
+
+Benefits:
+
+The migrate deploy command:
+
+- **Does not** issue a warning if an already applied migration is missing from migration history
+- **Does not** detect drift (production database schema differs from migration history end state - for example, due to a hotfix)
+- **Does not** reset the database or generate artifacts (such as Prisma Client)
+- **Does not** rely on a shadow database
+
+
+### How does LiteLLM handle DB migrations in production?
+
+1. A new migration file is written to our `litellm-proxy-extras` package. [See all](https://github.com/BerriAI/litellm/tree/main/litellm-proxy-extras/litellm_proxy_extras/migrations)
+
+2. The core litellm pip package is bumped to point to the new `litellm-proxy-extras` package. This ensures older versions of LiteLLM continue to use the old migrations. [See code](https://github.com/BerriAI/litellm/blob/52b35cd8093b9ad833987b24f494586a1e923209/pyproject.toml#L58)
+
+3. When you upgrade to a new version of LiteLLM, the migration file is applied to the database. [See code](https://github.com/BerriAI/litellm/blob/52b35cd8093b9ad833987b24f494586a1e923209/litellm-proxy-extras/litellm_proxy_extras/utils.py#L42)
+
+
+
+
## Extras
### Expected Performance in Production
@@ -182,94 +242,4 @@ You should only see the following level of details in logs on the proxy server
# INFO: 192.168.2.205:11774 - "POST /chat/completions HTTP/1.1" 200 OK
# INFO: 192.168.2.205:34717 - "POST /chat/completions HTTP/1.1" 200 OK
# INFO: 192.168.2.205:29734 - "POST /chat/completions HTTP/1.1" 200 OK
-```
-
-
-### Machine Specifications to Deploy LiteLLM
-
-| Service | Spec | CPUs | Memory | Architecture | Version|
-| --- | --- | --- | --- | --- | --- |
-| Server | `t2.small`. | `1vCPUs` | `8GB` | `x86` |
-| Redis Cache | - | - | - | - | 7.0+ Redis Engine|
-
-
-### Reference Kubernetes Deployment YAML
-
-Reference Kubernetes `deployment.yaml` that was load tested by us
-
-```yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: litellm-deployment
-spec:
- replicas: 3
- selector:
- matchLabels:
- app: litellm
- template:
- metadata:
- labels:
- app: litellm
- spec:
- containers:
- - name: litellm-container
- image: ghcr.io/berriai/litellm:main-latest
- imagePullPolicy: Always
- env:
- - name: AZURE_API_KEY
- value: "d6******"
- - name: AZURE_API_BASE
- value: "https://ope******"
- - name: LITELLM_MASTER_KEY
- value: "sk-1234"
- - name: DATABASE_URL
- value: "po**********"
- args:
- - "--config"
- - "/app/proxy_config.yaml" # Update the path to mount the config file
- volumeMounts: # Define volume mount for proxy_config.yaml
- - name: config-volume
- mountPath: /app
- readOnly: true
- livenessProbe:
- httpGet:
- path: /health/liveliness
- port: 4000
- initialDelaySeconds: 120
- periodSeconds: 15
- successThreshold: 1
- failureThreshold: 3
- timeoutSeconds: 10
- readinessProbe:
- httpGet:
- path: /health/readiness
- port: 4000
- initialDelaySeconds: 120
- periodSeconds: 15
- successThreshold: 1
- failureThreshold: 3
- timeoutSeconds: 10
- volumes: # Define volume to mount proxy_config.yaml
- - name: config-volume
- configMap:
- name: litellm-config
-
-```
-
-
-Reference Kubernetes `service.yaml` that was load tested by us
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
- name: litellm-service
-spec:
- selector:
- app: litellm
- ports:
- - protocol: TCP
- port: 4000
- targetPort: 4000
- type: LoadBalancer
-```
+```
\ No newline at end of file
diff --git a/docs/my-website/docs/proxy/prometheus.md b/docs/my-website/docs/proxy/prometheus.md
index 8dff527ae5..0ce94ab962 100644
--- a/docs/my-website/docs/proxy/prometheus.md
+++ b/docs/my-website/docs/proxy/prometheus.md
@@ -95,7 +95,14 @@ Use this for for tracking per [user, key, team, etc.](virtual_keys)
### Initialize Budget Metrics on Startup
-If you want to initialize the key/team budget metrics on startup, you can set the `prometheus_initialize_budget_metrics` to `true` in the `config.yaml`
+If you want LiteLLM to emit budget metrics for all keys and teams, irrespective of whether they are receiving requests, set `prometheus_initialize_budget_metrics` to `true` in the `config.yaml`.
+
+**How this works:**
+
+- If `prometheus_initialize_budget_metrics` is set to `true`:
+  - Every 5 minutes, LiteLLM runs a cron job to read all keys and teams from the database
+  - It then emits the budget metrics for each key and team
+  - This is used to populate the budget metrics on the `/metrics` endpoint
```yaml
litellm_settings:
@@ -242,6 +249,19 @@ litellm_settings:
| `litellm_redis_fails` | Number of failed redis calls |
| `litellm_self_latency` | Histogram latency for successful litellm api call |
+#### DB Transaction Queue Health Metrics
+
+Use these metrics to monitor the health of the DB transaction queue, e.g. monitoring the size of the in-memory and Redis buffers.
+
+| Metric Name | Description | Storage Type |
+|-----------------------------------------------------|-----------------------------------------------------------------------------|--------------|
+| `litellm_pod_lock_manager_size` | Indicates which pod has the lock to write updates to the database. | Redis |
+| `litellm_in_memory_daily_spend_update_queue_size` | Number of items in the in-memory daily spend update queue. These are the aggregate spend logs for each user. | In-Memory |
+| `litellm_redis_daily_spend_update_queue_size` | Number of items in the Redis daily spend update queue. These are the aggregate spend logs for each user. | Redis |
+| `litellm_in_memory_spend_update_queue_size` | In-memory aggregate spend values for keys, users, teams, team members, etc.| In-Memory |
+| `litellm_redis_spend_update_queue_size` | Redis aggregate spend values for keys, users, teams, etc. | Redis |
+
+
## **🔥 LiteLLM Maintained Grafana Dashboards **
@@ -268,6 +288,17 @@ Here is a screenshot of the metrics you can monitor with the LiteLLM Grafana Das
+## Add authentication on /metrics endpoint
+
+**By default, the `/metrics` endpoint is unauthenticated.**
+
+You can opt into running LiteLLM authentication on the `/metrics` endpoint by setting the following in your config:
+
+```yaml
+litellm_settings:
+ require_auth_for_metrics_endpoint: true
+```
+
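+Once enabled, scrapes must authenticate like any other proxy request. A minimal sketch (this assumes your LiteLLM key is accepted as a standard `Authorization: Bearer` header and the proxy runs locally on port 4000):
+
+```python showLineNumbers title="Authenticated /metrics scrape"
+import requests
+
+# Assumption: with require_auth_for_metrics_endpoint enabled, /metrics expects a
+# LiteLLM key in the Authorization header, like other proxy endpoints.
+resp = requests.get(
+    "http://localhost:4000/metrics",
+    headers={"Authorization": "Bearer sk-1234"},  # your LiteLLM key
+)
+resp.raise_for_status()
+print(resp.text[:500])  # first part of the Prometheus output
+```
+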
## FAQ
### What are `_created` vs. `_total` metrics?
diff --git a/docs/my-website/docs/proxy/prompt_management.md b/docs/my-website/docs/proxy/prompt_management.md
index 980043f455..c09231dd59 100644
--- a/docs/my-website/docs/proxy/prompt_management.md
+++ b/docs/my-website/docs/proxy/prompt_management.md
@@ -2,7 +2,7 @@ import Image from '@theme/IdealImage';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-# [BETA] Prompt Management
+# Prompt Management
:::info
@@ -12,9 +12,10 @@ This feature is currently in beta, and might change unexpectedly. We expect this
Run experiments or change the specific model (e.g. from gpt-4o to gpt4o-mini finetune) from your prompt management tool (e.g. Langfuse) instead of making changes in the application.
-Supported Integrations:
-- [Langfuse](https://langfuse.com/docs/prompts/get-started)
-- [Humanloop](../observability/humanloop)
+| Supported Integrations | Link |
+|------------------------|------|
+| Langfuse | [Get Started](https://langfuse.com/docs/prompts/get-started) |
+| Humanloop | [Get Started](../observability/humanloop) |
## Quick Start
diff --git a/docs/my-website/docs/proxy/release_cycle.md b/docs/my-website/docs/proxy/release_cycle.md
index 947a4ae6b3..c5782087f2 100644
--- a/docs/my-website/docs/proxy/release_cycle.md
+++ b/docs/my-website/docs/proxy/release_cycle.md
@@ -4,9 +4,17 @@ Litellm Proxy has the following release cycle:
- `v1.x.x-nightly`: These are releases which pass ci/cd.
- `v1.x.x.rc`: These are releases which pass ci/cd + [manual review](https://github.com/BerriAI/litellm/discussions/8495#discussioncomment-12180711).
-- `v1.x.x` OR `v1.x.x-stable`: These are releases which pass ci/cd + manual review + 3 days of production testing.
+- `v1.x.x:main-stable`: These are releases which pass ci/cd + manual review + 3 days of production testing.
-In production, we recommend using the latest `v1.x.x` release.
+In production, we recommend using the latest `v1.x.x:main-stable` release.
-Follow our release notes [here](https://github.com/BerriAI/litellm/releases).
\ No newline at end of file
+Follow our release notes [here](https://github.com/BerriAI/litellm/releases).
+
+
+## FAQ
+
+### Is there a release schedule for LiteLLM stable release?
+
+Stable releases come out every week (typically on Sunday).
+
diff --git a/docs/my-website/docs/proxy/response_headers.md b/docs/my-website/docs/proxy/response_headers.md
index b07f82d780..32f09fab42 100644
--- a/docs/my-website/docs/proxy/response_headers.md
+++ b/docs/my-website/docs/proxy/response_headers.md
@@ -43,19 +43,19 @@ These headers are useful for clients to understand the current rate limit status
| `x-litellm-max-fallbacks` | int | Maximum number of fallback attempts allowed |
## Cost Tracking Headers
-| Header | Type | Description |
-|--------|------|-------------|
-| `x-litellm-response-cost` | float | Cost of the API call |
-| `x-litellm-key-spend` | float | Total spend for the API key |
+| Header | Type | Description | Available on Pass-Through Endpoints |
+|--------|------|-------------|-------------|
+| `x-litellm-response-cost` | float | Cost of the API call | |
+| `x-litellm-key-spend` | float | Total spend for the API key | ✅ |
## LiteLLM Specific Headers
-| Header | Type | Description |
-|--------|------|-------------|
-| `x-litellm-call-id` | string | Unique identifier for the API call |
-| `x-litellm-model-id` | string | Unique identifier for the model used |
-| `x-litellm-model-api-base` | string | Base URL of the API endpoint |
-| `x-litellm-version` | string | Version of LiteLLM being used |
-| `x-litellm-model-group` | string | Model group identifier |
+| Header | Type | Description | Available on Pass-Through Endpoints |
+|--------|------|-------------|-------------|
+| `x-litellm-call-id` | string | Unique identifier for the API call | ✅ |
+| `x-litellm-model-id` | string | Unique identifier for the model used | |
+| `x-litellm-model-api-base` | string | Base URL of the API endpoint | ✅ |
+| `x-litellm-version` | string | Version of LiteLLM being used | |
+| `x-litellm-model-group` | string | Model group identifier | |
## Response headers from LLM providers
diff --git a/docs/my-website/docs/proxy/self_serve.md b/docs/my-website/docs/proxy/self_serve.md
index 604ceee3e5..a1e7c64cd9 100644
--- a/docs/my-website/docs/proxy/self_serve.md
+++ b/docs/my-website/docs/proxy/self_serve.md
@@ -161,6 +161,83 @@ Here's the available UI roles for a LiteLLM Internal User:
- `internal_user`: can login, view/create/delete their own keys, view their spend. **Cannot** add new users.
- `internal_user_viewer`: can login, view their own keys, view their own spend. **Cannot** create/delete keys, add new users.
+## Auto-add SSO users to teams
+
+This walks through setting up SSO auto-add for **Okta, Google SSO**.
+
+### Okta, Google SSO
+
+1. Specify the JWT field that contains the team IDs that the user belongs to.
+
+```yaml
+general_settings:
+ master_key: sk-1234
+ litellm_jwtauth:
+ team_ids_jwt_field: "groups" # 👈 CAN BE ANY FIELD
+```
+
+This is assuming your SSO token looks like this. **If you need to inspect the JWT fields received from your SSO provider by LiteLLM, follow these instructions [here](#debugging-sso-jwt-fields)**
+
+```
+{
+ ...,
+ "groups": ["team_id_1", "team_id_2"]
+}
+```
+
+2. Create the teams on LiteLLM
+
+```bash
+curl -X POST '/team/new' \
+-H 'Authorization: Bearer ' \
+-H 'Content-Type: application/json' \
+-d '{
+ "team_alias": "team_1",
+ "team_id": "team_id_1" # 👈 MUST BE THE SAME AS THE SSO GROUP ID
+}'
+```
+
+3. Test the SSO flow
+
+Here's a walkthrough of [how it works](https://www.loom.com/share/8959be458edf41fd85937452c29a33f3?sid=7ebd6d37-569a-4023-866e-e0cde67cb23e)
+
+### Microsoft Entra ID SSO group assignment
+
+Follow this [tutorial for auto-adding sso users to teams with Microsoft Entra ID](https://docs.litellm.ai/docs/tutorials/msft_sso)
+
+### Debugging SSO JWT fields
+
+If you need to inspect the JWT fields received from your SSO provider by LiteLLM, follow these instructions. This guide walks you through setting up a debug callback to view the JWT data during the SSO process.
+
+
+
+
+
+1. Add `/sso/debug/callback` as a redirect URL in your SSO provider
+
+ In your SSO provider's settings, add the following URL as a new redirect (callback) URL:
+
+ ```bash showLineNumbers title="Redirect URL"
+ http:///sso/debug/callback
+ ```
+
+
+2. Navigate to the debug login page on your browser
+
+ Navigate to the following URL on your browser:
+
+ ```bash showLineNumbers title="URL to navigate to"
+ https:///sso/debug/login
+ ```
+
+ This will initiate the standard SSO flow. You will be redirected to your SSO provider's login screen, and after successful authentication, you will be redirected back to LiteLLM's debug callback route.
+
+
+3. View the JWT fields
+
+Once redirected, you should see a page called "SSO Debug Information". This page displays the JWT fields received from your SSO provider (as shown in the image above).
+
+
## Advanced
### Setting custom logout URLs
@@ -196,40 +273,26 @@ This budget does not apply to keys created under non-default teams.
[**Go Here**](./team_budgets.md)
-### Auto-add SSO users to teams
+### Set default params for new teams
-1. Specify the JWT field that contains the team ids, that the user belongs to.
+When you connect LiteLLM to your SSO provider, LiteLLM can auto-create teams. Use this to set the default `models`, `max_budget`, and `budget_duration` for these auto-created teams.
-```yaml
-general_settings:
- master_key: sk-1234
- litellm_jwtauth:
- team_ids_jwt_field: "groups" # 👈 CAN BE ANY FIELD
+**How it works**
+
+1. When LiteLLM fetches `groups` from your SSO provider, it checks if the corresponding group_id exists as a `team_id` in LiteLLM.
+2. If the team_id does not exist, LiteLLM auto-creates a team with the default params you've set.
+3. If the team_id already exists, LiteLLM does not apply any settings to the team.
+
+**Usage**
+
+```yaml showLineNumbers title="Default Params for new teams"
+litellm_settings:
+ default_team_params: # Default Params to apply when litellm auto creates a team from SSO IDP provider
+ max_budget: 100 # Optional[float], optional): $100 budget for the team
+ budget_duration: 30d # Optional[str], optional): 30 days budget_duration for the team
+ models: ["gpt-3.5-turbo"] # Optional[List[str]], optional): models to be used by the team
```
-This is assuming your SSO token looks like this:
-```
-{
- ...,
- "groups": ["team_id_1", "team_id_2"]
-}
-```
-
-2. Create the teams on LiteLLM
-
-```bash
-curl -X POST '/team/new' \
--H 'Authorization: Bearer ' \
--H 'Content-Type: application/json' \
--D '{
- "team_alias": "team_1",
- "team_id": "team_id_1" # 👈 MUST BE THE SAME AS THE SSO GROUP ID
-}'
-```
-
-3. Test the SSO flow
-
-Here's a walkthrough of [how it works](https://www.loom.com/share/8959be458edf41fd85937452c29a33f3?sid=7ebd6d37-569a-4023-866e-e0cde67cb23e)
### Restrict Users from creating personal keys
@@ -241,7 +304,7 @@ This will also prevent users from using their session tokens on the test keys ch
## **All Settings for Self Serve / SSO Flow**
-```yaml
+```yaml showLineNumbers title="All Settings for Self Serve / SSO Flow"
litellm_settings:
max_internal_user_budget: 10 # max budget for internal users
internal_user_budget_duration: "1mo" # reset every month
@@ -251,6 +314,11 @@ litellm_settings:
max_budget: 100 # Optional[float], optional): $100 budget for a new SSO sign in user
budget_duration: 30d # Optional[str], optional): 30 days budget_duration for a new SSO sign in user
models: ["gpt-3.5-turbo"] # Optional[List[str]], optional): models to be used by a new SSO sign in user
+
+ default_team_params: # Default Params to apply when litellm auto creates a team from SSO IDP provider
+ max_budget: 100 # Optional[float], optional): $100 budget for the team
+ budget_duration: 30d # Optional[str], optional): 30 days budget_duration for the team
+ models: ["gpt-3.5-turbo"] # Optional[List[str]], optional): models to be used by the team
upperbound_key_generate_params: # Upperbound for /key/generate requests when self-serve flow is on
diff --git a/docs/my-website/docs/reasoning_content.md b/docs/my-website/docs/reasoning_content.md
index 1cce3f0570..12a0f17ba0 100644
--- a/docs/my-website/docs/reasoning_content.md
+++ b/docs/my-website/docs/reasoning_content.md
@@ -15,14 +15,17 @@ Supported Providers:
- Bedrock (Anthropic + Deepseek) (`bedrock/`)
- Vertex AI (Anthropic) (`vertexai/`)
- OpenRouter (`openrouter/`)
+- XAI (`xai/`)
+- Google AI Studio (`google/`)
+- Vertex AI (`vertex_ai/`)
LiteLLM will standardize the `reasoning_content` in the response and `thinking_blocks` in the assistant message.
-```python
+```python title="Example response from litellm"
"message": {
...
"reasoning_content": "The capital of France is Paris.",
- "thinking_blocks": [
+ "thinking_blocks": [ # only returned for Anthropic models
{
"type": "thinking",
"thinking": "The capital of France is Paris.",
@@ -37,7 +40,7 @@ LiteLLM will standardize the `reasoning_content` in the response and `thinking_b
-```python
+```python showLineNumbers
from litellm import completion
import os
@@ -48,7 +51,7 @@ response = completion(
messages=[
{"role": "user", "content": "What is the capital of France?"},
],
- thinking={"type": "enabled", "budget_tokens": 1024} # 👈 REQUIRED FOR ANTHROPIC models (on `anthropic/`, `bedrock/`, `vertexai/`)
+ reasoning_effort="low",
)
print(response.choices[0].message.content)
```
@@ -68,7 +71,7 @@ curl http://0.0.0.0:4000/v1/chat/completions \
"content": "What is the capital of France?"
}
],
- "thinking": {"type": "enabled", "budget_tokens": 1024}
+ "reasoning_effort": "low"
}'
```
@@ -111,7 +114,7 @@ Here's how to use `thinking` blocks by Anthropic with tool calling.
-```python
+```python showLineNumbers
litellm._turn_on_debug()
litellm.modify_params = True
model = "anthropic/claude-3-7-sonnet-20250219" # works across Anthropic, Bedrock, Vertex AI
@@ -150,7 +153,7 @@ response = litellm.completion(
messages=messages,
tools=tools,
tool_choice="auto", # auto is default, but we'll be explicit
- thinking={"type": "enabled", "budget_tokens": 1024},
+ reasoning_effort="low",
)
print("Response\n", response)
response_message = response.choices[0].message
@@ -198,9 +201,9 @@ if tool_calls:
model=model,
messages=messages,
seed=22,
+ reasoning_effort="low",
# tools=tools,
drop_params=True,
- thinking={"type": "enabled", "budget_tokens": 1024},
) # get a new response from the model where it can see the function response
print("second response\n", second_response)
```
@@ -210,7 +213,7 @@ if tool_calls:
1. Setup config.yaml
-```yaml
+```yaml showLineNumbers
model_list:
- model_name: claude-3-7-sonnet-thinking
litellm_params:
@@ -224,7 +227,7 @@ model_list:
2. Run proxy
-```bash
+```bash showLineNumbers
litellm --config config.yaml
# RUNNING on http://0.0.0.0:4000
@@ -332,7 +335,7 @@ curl http://0.0.0.0:4000/v1/chat/completions \
Set `drop_params=True` to drop the 'thinking' blocks when swapping from Anthropic to Deepseek models. Suggest improvements to this approach [here](https://github.com/BerriAI/litellm/discussions/8927).
-```python
+```python showLineNumbers
litellm.drop_params = True # 👈 EITHER GLOBALLY or per request
# or per request
@@ -340,7 +343,7 @@ litellm.drop_params = True # 👈 EITHER GLOBALLY or per request
response = litellm.completion(
model="anthropic/claude-3-7-sonnet-20250219",
messages=[{"role": "user", "content": "What is the capital of France?"}],
- thinking={"type": "enabled", "budget_tokens": 1024},
+ reasoning_effort="low",
drop_params=True,
)
@@ -348,7 +351,7 @@ response = litellm.completion(
response = litellm.completion(
model="deepseek/deepseek-chat",
messages=[{"role": "user", "content": "What is the capital of France?"}],
- thinking={"type": "enabled", "budget_tokens": 1024},
+ reasoning_effort="low",
drop_params=True,
)
```
@@ -364,3 +367,123 @@ These fields can be accessed via `response.choices[0].message.reasoning_content`
- `thinking` - str: The thinking from the model.
- `signature` - str: The signature delta from the model.
+
+
+## Pass `thinking` to Anthropic models
+
+You can also pass the `thinking` parameter to Anthropic models.
+
+
+
+
+```python showLineNumbers
+response = litellm.completion(
+ model="anthropic/claude-3-7-sonnet-20250219",
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
+ thinking={"type": "enabled", "budget_tokens": 1024},
+)
+```
+
+
+
+
+```bash
+curl http://0.0.0.0:4000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $LITELLM_KEY" \
+ -d '{
+ "model": "anthropic/claude-3-7-sonnet-20250219",
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
+ "thinking": {"type": "enabled", "budget_tokens": 1024}
+ }'
+```
+
+
+
+
+## Checking if a model supports reasoning
+
+
+
+
+Use `litellm.supports_reasoning(model="")` -> returns `True` if model supports reasoning and `False` if not.
+
+```python showLineNumbers title="litellm.supports_reasoning() usage"
+import litellm
+
+# Example models that support reasoning
+assert litellm.supports_reasoning(model="anthropic/claude-3-7-sonnet-20250219") == True
+assert litellm.supports_reasoning(model="deepseek/deepseek-chat") == True
+
+# Example models that do not support reasoning
+assert litellm.supports_reasoning(model="openai/gpt-3.5-turbo") == False
+```
+
+
+
+
+1. Define models that support reasoning in your `config.yaml`. You can optionally add `supports_reasoning: True` to the `model_info` if LiteLLM does not automatically detect it for your custom model.
+
+```yaml showLineNumbers title="litellm proxy config.yaml"
+model_list:
+ - model_name: claude-3-sonnet-reasoning
+ litellm_params:
+ model: anthropic/claude-3-7-sonnet-20250219
+ api_key: os.environ/ANTHROPIC_API_KEY
+ - model_name: deepseek-reasoning
+ litellm_params:
+ model: deepseek/deepseek-chat
+ api_key: os.environ/DEEPSEEK_API_KEY
+ # Example for a custom model where detection might be needed
+ - model_name: my-custom-reasoning-model
+ litellm_params:
+ model: openai/my-custom-model # Assuming it's OpenAI compatible
+ api_base: http://localhost:8000
+ api_key: fake-key
+ model_info:
+ supports_reasoning: True # Explicitly mark as supporting reasoning
+```
+
+2. Run the proxy server:
+
+```bash showLineNumbers title="litellm --config config.yaml"
+litellm --config config.yaml
+```
+
+3. Call `/model_group/info` to check if your model supports `reasoning`
+
+```shell showLineNumbers title="curl /model_group/info"
+curl -X 'GET' \
+ 'http://localhost:4000/model_group/info' \
+ -H 'accept: application/json' \
+ -H 'x-api-key: sk-1234'
+```
+
+Expected Response
+
+```json showLineNumbers title="response from /model_group/info"
+{
+  "data": [
+    {
+      "model_group": "claude-3-sonnet-reasoning",
+      "providers": ["anthropic"],
+      "mode": "chat",
+      "supports_reasoning": true
+    },
+    {
+      "model_group": "deepseek-reasoning",
+      "providers": ["deepseek"],
+      "supports_reasoning": true
+    },
+    {
+      "model_group": "my-custom-reasoning-model",
+      "providers": ["openai"],
+      "supports_reasoning": true
+    }
+  ]
+}
+```
+
+
+
+
diff --git a/docs/my-website/docs/response_api.md b/docs/my-website/docs/response_api.md
index 0604a42586..532f20bc05 100644
--- a/docs/my-website/docs/response_api.md
+++ b/docs/my-website/docs/response_api.md
@@ -14,22 +14,22 @@ LiteLLM provides a BETA endpoint in the spec of [OpenAI's `/responses` API](http
| Fallbacks | ✅ | Works between supported models |
| Loadbalancing | ✅ | Works between supported models |
| Supported LiteLLM Versions | 1.63.8+ | |
-| Supported LLM providers | `openai` | |
+| Supported LLM providers | **All LiteLLM supported providers** | `openai`, `anthropic`, `bedrock`, `vertex_ai`, `gemini`, `azure`, `azure_ai` etc. |
## Usage
-## Create a model response
+### LiteLLM Python SDK
-
+
#### Non-streaming
-```python
+```python showLineNumbers title="OpenAI Non-streaming Response"
import litellm
# Non-streaming response
response = litellm.responses(
- model="gpt-4o",
+ model="openai/o1-pro",
input="Tell me a three sentence bedtime story about a unicorn.",
max_output_tokens=100
)
@@ -38,12 +38,12 @@ print(response)
```
#### Streaming
-```python
+```python showLineNumbers title="OpenAI Streaming Response"
import litellm
# Streaming response
response = litellm.responses(
- model="gpt-4o",
+ model="openai/o1-pro",
input="Tell me a three sentence bedtime story about a unicorn.",
stream=True
)
@@ -53,58 +53,169 @@ for event in response:
```
-
-First, add this to your litellm proxy config.yaml:
-```yaml
-model_list:
- - model_name: gpt-4o
- litellm_params:
- model: openai/gpt-4o
- api_key: os.environ/OPENAI_API_KEY
-```
-
-Start your LiteLLM proxy:
-```bash
-litellm --config /path/to/config.yaml
-
-# RUNNING on http://0.0.0.0:4000
-```
-
-Then use the OpenAI SDK pointed to your proxy:
+
#### Non-streaming
-```python
-from openai import OpenAI
+```python showLineNumbers title="Anthropic Non-streaming Response"
+import litellm
+import os
-# Initialize client with your proxy URL
-client = OpenAI(
- base_url="http://localhost:4000", # Your proxy URL
- api_key="your-api-key" # Your proxy API key
-)
+# Set API key
+os.environ["ANTHROPIC_API_KEY"] = "your-anthropic-api-key"
# Non-streaming response
-response = client.responses.create(
- model="gpt-4o",
- input="Tell me a three sentence bedtime story about a unicorn."
+response = litellm.responses(
+ model="anthropic/claude-3-5-sonnet-20240620",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ max_output_tokens=100
)
print(response)
```
#### Streaming
-```python
-from openai import OpenAI
+```python showLineNumbers title="Anthropic Streaming Response"
+import litellm
+import os
-# Initialize client with your proxy URL
-client = OpenAI(
- base_url="http://localhost:4000", # Your proxy URL
- api_key="your-api-key" # Your proxy API key
-)
+# Set API key
+os.environ["ANTHROPIC_API_KEY"] = "your-anthropic-api-key"
# Streaming response
-response = client.responses.create(
- model="gpt-4o",
+response = litellm.responses(
+ model="anthropic/claude-3-5-sonnet-20240620",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ stream=True
+)
+
+for event in response:
+ print(event)
+```
+
+
+
+
+
+#### Non-streaming
+```python showLineNumbers title="Vertex AI Non-streaming Response"
+import litellm
+import os
+
+# Set credentials - Vertex AI uses application default credentials
+# Run 'gcloud auth application-default login' to authenticate
+os.environ["VERTEXAI_PROJECT"] = "your-gcp-project-id"
+os.environ["VERTEXAI_LOCATION"] = "us-central1"
+
+# Non-streaming response
+response = litellm.responses(
+ model="vertex_ai/gemini-1.5-pro",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ max_output_tokens=100
+)
+
+print(response)
+```
+
+#### Streaming
+```python showLineNumbers title="Vertex AI Streaming Response"
+import litellm
+import os
+
+# Set credentials - Vertex AI uses application default credentials
+# Run 'gcloud auth application-default login' to authenticate
+os.environ["VERTEXAI_PROJECT"] = "your-gcp-project-id"
+os.environ["VERTEXAI_LOCATION"] = "us-central1"
+
+# Streaming response
+response = litellm.responses(
+ model="vertex_ai/gemini-1.5-pro",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ stream=True
+)
+
+for event in response:
+ print(event)
+```
+
+
+
+
+
+#### Non-streaming
+```python showLineNumbers title="AWS Bedrock Non-streaming Response"
+import litellm
+import os
+
+# Set AWS credentials
+os.environ["AWS_ACCESS_KEY_ID"] = "your-access-key-id"
+os.environ["AWS_SECRET_ACCESS_KEY"] = "your-secret-access-key"
+os.environ["AWS_REGION_NAME"] = "us-west-2" # or your AWS region
+
+# Non-streaming response
+response = litellm.responses(
+ model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ max_output_tokens=100
+)
+
+print(response)
+```
+
+#### Streaming
+```python showLineNumbers title="AWS Bedrock Streaming Response"
+import litellm
+import os
+
+# Set AWS credentials
+os.environ["AWS_ACCESS_KEY_ID"] = "your-access-key-id"
+os.environ["AWS_SECRET_ACCESS_KEY"] = "your-secret-access-key"
+os.environ["AWS_REGION_NAME"] = "us-west-2" # or your AWS region
+
+# Streaming response
+response = litellm.responses(
+ model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ stream=True
+)
+
+for event in response:
+ print(event)
+```
+
+
+
+
+
+#### Non-streaming
+```python showLineNumbers title="Google AI Studio Non-streaming Response"
+import litellm
+import os
+
+# Set API key for Google AI Studio
+os.environ["GEMINI_API_KEY"] = "your-gemini-api-key"
+
+# Non-streaming response
+response = litellm.responses(
+ model="gemini/gemini-1.5-flash",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ max_output_tokens=100
+)
+
+print(response)
+```
+
+#### Streaming
+```python showLineNumbers title="Google AI Studio Streaming Response"
+import litellm
+import os
+
+# Set API key for Google AI Studio
+os.environ["GEMINI_API_KEY"] = "your-gemini-api-key"
+
+# Streaming response
+response = litellm.responses(
+ model="gemini/gemini-1.5-flash",
input="Tell me a three sentence bedtime story about a unicorn.",
stream=True
)
@@ -115,3 +226,408 @@ for event in response:
+
+### LiteLLM Proxy with OpenAI SDK
+
+First, set up and start your LiteLLM proxy server.
+
+```bash title="Start LiteLLM Proxy Server"
+litellm --config /path/to/config.yaml
+
+# RUNNING on http://0.0.0.0:4000
+```
+
+
+
+
+First, add this to your litellm proxy config.yaml:
+```yaml showLineNumbers title="OpenAI Proxy Configuration"
+model_list:
+ - model_name: openai/o1-pro
+ litellm_params:
+ model: openai/o1-pro
+ api_key: os.environ/OPENAI_API_KEY
+```
+
+#### Non-streaming
+```python showLineNumbers title="OpenAI Proxy Non-streaming Response"
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Non-streaming response
+response = client.responses.create(
+ model="openai/o1-pro",
+ input="Tell me a three sentence bedtime story about a unicorn."
+)
+
+print(response)
+```
+
+#### Streaming
+```python showLineNumbers title="OpenAI Proxy Streaming Response"
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Streaming response
+response = client.responses.create(
+ model="openai/o1-pro",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ stream=True
+)
+
+for event in response:
+ print(event)
+```
+
+
+
+
+
+First, add this to your litellm proxy config.yaml:
+```yaml showLineNumbers title="Anthropic Proxy Configuration"
+model_list:
+ - model_name: anthropic/claude-3-5-sonnet-20240620
+ litellm_params:
+ model: anthropic/claude-3-5-sonnet-20240620
+ api_key: os.environ/ANTHROPIC_API_KEY
+```
+
+#### Non-streaming
+```python showLineNumbers title="Anthropic Proxy Non-streaming Response"
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Non-streaming response
+response = client.responses.create(
+ model="anthropic/claude-3-5-sonnet-20240620",
+ input="Tell me a three sentence bedtime story about a unicorn."
+)
+
+print(response)
+```
+
+#### Streaming
+```python showLineNumbers title="Anthropic Proxy Streaming Response"
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Streaming response
+response = client.responses.create(
+ model="anthropic/claude-3-5-sonnet-20240620",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ stream=True
+)
+
+for event in response:
+ print(event)
+```
+
+
+
+
+
+First, add this to your litellm proxy config.yaml:
+```yaml showLineNumbers title="Vertex AI Proxy Configuration"
+model_list:
+ - model_name: vertex_ai/gemini-1.5-pro
+ litellm_params:
+ model: vertex_ai/gemini-1.5-pro
+ vertex_project: your-gcp-project-id
+ vertex_location: us-central1
+```
+
+#### Non-streaming
+```python showLineNumbers title="Vertex AI Proxy Non-streaming Response"
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Non-streaming response
+response = client.responses.create(
+ model="vertex_ai/gemini-1.5-pro",
+ input="Tell me a three sentence bedtime story about a unicorn."
+)
+
+print(response)
+```
+
+#### Streaming
+```python showLineNumbers title="Vertex AI Proxy Streaming Response"
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Streaming response
+response = client.responses.create(
+ model="vertex_ai/gemini-1.5-pro",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ stream=True
+)
+
+for event in response:
+ print(event)
+```
+
+
+
+
+
+First, add this to your litellm proxy config.yaml:
+```yaml showLineNumbers title="AWS Bedrock Proxy Configuration"
+model_list:
+ - model_name: bedrock/anthropic.claude-3-sonnet-20240229-v1:0
+ litellm_params:
+ model: bedrock/anthropic.claude-3-sonnet-20240229-v1:0
+ aws_access_key_id: os.environ/AWS_ACCESS_KEY_ID
+ aws_secret_access_key: os.environ/AWS_SECRET_ACCESS_KEY
+ aws_region_name: us-west-2
+```
+
+#### Non-streaming
+```python showLineNumbers title="AWS Bedrock Proxy Non-streaming Response"
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Non-streaming response
+response = client.responses.create(
+ model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+ input="Tell me a three sentence bedtime story about a unicorn."
+)
+
+print(response)
+```
+
+#### Streaming
+```python showLineNumbers title="AWS Bedrock Proxy Streaming Response"
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Streaming response
+response = client.responses.create(
+ model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ stream=True
+)
+
+for event in response:
+ print(event)
+```
+
+
+
+
+
+First, add this to your litellm proxy config.yaml:
+```yaml showLineNumbers title="Google AI Studio Proxy Configuration"
+model_list:
+ - model_name: gemini/gemini-1.5-flash
+ litellm_params:
+ model: gemini/gemini-1.5-flash
+ api_key: os.environ/GEMINI_API_KEY
+```
+
+#### Non-streaming
+```python showLineNumbers title="Google AI Studio Proxy Non-streaming Response"
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Non-streaming response
+response = client.responses.create(
+ model="gemini/gemini-1.5-flash",
+ input="Tell me a three sentence bedtime story about a unicorn."
+)
+
+print(response)
+```
+
+#### Streaming
+```python showLineNumbers title="Google AI Studio Proxy Streaming Response"
+from openai import OpenAI
+
+# Initialize client with your proxy URL
+client = OpenAI(
+ base_url="http://localhost:4000", # Your proxy URL
+ api_key="your-api-key" # Your proxy API key
+)
+
+# Streaming response
+response = client.responses.create(
+ model="gemini/gemini-1.5-flash",
+ input="Tell me a three sentence bedtime story about a unicorn.",
+ stream=True
+)
+
+for event in response:
+ print(event)
+```
+
+
+
+
+## Supported Responses API Parameters
+
+| Provider | Supported Parameters |
+|----------|---------------------|
+| `openai` | [All Responses API parameters are supported](https://github.com/BerriAI/litellm/blob/7c3df984da8e4dff9201e4c5353fdc7a2b441831/litellm/llms/openai/responses/transformation.py#L23) |
+| `azure` | [All Responses API parameters are supported](https://github.com/BerriAI/litellm/blob/7c3df984da8e4dff9201e4c5353fdc7a2b441831/litellm/llms/openai/responses/transformation.py#L23) |
+| `anthropic` | [See supported parameters here](https://github.com/BerriAI/litellm/blob/f39d9178868662746f159d5ef642c7f34f9bfe5f/litellm/responses/litellm_completion_transformation/transformation.py#L57) |
+| `bedrock` | [See supported parameters here](https://github.com/BerriAI/litellm/blob/f39d9178868662746f159d5ef642c7f34f9bfe5f/litellm/responses/litellm_completion_transformation/transformation.py#L57) |
+| `gemini` | [See supported parameters here](https://github.com/BerriAI/litellm/blob/f39d9178868662746f159d5ef642c7f34f9bfe5f/litellm/responses/litellm_completion_transformation/transformation.py#L57) |
+| `vertex_ai` | [See supported parameters here](https://github.com/BerriAI/litellm/blob/f39d9178868662746f159d5ef642c7f34f9bfe5f/litellm/responses/litellm_completion_transformation/transformation.py#L57) |
+| `azure_ai` | [See supported parameters here](https://github.com/BerriAI/litellm/blob/f39d9178868662746f159d5ef642c7f34f9bfe5f/litellm/responses/litellm_completion_transformation/transformation.py#L57) |
+| All other llm api providers | [See supported parameters here](https://github.com/BerriAI/litellm/blob/f39d9178868662746f159d5ef642c7f34f9bfe5f/litellm/responses/litellm_completion_transformation/transformation.py#L57) |
+
+## Load Balancing with Routing Affinity
+
+When using the Responses API with multiple deployments of the same model (e.g., multiple Azure OpenAI endpoints), LiteLLM provides routing affinity for conversations. This ensures that follow-up requests using a `previous_response_id` are routed to the same deployment that generated the original response.
+
+
+#### Example Usage
+
+
+
+
+```python showLineNumbers title="Python SDK with Routing Affinity"
+import litellm
+
+# Set up router with multiple deployments of the same model
+router = litellm.Router(
+ model_list=[
+ {
+ "model_name": "azure-gpt4-turbo",
+ "litellm_params": {
+ "model": "azure/gpt-4-turbo",
+ "api_key": "your-api-key-1",
+ "api_version": "2024-06-01",
+ "api_base": "https://endpoint1.openai.azure.com",
+ },
+ },
+ {
+ "model_name": "azure-gpt4-turbo",
+ "litellm_params": {
+ "model": "azure/gpt-4-turbo",
+ "api_key": "your-api-key-2",
+ "api_version": "2024-06-01",
+ "api_base": "https://endpoint2.openai.azure.com",
+ },
+ },
+ ],
+ optional_pre_call_checks=["responses_api_deployment_check"],
+)
+
+# Initial request (the `await` calls below assume you're inside an async function)
+response = await router.aresponses(
+ model="azure-gpt4-turbo",
+ input="Hello, who are you?",
+ truncation="auto",
+)
+
+# Store the response ID
+response_id = response.id
+
+# Follow-up request - will be automatically routed to the same deployment
+follow_up = await router.aresponses(
+ model="azure-gpt4-turbo",
+ input="Tell me more about yourself",
+ truncation="auto",
+ previous_response_id=response_id # This ensures routing to the same deployment
+)
+```
+
+
+
+
+#### 1. Setup routing affinity on proxy config.yaml
+
+To enable routing affinity for Responses API in your LiteLLM proxy, set `optional_pre_call_checks: ["responses_api_deployment_check"]` in your proxy config.yaml.
+
+```yaml showLineNumbers title="config.yaml with Responses API Routing Affinity"
+model_list:
+ - model_name: azure-gpt4-turbo
+ litellm_params:
+ model: azure/gpt-4-turbo
+ api_key: your-api-key-1
+ api_version: 2024-06-01
+ api_base: https://endpoint1.openai.azure.com
+ - model_name: azure-gpt4-turbo
+ litellm_params:
+ model: azure/gpt-4-turbo
+ api_key: your-api-key-2
+ api_version: 2024-06-01
+ api_base: https://endpoint2.openai.azure.com
+
+router_settings:
+ optional_pre_call_checks: ["responses_api_deployment_check"]
+```
+
+#### 2. Use the OpenAI Python SDK to make requests to LiteLLM Proxy
+
+```python showLineNumbers title="OpenAI Client with Proxy Server"
+from openai import OpenAI
+
+client = OpenAI(
+ base_url="http://localhost:4000",
+ api_key="your-api-key"
+)
+
+# Initial request
+response = client.responses.create(
+ model="azure-gpt4-turbo",
+ input="Hello, who are you?"
+)
+
+response_id = response.id
+
+# Follow-up request - will be automatically routed to the same deployment
+follow_up = client.responses.create(
+ model="azure-gpt4-turbo",
+ input="Tell me more about yourself",
+ previous_response_id=response_id # This ensures routing to the same deployment
+)
+```
+
+
+
diff --git a/docs/my-website/docs/set_keys.md b/docs/my-website/docs/set_keys.md
index 3a5ff08d63..693cf5f7f4 100644
--- a/docs/my-website/docs/set_keys.md
+++ b/docs/my-website/docs/set_keys.md
@@ -188,7 +188,13 @@ Currently implemented for:
- OpenAI (if OPENAI_API_KEY is set)
- Fireworks AI (if FIREWORKS_AI_API_KEY is set)
- LiteLLM Proxy (if LITELLM_PROXY_API_KEY is set)
+- Gemini (if GEMINI_API_KEY is set)
+- XAI (if XAI_API_KEY is set)
+- Anthropic (if ANTHROPIC_API_KEY is set)
+
+You can also specify a custom provider to check:
+
+**All providers**:
```python
from litellm import get_valid_models
@@ -196,6 +202,14 @@ valid_models = get_valid_models(check_provider_endpoint=True)
print(valid_models)
```
+**Specific provider**:
+```python
+from litellm import get_valid_models
+
+valid_models = get_valid_models(check_provider_endpoint=True, custom_llm_provider="openai")
+print(valid_models)
+```
+
### `validate_environment(model: str)`
This helper tells you if you have all the required environment variables for a model, and if not - what's missing.
diff --git a/docs/my-website/docs/tutorials/msft_sso.md b/docs/my-website/docs/tutorials/msft_sso.md
new file mode 100644
index 0000000000..f7ad6440f2
--- /dev/null
+++ b/docs/my-website/docs/tutorials/msft_sso.md
@@ -0,0 +1,162 @@
+import Image from '@theme/IdealImage';
+
+# Microsoft SSO: Sync Groups, Members with LiteLLM
+
+Sync Microsoft SSO Groups, Members with LiteLLM Teams.
+
+
+
+
+
+
+
+## Prerequisites
+
+- An Azure Entra ID account with administrative access
+- A LiteLLM Enterprise App set up in your Azure Portal
+- Access to Microsoft Entra ID (Azure AD)
+
+
+## Overview of this tutorial
+
+1. Auto-Create Entra ID Groups on LiteLLM Teams
+2. Sync Entra ID Team Memberships
+3. Set default params for new teams and users auto-created on LiteLLM
+
+## 1. Auto-Create Entra ID Groups on LiteLLM Teams
+
+In this step, our goal is to have LiteLLM automatically create a new team in the LiteLLM DB whenever a new group is added to the LiteLLM Enterprise App on Azure Entra ID.
+
+### 1.1 Create a new group in Entra ID
+
+
+Navigate to [your Azure Portal](https://portal.azure.com/) > Groups > New Group. Create a new group.
+
+
+
+### 1.2 Assign the group to your LiteLLM Enterprise App
+
+On your Azure Portal, navigate to `Enterprise Applications` > Select your litellm app
+
+
+
+
+
+
+Once you've selected your litellm app, click on `Users and Groups` > `Add user/group`
+
+
+
+
+
+Now select the group you created in step 1.1 and add it to the LiteLLM Enterprise App. At this point we have added `Production LLM Evals Group` to the LiteLLM Enterprise App. The next step is having LiteLLM automatically create the `Production LLM Evals Group` on the LiteLLM DB when a new user signs in.
+
+
+
+
+### 1.3 Sign in to LiteLLM UI via SSO
+
+Sign into the LiteLLM UI via SSO. You should be redirected to the Entra ID SSO page. This SSO sign in flow will trigger LiteLLM to fetch the latest Groups and Members from Azure Entra ID.
+
+
+
+### 1.4 Check the new team on LiteLLM UI
+
+On the LiteLLM UI, navigate to `Teams`. You should see the new team `Production LLM Evals Group` auto-created on LiteLLM.
+
+
+
+#### How this works
+
+When an SSO user signs in to LiteLLM:
+- LiteLLM automatically fetches the Groups under the LiteLLM Enterprise App
+- It finds the Production LLM Evals Group assigned to the LiteLLM Enterprise App
+- LiteLLM checks if this group's ID exists in the LiteLLM Teams Table
+- Since the ID doesn't exist, LiteLLM automatically creates a new team with:
+ - Name: Production LLM Evals Group
+ - ID: Same as the Entra ID group's ID
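+
+Here's a minimal sketch of that flow (the function and data structures below are illustrative only, not LiteLLM's actual internals):
+
+```python showLineNumbers
+# Illustrative sketch of the group -> team auto-create flow described above.
+# NOTE: names and structures here are hypothetical, not LiteLLM's real implementation.
+
+def sync_sso_groups_to_teams(idp_groups: list[dict], litellm_teams: dict) -> None:
+    """Create a LiteLLM team for every IDP group that doesn't already exist."""
+    for group in idp_groups:  # e.g. [{"id": "a1b2c3", "name": "Production LLM Evals Group"}]
+        if group["id"] not in litellm_teams:
+            # The team ID is kept identical to the Entra ID group's ID
+            litellm_teams[group["id"]] = {"team_alias": group["name"], "members": []}
+
+# Example: first SSO sign-in after the group was assigned to the app
+teams: dict = {}
+sync_sso_groups_to_teams([{"id": "a1b2c3", "name": "Production LLM Evals Group"}], teams)
+print(teams)  # {'a1b2c3': {'team_alias': 'Production LLM Evals Group', 'members': []}}
+```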
+
+## 2. Sync Entra ID Team Memberships
+
+In this step, we will have LiteLLM automatically add a user to the `Production LLM Evals` Team on the LiteLLM DB when a new user is added to the `Production LLM Evals` Group in Entra ID.
+
+### 2.1 Navigate to the `Production LLM Evals` Group in Entra ID
+
+Navigate to the `Production LLM Evals` Group in Entra ID.
+
+
+
+
+### 2.2 Add a member to the group in Entra ID
+
+Select `Members` > `Add members`
+
+In this stage you should add the user you want to add to the `Production LLM Evals` Team.
+
+
+
+
+
+### 2.3 Sign in as the new user on LiteLLM UI
+
+Sign in as the new user on the LiteLLM UI. You should be redirected to the Entra ID SSO page. This SSO sign-in flow will trigger LiteLLM to fetch the latest Groups and Members from Azure Entra ID. During this step, LiteLLM syncs its teams and team members with what is available from Entra ID.
+
+
+
+
+
+### 2.4 Check the team membership on LiteLLM UI
+
+On the LiteLLM UI, navigate to `Teams`. Since you are now a member of the `Production LLM Evals Group` in Entra ID, you should see the `Production LLM Evals Group` team on the LiteLLM UI.
+
+
+
+## 3. Set default params for new teams auto-created on LiteLLM
+
+Since LiteLLM auto-creates a new team in the LiteLLM DB whenever a new group is added to the LiteLLM Enterprise App on Azure Entra ID, we can set default params for these auto-created teams.
+
+This allows you to set a default budget, models, etc for new teams created.
+
+### 3.1 Set `default_team_params` on litellm
+
+Navigate to your litellm config file and set the following params
+
+```yaml showLineNumbers title="litellm config with default_team_params"
+litellm_settings:
+  default_team_params: # Default params to apply when LiteLLM auto-creates a team from an SSO IdP provider
+    max_budget: 100             # (Optional[float]) $100 budget for the team
+    budget_duration: 30d        # (Optional[str]) 30-day budget duration for the team
+    models: ["gpt-3.5-turbo"]   # (Optional[List[str]]) models the team can use
+```
+
+### 3.2 Auto-create a new team on LiteLLM
+
+- In this step you should add a new group to the LiteLLM Enterprise App on Azure Entra ID (like we did in step 1.1). We will call this group `Default LiteLLM Prod Team` on Azure Entra ID.
+- Start litellm proxy server with your config
+- Sign into LiteLLM UI via SSO
+- Navigate to `Teams` and you should see the new team `Default LiteLLM Prod Team` auto-created on LiteLLM
+- Note LiteLLM will set the default params for this new team.
+
+
+
+
+## Video Walkthrough
+
+This walks through setting up SSO auto-add for **Microsoft Entra ID**.
+
+Follow along with this video for a walkthrough of how to set this up with Microsoft Entra ID.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/my-website/docs/tutorials/openai_codex.md b/docs/my-website/docs/tutorials/openai_codex.md
new file mode 100644
index 0000000000..bb5af956b0
--- /dev/null
+++ b/docs/my-website/docs/tutorials/openai_codex.md
@@ -0,0 +1,146 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Using LiteLLM with OpenAI Codex
+
+This guide walks you through connecting OpenAI Codex to LiteLLM. Using LiteLLM with Codex allows teams to:
+- Access 100+ LLMs through the Codex interface
+- Use powerful models like Gemini through a familiar interface
+- Track spend and usage with LiteLLM's built-in analytics
+- Control model access with virtual keys
+
+
+
+## Quickstart
+
+:::info
+
+Requires LiteLLM v1.66.3.dev5 and higher
+
+:::
+
+
+Make sure to set up LiteLLM with the [LiteLLM Getting Started Guide](../proxy/docker_quick_start.md).
+
+## 1. Install OpenAI Codex
+
+Install the OpenAI Codex CLI tool globally using npm:
+
+
+
+
+```bash showLineNumbers
+npm i -g @openai/codex
+```
+
+
+
+
+```bash showLineNumbers
+yarn global add @openai/codex
+```
+
+
+
+
+## 2. Start LiteLLM Proxy
+
+
+
+
+```bash showLineNumbers
+docker run \
+ -v $(pwd)/litellm_config.yaml:/app/config.yaml \
+ -p 4000:4000 \
+ ghcr.io/berriai/litellm:main-latest \
+ --config /app/config.yaml
+```
+
+
+
+
+```bash showLineNumbers
+litellm --config /path/to/config.yaml
+```
+
+
+
+
+LiteLLM should now be running on [http://localhost:4000](http://localhost:4000)
+
+## 3. Configure LiteLLM for Model Routing
+
+Ensure your LiteLLM Proxy is properly configured to route to your desired models. Create a `litellm_config.yaml` file with the following content:
+
+```yaml showLineNumbers
+model_list:
+  - model_name: o3-mini
+    litellm_params:
+      model: openai/o3-mini
+      api_key: os.environ/OPENAI_API_KEY
+  - model_name: claude-3-7-sonnet-latest
+    litellm_params:
+      model: anthropic/claude-3-7-sonnet-latest
+      api_key: os.environ/ANTHROPIC_API_KEY
+  - model_name: gemini-2.0-flash
+    litellm_params:
+      model: gemini/gemini-2.0-flash
+      api_key: os.environ/GEMINI_API_KEY
+
+litellm_settings:
+  drop_params: true
+```
+
+This configuration enables routing to specific OpenAI, Anthropic, and Gemini models with explicit names.
+
+## 4. Configure Codex to Use LiteLLM Proxy
+
+Set the required environment variables to point Codex to your LiteLLM Proxy:
+
+```bash
+# Point to your LiteLLM Proxy server
+export OPENAI_BASE_URL=http://0.0.0.0:4000
+
+# Use your LiteLLM API key (if you've set up authentication)
+export OPENAI_API_KEY="sk-1234"
+```
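+
+Optionally, you can sanity-check that the proxy is reachable with these credentials before launching Codex. This is only an illustrative snippet using the OpenAI Python SDK; it assumes the proxy and `litellm_config.yaml` from the steps above:
+
+```python showLineNumbers
+# Quick sanity check that the LiteLLM Proxy answers with your key
+from openai import OpenAI
+
+client = OpenAI(
+    base_url="http://0.0.0.0:4000",  # same value as OPENAI_BASE_URL
+    api_key="sk-1234",               # same value as OPENAI_API_KEY
+)
+
+response = client.chat.completions.create(
+    model="gemini-2.0-flash",  # any model_name from your litellm_config.yaml
+    messages=[{"role": "user", "content": "ping"}],
+)
+print(response.choices[0].message.content)
+```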
+
+## 5. Run Codex with Gemini
+
+With everything configured, you can now run Codex with Gemini:
+
+```bash showLineNumbers
+codex --model gemini-2.0-flash --full-auto
+```
+
+
+
+The `--full-auto` flag allows Codex to automatically generate code without additional prompting.
+
+## 6. Advanced Options
+
+### Using Different Models
+
+You can use any model configured in your LiteLLM proxy:
+
+```bash
+# Use Claude models
+codex --model claude-3-7-sonnet-latest
+
+# Use Google AI Studio Gemini models
+codex --model gemini/gemini-2.0-flash
+```
+
+## Troubleshooting
+
+- If you encounter connection issues, ensure your LiteLLM Proxy is running and accessible at the specified URL
+- Verify your LiteLLM API key is valid if you're using authentication
+- Check that your model routing configuration is correct
+- For model-specific errors, ensure the model is properly configured in your LiteLLM setup
+
+## Additional Resources
+
+- [LiteLLM Docker Quick Start Guide](../proxy/docker_quick_start.md)
+- [OpenAI Codex GitHub Repository](https://github.com/openai/codex)
+- [LiteLLM Virtual Keys and Authentication](../proxy/virtual_keys.md)
diff --git a/docs/my-website/docs/tutorials/openweb_ui.md b/docs/my-website/docs/tutorials/openweb_ui.md
index ab1e2e121e..b2c1204069 100644
--- a/docs/my-website/docs/tutorials/openweb_ui.md
+++ b/docs/my-website/docs/tutorials/openweb_ui.md
@@ -98,6 +98,5 @@ On the models dropdown select `thinking-anthropic-claude-3-7-sonnet`
-
-
-
+## Additional Resources
+- Running LiteLLM and OpenWebUI on Windows Localhost: A Comprehensive Guide [https://www.tanyongsheng.com/note/running-litellm-and-openwebui-on-windows-localhost-a-comprehensive-guide/](https://www.tanyongsheng.com/note/running-litellm-and-openwebui-on-windows-localhost-a-comprehensive-guide/)
\ No newline at end of file
diff --git a/docs/my-website/docs/tutorials/prompt_caching.md b/docs/my-website/docs/tutorials/prompt_caching.md
new file mode 100644
index 0000000000..bf3d5a8dda
--- /dev/null
+++ b/docs/my-website/docs/tutorials/prompt_caching.md
@@ -0,0 +1,128 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Auto-Inject Prompt Caching Checkpoints
+
+Reduce costs by up to 90% by using LiteLLM to auto-inject prompt caching checkpoints.
+
+
+
+
+## How it works
+
+LiteLLM can automatically inject prompt caching checkpoints into your requests to LLM providers. This allows:
+
+- **Cost Reduction**: Long, static parts of your prompts can be cached to avoid repeated processing
+- **No need to modify your application code**: You can configure the auto-caching behavior in the LiteLLM UI or in the `litellm config.yaml` file.
+
+## Configuration
+
+You need to specify `cache_control_injection_points` in your model configuration. This tells LiteLLM:
+1. Where to add the caching directive (`location`)
+2. Which message to target (`role`)
+
+LiteLLM will then automatically add a `cache_control` directive to the specified messages in your requests:
+
+```json
+"cache_control": {
+ "type": "ephemeral"
+}
+```
+
+## Usage Example
+
+In this example, we'll configure caching for system messages by adding the directive to all messages with `role: system`.
+
+
+
+
+```yaml showLineNumbers title="litellm config.yaml"
+model_list:
+  - model_name: anthropic-auto-inject-cache-system-message
+    litellm_params:
+      model: anthropic/claude-3-5-sonnet-20240620
+      api_key: os.environ/ANTHROPIC_API_KEY
+      cache_control_injection_points:
+        - location: message
+          role: system
+```
+
+
+
+
+On the LiteLLM UI, you can specify the `cache_control_injection_points` in the `Advanced Settings` tab when adding a model.
+
+
+
+
+
+
+## Detailed Example
+
+### 1. Original Request to LiteLLM
+
+In this example, we have a very long, static system message and a varying user message. It's efficient to cache the system message since it rarely changes.
+
+```json
+{
+ "messages": [
+ {
+ "role": "system",
+ "content": [
+ {
+ "type": "text",
+ "text": "You are a helpful assistant. This is a set of very long instructions that you will follow. Here is a legal document that you will use to answer the user's question."
+ }
+ ]
+ },
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "What is the main topic of this legal document?"
+ }
+ ]
+ }
+ ]
+}
+```
+
+### 2. LiteLLM's Modified Request
+
+LiteLLM auto-injects the caching directive into the system message based on our configuration:
+
+```json
+{
+ "messages": [
+ {
+ "role": "system",
+ "content": [
+ {
+ "type": "text",
+ "text": "You are a helpful assistant. This is a set of very long instructions that you will follow. Here is a legal document that you will use to answer the user's question.",
+ "cache_control": {"type": "ephemeral"}
+ }
+ ]
+ },
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "What is the main topic of this legal document?"
+ }
+ ]
+ }
+ ]
+}
+```
+
+When the model provider processes this request, it will recognize the caching directive and only process the system message once, caching it for subsequent requests.
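+
+For reference, here is what a client call could look like when the config above is served through the LiteLLM proxy. This is a minimal sketch assuming a local proxy on port 4000 and a placeholder virtual key; the caching directive is injected server-side, so the client request stays a plain chat completion:
+
+```python showLineNumbers
+from openai import OpenAI
+
+# Point the OpenAI SDK at your LiteLLM proxy (URL and key below are placeholders)
+client = OpenAI(base_url="http://localhost:4000", api_key="sk-1234")
+
+response = client.chat.completions.create(
+    model="anthropic-auto-inject-cache-system-message",  # model_name from the config above
+    messages=[
+        {"role": "system", "content": "You are a helpful assistant. <very long static instructions>"},
+        {"role": "user", "content": "What is the main topic of this legal document?"},
+    ],
+)
+print(response.choices[0].message.content)
+```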
+
+
+
+
+
+
diff --git a/docs/my-website/docs/tutorials/scim_litellm.md b/docs/my-website/docs/tutorials/scim_litellm.md
new file mode 100644
index 0000000000..c744abe4b4
--- /dev/null
+++ b/docs/my-website/docs/tutorials/scim_litellm.md
@@ -0,0 +1,74 @@
+
+import Image from '@theme/IdealImage';
+
+# SCIM with LiteLLM
+
+Enables identity providers (Okta, Azure AD, OneLogin, etc.) to automate user and team (group) provisioning, updates, and deprovisioning on LiteLLM.
+
+
+This tutorial will walk you through the steps to connect your IDP to LiteLLM SCIM Endpoints.
+
+### Supported SSO Providers for SCIM
+Below is a list of supported SSO providers for connecting to LiteLLM SCIM Endpoints.
+- Microsoft Entra ID (Azure AD)
+- Okta
+- Google Workspace
+- OneLogin
+- Keycloak
+- Auth0
+
+
+## 1. Get your SCIM Tenant URL and Bearer Token
+
+On LiteLLM, navigate to Settings > Admin Settings > SCIM. On this page you will create a SCIM Token, which allows your IDP to authenticate to the LiteLLM `/scim` endpoints.
+
+
+
+## 2. Connect your IDP to LiteLLM SCIM Endpoints
+
+On your IDP, navigate to your SSO application and select `Provisioning` > `New provisioning configuration`.
+
+On this page, paste in your LiteLLM SCIM tenant URL and bearer token.
+
+Once this is pasted in, click on `Test Connection` to ensure your IDP can authenticate to the LiteLLM SCIM endpoints.
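+
+If you want to verify the token manually before wiring up your IDP, you can hit a standard SCIM resource endpoint with the bearer token. This is only an illustrative check - the `/scim/v2` base path and token value below are assumptions; use the exact tenant URL and token copied from the LiteLLM UI:
+
+```python showLineNumbers
+import httpx
+
+SCIM_TENANT_URL = "http://localhost:4000/scim/v2"  # assumption: paste the tenant URL from the LiteLLM UI
+SCIM_BEARER_TOKEN = "sk-scim-..."                  # the SCIM token you created in step 1
+
+# SCIM service providers expose standard resource endpoints such as /Users and /Groups
+resp = httpx.get(
+    f"{SCIM_TENANT_URL}/Users",
+    headers={"Authorization": f"Bearer {SCIM_BEARER_TOKEN}"},
+)
+print(resp.status_code)  # expect 200 if the token is valid
+```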
+
+
+
+
+## 3. Test SCIM Connection
+
+### 3.1 Assign the group to your LiteLLM Enterprise App
+
+On your IDP Portal, navigate to `Enterprise Applications` > Select your litellm app
+
+
+
+
+
+
+Once you've selected your litellm app, click on `Users and Groups` > `Add user/group`
+
+
+
+
+
+Now select the group you created and add it to the LiteLLM Enterprise App. At this point we have added `Production LLM Evals Group` to the LiteLLM Enterprise App. The next step is having LiteLLM automatically create the `Production LLM Evals Group` on the LiteLLM DB when a new user signs in.
+
+
+
+
+### 3.2 Sign in to LiteLLM UI via SSO
+
+Sign into the LiteLLM UI via SSO. You should be redirected to the Entra ID SSO page. This SSO sign in flow will trigger LiteLLM to fetch the latest Groups and Members from Azure Entra ID.
+
+
+
+### 3.3 Check the new team on LiteLLM UI
+
+On the LiteLLM UI, navigate to `Teams`. You should see the new team `Production LLM Evals Group` auto-created on LiteLLM.
+
+
+
+
+
+
diff --git a/docs/my-website/docs/tutorials/tag_management.md b/docs/my-website/docs/tutorials/tag_management.md
new file mode 100644
index 0000000000..9b00db47d1
--- /dev/null
+++ b/docs/my-website/docs/tutorials/tag_management.md
@@ -0,0 +1,145 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# [Beta] Routing based on request metadata
+
+Create routing rules based on request metadata.
+
+## Setup
+
+Add the following to your litellm proxy config yaml file.
+
+```yaml showLineNumbers title="litellm proxy config.yaml"
+router_settings:
+ enable_tag_filtering: True # 👈 Key Change
+```
+
+## 1. Create a tag
+
+On the LiteLLM UI, navigate to Experimental > Tag Management > Create Tag.
+
+Create a tag called `private-data` and select only the models that requests with this tag are allowed to use. Once created, you will see the tag on the Tag Management page.
+
+
+
+
+## 2. Test Tag Routing
+
+Now we will test the tag based routing rules.
+
+### 2.1 Invalid model
+
+This request will fail since we send `tags=private-data` but the model `gpt-4o` is not in the allowed models for the `private-data` tag.
+
+
+
+
+
+Here is an example sending the same request using the OpenAI Python SDK.
+
+
+
+```python showLineNumbers
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="sk-1234",
+ base_url="http://0.0.0.0:4000/v1/"
+)
+
+response = client.chat.completions.create(
+ model="gpt-4o",
+ messages=[
+ {"role": "user", "content": "Hello, how are you?"}
+ ],
+ extra_body={
+ "tags": "private-data"
+ }
+)
+```
+
+
+
+
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/v1/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Hello, how are you?"
+ }
+ ],
+ "tags": "private-data"
+}'
+```
+
+
+
+
+
+
+### 2.2 Valid model
+
+This request will succeed since we send `tags=private-data` and the model `us.anthropic.claude-3-7-sonnet-20250219-v1:0` is in the allowed models for the `private-data` tag.
+
+
+
+Here is an example sending the same request using the OpenAI Python SDK.
+
+
+
+
+```python showLineNumbers
+from openai import OpenAI
+
+client = OpenAI(
+ api_key="sk-1234",
+ base_url="http://0.0.0.0:4000/v1/"
+)
+
+response = client.chat.completions.create(
+ model="us.anthropic.claude-3-7-sonnet-20250219-v1:0",
+ messages=[
+ {"role": "user", "content": "Hello, how are you?"}
+ ],
+ extra_body={
+ "tags": "private-data"
+ }
+)
+```
+
+
+
+
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/v1/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Hello, how are you?"
+ }
+ ],
+ "tags": "private-data"
+}'
+```
+
+
+
+
+
+
+## Additional Tag Features
+- [Sending tags in request headers](https://docs.litellm.ai/docs/proxy/tag_routing#calling-via-request-header)
+- [Tag based routing](https://docs.litellm.ai/docs/proxy/tag_routing)
+- [Track spend per tag](cost_tracking#-custom-tags)
+- [Setup Budgets per Virtual Key, Team](users)
+
diff --git a/docs/my-website/img/arize.png b/docs/my-website/img/arize.png
new file mode 100644
index 0000000000..45d6dacda9
Binary files /dev/null and b/docs/my-website/img/arize.png differ
diff --git a/docs/my-website/img/auto_prompt_caching.png b/docs/my-website/img/auto_prompt_caching.png
new file mode 100644
index 0000000000..6cd3785512
Binary files /dev/null and b/docs/my-website/img/auto_prompt_caching.png differ
diff --git a/docs/my-website/img/custom_prompt_management.png b/docs/my-website/img/custom_prompt_management.png
new file mode 100644
index 0000000000..2c96e0d116
Binary files /dev/null and b/docs/my-website/img/custom_prompt_management.png differ
diff --git a/docs/my-website/img/deadlock_fix_1.png b/docs/my-website/img/deadlock_fix_1.png
new file mode 100644
index 0000000000..df651f440c
Binary files /dev/null and b/docs/my-website/img/deadlock_fix_1.png differ
diff --git a/docs/my-website/img/deadlock_fix_2.png b/docs/my-website/img/deadlock_fix_2.png
new file mode 100644
index 0000000000..0f139d84e5
Binary files /dev/null and b/docs/my-website/img/deadlock_fix_2.png differ
diff --git a/docs/my-website/img/debug_sso.png b/docs/my-website/img/debug_sso.png
new file mode 100644
index 0000000000..d7dde36892
Binary files /dev/null and b/docs/my-website/img/debug_sso.png differ
diff --git a/docs/my-website/img/enterprise_vs_oss.png b/docs/my-website/img/enterprise_vs_oss.png
new file mode 100644
index 0000000000..f2b58fbc14
Binary files /dev/null and b/docs/my-website/img/enterprise_vs_oss.png differ
diff --git a/docs/my-website/img/entra_create_team.png b/docs/my-website/img/entra_create_team.png
new file mode 100644
index 0000000000..223a897d87
Binary files /dev/null and b/docs/my-website/img/entra_create_team.png differ
diff --git a/docs/my-website/img/hf_filter_inference_providers.png b/docs/my-website/img/hf_filter_inference_providers.png
new file mode 100644
index 0000000000..d4c7188919
Binary files /dev/null and b/docs/my-website/img/hf_filter_inference_providers.png differ
diff --git a/docs/my-website/img/image_handling.png b/docs/my-website/img/image_handling.png
new file mode 100644
index 0000000000..bd56206911
Binary files /dev/null and b/docs/my-website/img/image_handling.png differ
diff --git a/docs/my-website/img/litellm_codex.gif b/docs/my-website/img/litellm_codex.gif
new file mode 100644
index 0000000000..04332b5053
Binary files /dev/null and b/docs/my-website/img/litellm_codex.gif differ
diff --git a/docs/my-website/img/litellm_entra_id.png b/docs/my-website/img/litellm_entra_id.png
new file mode 100644
index 0000000000..4cfbd0747f
Binary files /dev/null and b/docs/my-website/img/litellm_entra_id.png differ
diff --git a/docs/my-website/img/litellm_mcp.png b/docs/my-website/img/litellm_mcp.png
new file mode 100644
index 0000000000..cef822eeb2
Binary files /dev/null and b/docs/my-website/img/litellm_mcp.png differ
diff --git a/docs/my-website/img/managed_files_arch.png b/docs/my-website/img/managed_files_arch.png
new file mode 100644
index 0000000000..e49c47334d
Binary files /dev/null and b/docs/my-website/img/managed_files_arch.png differ
diff --git a/docs/my-website/img/mcp_2.png b/docs/my-website/img/mcp_2.png
new file mode 100644
index 0000000000..98e063efc5
Binary files /dev/null and b/docs/my-website/img/mcp_2.png differ
diff --git a/docs/my-website/img/mcp_ui.png b/docs/my-website/img/mcp_ui.png
new file mode 100644
index 0000000000..6731fba71b
Binary files /dev/null and b/docs/my-website/img/mcp_ui.png differ
diff --git a/docs/my-website/img/msft_auto_team.png b/docs/my-website/img/msft_auto_team.png
new file mode 100644
index 0000000000..a50c5bbfbd
Binary files /dev/null and b/docs/my-website/img/msft_auto_team.png differ
diff --git a/docs/my-website/img/msft_default_settings.png b/docs/my-website/img/msft_default_settings.png
new file mode 100644
index 0000000000..0caa60b1f5
Binary files /dev/null and b/docs/my-website/img/msft_default_settings.png differ
diff --git a/docs/my-website/img/msft_enterprise_app.png b/docs/my-website/img/msft_enterprise_app.png
new file mode 100644
index 0000000000..0a8c849a5c
Binary files /dev/null and b/docs/my-website/img/msft_enterprise_app.png differ
diff --git a/docs/my-website/img/msft_enterprise_assign_group.png b/docs/my-website/img/msft_enterprise_assign_group.png
new file mode 100644
index 0000000000..d43e1c6684
Binary files /dev/null and b/docs/my-website/img/msft_enterprise_assign_group.png differ
diff --git a/docs/my-website/img/msft_enterprise_select_group.png b/docs/my-website/img/msft_enterprise_select_group.png
new file mode 100644
index 0000000000..e49032db9f
Binary files /dev/null and b/docs/my-website/img/msft_enterprise_select_group.png differ
diff --git a/docs/my-website/img/msft_member_1.png b/docs/my-website/img/msft_member_1.png
new file mode 100644
index 0000000000..2fe627f773
Binary files /dev/null and b/docs/my-website/img/msft_member_1.png differ
diff --git a/docs/my-website/img/msft_member_2.png b/docs/my-website/img/msft_member_2.png
new file mode 100644
index 0000000000..9757aa9cea
Binary files /dev/null and b/docs/my-website/img/msft_member_2.png differ
diff --git a/docs/my-website/img/msft_member_3.png b/docs/my-website/img/msft_member_3.png
new file mode 100644
index 0000000000..783a4a1dd8
Binary files /dev/null and b/docs/my-website/img/msft_member_3.png differ
diff --git a/docs/my-website/img/msft_sso_sign_in.png b/docs/my-website/img/msft_sso_sign_in.png
new file mode 100644
index 0000000000..43c5173295
Binary files /dev/null and b/docs/my-website/img/msft_sso_sign_in.png differ
diff --git a/docs/my-website/img/prevent_deadlocks.jpg b/docs/my-website/img/prevent_deadlocks.jpg
new file mode 100644
index 0000000000..2807f327d1
Binary files /dev/null and b/docs/my-website/img/prevent_deadlocks.jpg differ
diff --git a/docs/my-website/img/realtime_api.png b/docs/my-website/img/realtime_api.png
new file mode 100644
index 0000000000..798525278c
Binary files /dev/null and b/docs/my-website/img/realtime_api.png differ
diff --git a/docs/my-website/img/release_notes/chat_metrics.png b/docs/my-website/img/release_notes/chat_metrics.png
new file mode 100644
index 0000000000..2e45392cd6
Binary files /dev/null and b/docs/my-website/img/release_notes/chat_metrics.png differ
diff --git a/docs/my-website/img/release_notes/mcp_ui.png b/docs/my-website/img/release_notes/mcp_ui.png
new file mode 100644
index 0000000000..8f4cd4ea19
Binary files /dev/null and b/docs/my-website/img/release_notes/mcp_ui.png differ
diff --git a/docs/my-website/img/release_notes/new_activity_tab.png b/docs/my-website/img/release_notes/new_activity_tab.png
new file mode 100644
index 0000000000..e8cea22a90
Binary files /dev/null and b/docs/my-website/img/release_notes/new_activity_tab.png differ
diff --git a/docs/my-website/img/release_notes/new_tag_usage.png b/docs/my-website/img/release_notes/new_tag_usage.png
new file mode 100644
index 0000000000..4188cbc245
Binary files /dev/null and b/docs/my-website/img/release_notes/new_tag_usage.png differ
diff --git a/docs/my-website/img/release_notes/new_team_usage.png b/docs/my-website/img/release_notes/new_team_usage.png
new file mode 100644
index 0000000000..5fea2506d9
Binary files /dev/null and b/docs/my-website/img/release_notes/new_team_usage.png differ
diff --git a/docs/my-website/img/release_notes/new_team_usage_highlight.jpg b/docs/my-website/img/release_notes/new_team_usage_highlight.jpg
new file mode 100644
index 0000000000..05dbf4b918
Binary files /dev/null and b/docs/my-website/img/release_notes/new_team_usage_highlight.jpg differ
diff --git a/docs/my-website/img/release_notes/spend_by_model.jpg b/docs/my-website/img/release_notes/spend_by_model.jpg
new file mode 100644
index 0000000000..2584949eff
Binary files /dev/null and b/docs/my-website/img/release_notes/spend_by_model.jpg differ
diff --git a/docs/my-website/img/release_notes/sso_sync.png b/docs/my-website/img/release_notes/sso_sync.png
new file mode 100644
index 0000000000..a7bf6b838b
Binary files /dev/null and b/docs/my-website/img/release_notes/sso_sync.png differ
diff --git a/docs/my-website/img/release_notes/tag_management.png b/docs/my-website/img/release_notes/tag_management.png
new file mode 100644
index 0000000000..eca7b8cbb1
Binary files /dev/null and b/docs/my-website/img/release_notes/tag_management.png differ
diff --git a/docs/my-website/img/release_notes/team_model_add.png b/docs/my-website/img/release_notes/team_model_add.png
new file mode 100644
index 0000000000..f548469846
Binary files /dev/null and b/docs/my-website/img/release_notes/team_model_add.png differ
diff --git a/docs/my-website/img/release_notes/ui_usage.png b/docs/my-website/img/release_notes/ui_usage.png
new file mode 100644
index 0000000000..ac39ffb918
Binary files /dev/null and b/docs/my-website/img/release_notes/ui_usage.png differ
diff --git a/docs/my-website/img/release_notes/unified_responses_api_rn.png b/docs/my-website/img/release_notes/unified_responses_api_rn.png
new file mode 100644
index 0000000000..60ede0e211
Binary files /dev/null and b/docs/my-website/img/release_notes/unified_responses_api_rn.png differ
diff --git a/docs/my-website/img/scim_0.png b/docs/my-website/img/scim_0.png
new file mode 100644
index 0000000000..265271b78c
Binary files /dev/null and b/docs/my-website/img/scim_0.png differ
diff --git a/docs/my-website/img/scim_1.png b/docs/my-website/img/scim_1.png
new file mode 100644
index 0000000000..c6d64b5d11
Binary files /dev/null and b/docs/my-website/img/scim_1.png differ
diff --git a/docs/my-website/img/scim_2.png b/docs/my-website/img/scim_2.png
new file mode 100644
index 0000000000..c96cf9f0b5
Binary files /dev/null and b/docs/my-website/img/scim_2.png differ
diff --git a/docs/my-website/img/scim_3.png b/docs/my-website/img/scim_3.png
new file mode 100644
index 0000000000..5ecd3906bd
Binary files /dev/null and b/docs/my-website/img/scim_3.png differ
diff --git a/docs/my-website/img/scim_4.png b/docs/my-website/img/scim_4.png
new file mode 100644
index 0000000000..b4b484418c
Binary files /dev/null and b/docs/my-website/img/scim_4.png differ
diff --git a/docs/my-website/img/scim_integration.png b/docs/my-website/img/scim_integration.png
new file mode 100644
index 0000000000..2cfeb872bf
Binary files /dev/null and b/docs/my-website/img/scim_integration.png differ
diff --git a/docs/my-website/img/tag_create.png b/docs/my-website/img/tag_create.png
new file mode 100644
index 0000000000..d515b3a9f4
Binary files /dev/null and b/docs/my-website/img/tag_create.png differ
diff --git a/docs/my-website/img/tag_invalid.png b/docs/my-website/img/tag_invalid.png
new file mode 100644
index 0000000000..e12f7197b1
Binary files /dev/null and b/docs/my-website/img/tag_invalid.png differ
diff --git a/docs/my-website/img/tag_valid.png b/docs/my-website/img/tag_valid.png
new file mode 100644
index 0000000000..3b6e121d12
Binary files /dev/null and b/docs/my-website/img/tag_valid.png differ
diff --git a/docs/my-website/img/ui_auto_prompt_caching.png b/docs/my-website/img/ui_auto_prompt_caching.png
new file mode 100644
index 0000000000..e6f48e48d0
Binary files /dev/null and b/docs/my-website/img/ui_auto_prompt_caching.png differ
diff --git a/docs/my-website/package-lock.json b/docs/my-website/package-lock.json
index 6c07e67d91..e6f20d567b 100644
--- a/docs/my-website/package-lock.json
+++ b/docs/my-website/package-lock.json
@@ -2148,9 +2148,10 @@
}
},
"node_modules/@babel/runtime": {
- "version": "7.26.0",
- "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.0.tgz",
- "integrity": "sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw==",
+ "version": "7.27.0",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.0.tgz",
+ "integrity": "sha512-VtPOkrdPHZsKc/clNqyi9WUA8TINkZ4cGk63UUE3u4pmB2k+ZMQRDuIOagv8UVd6j7k0T3+RRIb7beKTebNbcw==",
+ "license": "MIT",
"dependencies": {
"regenerator-runtime": "^0.14.0"
},
@@ -12454,9 +12455,10 @@
}
},
"node_modules/http-proxy-middleware": {
- "version": "2.0.7",
- "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz",
- "integrity": "sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==",
+ "version": "2.0.9",
+ "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz",
+ "integrity": "sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==",
+ "license": "MIT",
"dependencies": {
"@types/http-proxy": "^1.17.8",
"http-proxy": "^1.18.1",
@@ -12559,9 +12561,10 @@
}
},
"node_modules/image-size": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz",
- "integrity": "sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==",
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.2.1.tgz",
+ "integrity": "sha512-rH+46sQJ2dlwfjfhCyNx5thzrv+dtmBIhPHk0zgRUukHzZ/kRueTJXoYYsclBaKcSMBWuGbOFXtioLpzTb5euw==",
+ "license": "MIT",
"dependencies": {
"queue": "6.0.2"
},
diff --git a/docs/my-website/release_notes/v1.55.10/index.md b/docs/my-website/release_notes/v1.55.10/index.md
index 7f9839c2b5..2b5ce75cf0 100644
--- a/docs/my-website/release_notes/v1.55.10/index.md
+++ b/docs/my-website/release_notes/v1.55.10/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.55.8-stable/index.md b/docs/my-website/release_notes/v1.55.8-stable/index.md
index 7e82e94747..38c78eb537 100644
--- a/docs/my-website/release_notes/v1.55.8-stable/index.md
+++ b/docs/my-website/release_notes/v1.55.8-stable/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.56.1/index.md b/docs/my-website/release_notes/v1.56.1/index.md
index 7c4ccc74ea..74f3606b90 100644
--- a/docs/my-website/release_notes/v1.56.1/index.md
+++ b/docs/my-website/release_notes/v1.56.1/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.56.3/index.md b/docs/my-website/release_notes/v1.56.3/index.md
index 95205633ea..3d996ba5b8 100644
--- a/docs/my-website/release_notes/v1.56.3/index.md
+++ b/docs/my-website/release_notes/v1.56.3/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.56.4/index.md b/docs/my-website/release_notes/v1.56.4/index.md
index 93f8725632..bf9cc2d94e 100644
--- a/docs/my-website/release_notes/v1.56.4/index.md
+++ b/docs/my-website/release_notes/v1.56.4/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.57.3/index.md b/docs/my-website/release_notes/v1.57.3/index.md
index 3bee71a8e1..ab1154a0a8 100644
--- a/docs/my-website/release_notes/v1.57.3/index.md
+++ b/docs/my-website/release_notes/v1.57.3/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.57.7/index.md b/docs/my-website/release_notes/v1.57.7/index.md
index ce987baf77..4da2402efa 100644
--- a/docs/my-website/release_notes/v1.57.7/index.md
+++ b/docs/my-website/release_notes/v1.57.7/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.57.8-stable/index.md b/docs/my-website/release_notes/v1.57.8-stable/index.md
index d37a7b9ff8..78fe13f2ed 100644
--- a/docs/my-website/release_notes/v1.57.8-stable/index.md
+++ b/docs/my-website/release_notes/v1.57.8-stable/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
@@ -38,7 +38,7 @@ hide_table_of_contents: false
2. OpenAI Moderations - `omni-moderation-latest` support. [Start Here](https://docs.litellm.ai/docs/moderation)
3. Azure O1 - fake streaming support. This ensures if a `stream=true` is passed, the response is streamed. [Start Here](https://docs.litellm.ai/docs/providers/azure)
4. Anthropic - non-whitespace char stop sequence handling - [PR](https://github.com/BerriAI/litellm/pull/7484)
-5. Azure OpenAI - support entrata id username + password based auth. [Start Here](https://docs.litellm.ai/docs/providers/azure#entrata-id---use-tenant_id-client_id-client_secret)
+5. Azure OpenAI - support Entra ID username + password based auth. [Start Here](https://docs.litellm.ai/docs/providers/azure#entra-id---use-tenant_id-client_id-client_secret)
6. LM Studio - embedding route support. [Start Here](https://docs.litellm.ai/docs/providers/lm-studio)
7. WatsonX - ZenAPIKeyAuth support. [Start Here](https://docs.litellm.ai/docs/providers/watsonx)
diff --git a/docs/my-website/release_notes/v1.59.0/index.md b/docs/my-website/release_notes/v1.59.0/index.md
index 5343ba49ad..2699e42020 100644
--- a/docs/my-website/release_notes/v1.59.0/index.md
+++ b/docs/my-website/release_notes/v1.59.0/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.59.8-stable/index.md b/docs/my-website/release_notes/v1.59.8-stable/index.md
index fa9825fb66..023f284ad5 100644
--- a/docs/my-website/release_notes/v1.59.8-stable/index.md
+++ b/docs/my-website/release_notes/v1.59.8-stable/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.61.20-stable/index.md b/docs/my-website/release_notes/v1.61.20-stable/index.md
index 132c1aa318..5012e2aa90 100644
--- a/docs/my-website/release_notes/v1.61.20-stable/index.md
+++ b/docs/my-website/release_notes/v1.61.20-stable/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.63.0/index.md b/docs/my-website/release_notes/v1.63.0/index.md
index e74a2f9b86..ab74b11b4d 100644
--- a/docs/my-website/release_notes/v1.63.0/index.md
+++ b/docs/my-website/release_notes/v1.63.0/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.63.11-stable/index.md b/docs/my-website/release_notes/v1.63.11-stable/index.md
index 55fefb737a..882747a07b 100644
--- a/docs/my-website/release_notes/v1.63.11-stable/index.md
+++ b/docs/my-website/release_notes/v1.63.11-stable/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
@@ -26,13 +26,18 @@ This release is primarily focused on:
- UI - Credential Management, re-use credentials when adding new models
- UI - Test Connection to LLM Provider before adding a model
-:::info
+## Known Issues
+- 🚨 Known issue on Azure OpenAI - We don't recommend upgrading if you use Azure OpenAI. This version failed our Azure OpenAI load test
-This release will be live on 03/16/2025
-:::
+## Docker Run LiteLLM Proxy
-
+```shell
+docker run \
+  -e STORE_MODEL_IN_DB=True \
+  -p 4000:4000 \
+  ghcr.io/berriai/litellm:main-v1.63.11-stable
+```
## Demo Instance
diff --git a/docs/my-website/release_notes/v1.63.14/index.md b/docs/my-website/release_notes/v1.63.14/index.md
new file mode 100644
index 0000000000..ff2630468c
--- /dev/null
+++ b/docs/my-website/release_notes/v1.63.14/index.md
@@ -0,0 +1,131 @@
+---
+title: v1.63.14-stable
+slug: v1.63.14-stable
+date: 2025-03-22T10:00:00
+authors:
+ - name: Krrish Dholakia
+ title: CEO, LiteLLM
+ url: https://www.linkedin.com/in/krish-d/
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
+ - name: Ishaan Jaffer
+ title: CTO, LiteLLM
+ url: https://www.linkedin.com/in/reffajnaahsi/
+ image_url: https://pbs.twimg.com/profile_images/1613813310264340481/lz54oEiB_400x400.jpg
+
+tags: [credential management, thinking content, responses api, snowflake]
+hide_table_of_contents: false
+---
+
+import Image from '@theme/IdealImage';
+
+These are the changes since `v1.63.11-stable`.
+
+This release brings:
+- LLM Translation Improvements (MCP Support and Bedrock Application Profiles)
+- Perf improvements for Usage-based Routing
+- Streaming guardrail support via websockets
+- Azure OpenAI client perf fix (from previous release)
+
+## Docker Run LiteLLM Proxy
+
+```shell
+docker run \
+  -e STORE_MODEL_IN_DB=True \
+  -p 4000:4000 \
+  ghcr.io/berriai/litellm:main-v1.63.14-stable.patch1
+```
+
+## Demo Instance
+
+Here's a Demo Instance to test changes:
+- Instance: https://demo.litellm.ai/
+- Login Credentials:
+ - Username: admin
+ - Password: sk-1234
+
+
+
+## New Models / Updated Models
+
+- Azure gpt-4o - fixed pricing to latest global pricing - [PR](https://github.com/BerriAI/litellm/pull/9361)
+- O1-Pro - add pricing + model information - [PR](https://github.com/BerriAI/litellm/pull/9397)
+- Azure AI - mistral 3.1 small pricing added - [PR](https://github.com/BerriAI/litellm/pull/9453)
+- Azure - gpt-4.5-preview pricing added - [PR](https://github.com/BerriAI/litellm/pull/9453)
+
+
+
+## LLM Translation
+
+1. **New LLM Features**
+
+- Bedrock: Support bedrock application inference profiles [Docs](https://docs.litellm.ai/docs/providers/bedrock#bedrock-application-inference-profile)
+ - Infer aws region from bedrock application profile id - (`arn:aws:bedrock:us-east-1:...`)
+- Ollama - support calling via `/v1/completions` [Get Started](../../docs/providers/ollama#using-ollama-fim-on-v1completions)
+- Bedrock - support `us.deepseek.r1-v1:0` model name [Docs](../../docs/providers/bedrock#supported-aws-bedrock-models)
+- OpenRouter - `OPENROUTER_API_BASE` env var support [Docs](../../docs/providers/openrouter.md)
+- Azure - add audio model parameter support - [Docs](../../docs/providers/azure#azure-audio-model)
+- OpenAI - PDF File support [Docs](../../docs/completion/document_understanding#openai-file-message-type)
+- OpenAI - o1-pro Responses API streaming support [Docs](../../docs/response_api.md#streaming)
+- [BETA] MCP - Use MCP Tools with LiteLLM SDK [Docs](../../docs/mcp)
+
+2. **Bug Fixes**
+
+- Voyage: prompt token on embedding tracking fix - [PR](https://github.com/BerriAI/litellm/commit/56d3e75b330c3c3862dc6e1c51c1210e48f1068e)
+- Sagemaker - Fix ‘Too little data for declared Content-Length’ error - [PR](https://github.com/BerriAI/litellm/pull/9326)
+- OpenAI-compatible models - fix issue when calling openai-compatible models w/ custom_llm_provider set - [PR](https://github.com/BerriAI/litellm/pull/9355)
+- VertexAI - Embedding ‘outputDimensionality’ support - [PR](https://github.com/BerriAI/litellm/commit/437dbe724620675295f298164a076cbd8019d304)
+- Anthropic - return consistent json response format on streaming/non-streaming - [PR](https://github.com/BerriAI/litellm/pull/9437)
+
+## Spend Tracking Improvements
+
+- `litellm_proxy/` - support reading litellm response cost header from proxy, when using client sdk
+- Reset Budget Job - fix budget reset error on keys/teams/users [PR](https://github.com/BerriAI/litellm/pull/9329)
+- Streaming - Prevents final chunk w/ usage from being ignored (impacted bedrock streaming + cost tracking) [PR](https://github.com/BerriAI/litellm/pull/9314)
+
+
+## UI
+
+1. Users Page
+ - Feature: Control default internal user settings [PR](https://github.com/BerriAI/litellm/pull/9328)
+2. Icons:
+ - Feature: Replace external "artificialanalysis.ai" icons by local svg [PR](https://github.com/BerriAI/litellm/pull/9374)
+3. Sign In/Sign Out
+ - Fix: Default login when `default_user_id` user does not exist in DB [PR](https://github.com/BerriAI/litellm/pull/9395)
+
+
+## Logging Integrations
+
+- Support post-call guardrails for streaming responses [Get Started](../../docs/proxy/guardrails/custom_guardrail#1-write-a-customguardrail-class)
+- Arize [Get Started](../../docs/observability/arize_integration)
+ - fix invalid package import [PR](https://github.com/BerriAI/litellm/pull/9338)
+ - migrate to using standardloggingpayload for metadata, ensures spans land successfully [PR](https://github.com/BerriAI/litellm/pull/9338)
+ - fix logging to just log the LLM I/O [PR](https://github.com/BerriAI/litellm/pull/9353)
+ - Dynamic API Key/Space param support [Get Started](../../docs/observability/arize_integration#pass-arize-spacekey-per-request)
+- StandardLoggingPayload - Log litellm_model_name in payload. Allows knowing what the model sent to API provider was [Get Started](../../docs/proxy/logging_spec#standardlogginghiddenparams)
+- Prompt Management - Allow building custom prompt management integration [Get Started](../../docs/proxy/custom_prompt_management.md)
+
+## Performance / Reliability improvements
+
+- Redis Caching - add 5s default timeout, prevents hanging redis connection from impacting llm calls [PR](https://github.com/BerriAI/litellm/commit/db92956ae33ed4c4e3233d7e1b0c7229817159bf)
+- Allow disabling all spend updates / writes to DB - patch to allow disabling all spend updates to DB with a flag [PR](https://github.com/BerriAI/litellm/pull/9331)
+- Azure OpenAI - correctly re-use azure openai client, fixes perf issue from previous Stable release [PR](https://github.com/BerriAI/litellm/commit/f2026ef907c06d94440930917add71314b901413)
+- Azure OpenAI - uses litellm.ssl_verify on Azure/OpenAI clients [PR](https://github.com/BerriAI/litellm/commit/f2026ef907c06d94440930917add71314b901413)
+- Usage-based routing - Wildcard model support [Get Started](../../docs/proxy/usage_based_routing#wildcard-model-support)
+- Usage-based routing - Support batch writing increments to redis - reduces latency to same as ‘simple-shuffle’ [PR](https://github.com/BerriAI/litellm/pull/9357)
+- Router - show reason for model cooldown on ‘no healthy deployments available error’ [PR](https://github.com/BerriAI/litellm/pull/9438)
+- Caching - add max value limit to an item in in-memory cache (1MB) - prevents OOM errors on large image url’s being sent through proxy [PR](https://github.com/BerriAI/litellm/pull/9448)
+
+
+## General Improvements
+
+- Passthrough Endpoints - support returning api-base on pass-through endpoints Response Headers [Docs](../../docs/proxy/response_headers#litellm-specific-headers)
+- SSL - support reading ssl security level from env var - Allows user to specify lower security settings [Get Started](../../docs/guides/security_settings)
+- Credentials - only poll Credentials table when `STORE_MODEL_IN_DB` is True [PR](https://github.com/BerriAI/litellm/pull/9376)
+- Image URL Handling - new architecture doc on image url handling [Docs](../../docs/proxy/image_handling)
+- OpenAI - bump to pip install "openai==1.68.2" [PR](https://github.com/BerriAI/litellm/commit/e85e3bc52a9de86ad85c3dbb12d87664ee567a5a)
+- Gunicorn - security fix - bump gunicorn==23.0.0 [PR](https://github.com/BerriAI/litellm/commit/7e9fc92f5c7fea1e7294171cd3859d55384166eb)
+
+
+## Complete Git Diff
+
+[Here's the complete git diff](https://github.com/BerriAI/litellm/compare/v1.63.11-stable...v1.63.14.rc)
\ No newline at end of file
diff --git a/docs/my-website/release_notes/v1.63.2-stable/index.md b/docs/my-website/release_notes/v1.63.2-stable/index.md
index 0c359452dc..3d47e02ac1 100644
--- a/docs/my-website/release_notes/v1.63.2-stable/index.md
+++ b/docs/my-website/release_notes/v1.63.2-stable/index.md
@@ -6,7 +6,7 @@ authors:
- name: Krrish Dholakia
title: CEO, LiteLLM
url: https://www.linkedin.com/in/krish-d/
- image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1743638400&v=beta&t=39KOXMUFedvukiWWVPHf3qI45fuQD7lNglICwN31DrI
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
- name: Ishaan Jaffer
title: CTO, LiteLLM
url: https://www.linkedin.com/in/reffajnaahsi/
diff --git a/docs/my-website/release_notes/v1.65.0-stable/index.md b/docs/my-website/release_notes/v1.65.0-stable/index.md
new file mode 100644
index 0000000000..3696f5023c
--- /dev/null
+++ b/docs/my-website/release_notes/v1.65.0-stable/index.md
@@ -0,0 +1,160 @@
+---
+title: v1.65.0-stable - Model Context Protocol
+slug: v1.65.0-stable
+date: 2025-03-30T10:00:00
+authors:
+ - name: Krrish Dholakia
+ title: CEO, LiteLLM
+ url: https://www.linkedin.com/in/krish-d/
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
+ - name: Ishaan Jaffer
+ title: CTO, LiteLLM
+ url: https://www.linkedin.com/in/reffajnaahsi/
+ image_url: https://pbs.twimg.com/profile_images/1613813310264340481/lz54oEiB_400x400.jpg
+tags: [mcp, custom_prompt_management]
+hide_table_of_contents: false
+---
+import Image from '@theme/IdealImage';
+
+v1.65.0-stable is live now. Here are the key highlights of this release:
+- **MCP Support**: Support for adding and using MCP servers on the LiteLLM proxy.
+- **UI view total usage after 1M+ logs**: You can now view usage analytics after crossing 1M+ logs in DB.
+
+## Model Context Protocol (MCP)
+
+This release introduces support for centrally adding MCP servers on LiteLLM. This allows you to add MCP server endpoints and your developers can `list` and `call` MCP tools through LiteLLM.
+
+Read more about MCP [here](https://docs.litellm.ai/docs/mcp).
+
+
+
+ Expose and use MCP servers through LiteLLM
+
+
+## UI view total usage after 1M+ logs
+
+This release brings the ability to view total usage analytics even after exceeding 1M+ logs in your database. We've implemented a scalable architecture that stores only aggregate usage data, resulting in significantly more efficient queries and reduced database CPU utilization.
+
+
+
+
+ View total usage after 1M+ logs
+
+
+
+- How this works:
+ - We now aggregate usage data into a dedicated DailyUserSpend table, significantly reducing query load and CPU usage even beyond 1M+ logs.
+
+- Daily Spend Breakdown API:
+
+ - Retrieve granular daily usage data (by model, provider, and API key) with a single endpoint.
+ Example Request:
+
+ ```shell title="Daily Spend Breakdown API" showLineNumbers
+ curl -L -X GET 'http://localhost:4000/user/daily/activity?start_date=2025-03-20&end_date=2025-03-27' \
+ -H 'Authorization: Bearer sk-...'
+ ```
+
+ ```json title="Daily Spend Breakdown API Response" showLineNumbers
+ {
+ "results": [
+ {
+ "date": "2025-03-27",
+ "metrics": {
+ "spend": 0.0177072,
+ "prompt_tokens": 111,
+ "completion_tokens": 1711,
+ "total_tokens": 1822,
+ "api_requests": 11
+ },
+ "breakdown": {
+ "models": {
+ "gpt-4o-mini": {
+ "spend": 1.095e-05,
+ "prompt_tokens": 37,
+ "completion_tokens": 9,
+ "total_tokens": 46,
+ "api_requests": 1
+ }
+ },
+ "providers": { "openai": { ... }, "azure_ai": { ... } },
+ "api_keys": { "3126b6eaf1...": { ... } }
+ }
+ }
+ ],
+ "metadata": {
+ "total_spend": 0.7274667,
+ "total_prompt_tokens": 280990,
+ "total_completion_tokens": 376674,
+ "total_api_requests": 14
+ }
+ }
+ ```
+
+
+
+
+## New Models / Updated Models
+- Support for Vertex AI gemini-2.0-flash-lite & Google AI Studio gemini-2.0-flash-lite [PR](https://github.com/BerriAI/litellm/pull/9523)
+- Support for Vertex AI Fine-Tuned LLMs [PR](https://github.com/BerriAI/litellm/pull/9542)
+- Nova Canvas image generation support [PR](https://github.com/BerriAI/litellm/pull/9525)
+- OpenAI gpt-4o-transcribe support [PR](https://github.com/BerriAI/litellm/pull/9517)
+- Added new Vertex AI text embedding model [PR](https://github.com/BerriAI/litellm/pull/9476)
+
+## LLM Translation
+- OpenAI Web Search Tool Call Support [PR](https://github.com/BerriAI/litellm/pull/9465)
+- Vertex AI topLogprobs support [PR](https://github.com/BerriAI/litellm/pull/9518)
+- Support for sending images and video to Vertex AI multimodal embedding [Doc](https://docs.litellm.ai/docs/providers/vertex#multi-modal-embeddings)
+- Support litellm.api_base for Vertex AI + Gemini across completion, embedding, image_generation [PR](https://github.com/BerriAI/litellm/pull/9516)
+- Bug fix for returning `response_cost` when using litellm python SDK with LiteLLM Proxy [PR](https://github.com/BerriAI/litellm/commit/6fd18651d129d606182ff4b980e95768fc43ca3d)
+- Support for `max_completion_tokens` on Mistral API [PR](https://github.com/BerriAI/litellm/pull/9606)
+- Refactored Vertex AI passthrough routes - fixes unpredictable behaviour with auto-setting default_vertex_region on router model add [PR](https://github.com/BerriAI/litellm/pull/9467)
+
+## Spend Tracking Improvements
+- Log 'api_base' on spend logs [PR](https://github.com/BerriAI/litellm/pull/9509)
+- Support for Gemini audio token cost tracking [PR](https://github.com/BerriAI/litellm/pull/9535)
+- Fixed OpenAI audio input token cost tracking [PR](https://github.com/BerriAI/litellm/pull/9535)
+
+## UI
+
+### Model Management
+- Allowed team admins to add/update/delete models on UI [PR](https://github.com/BerriAI/litellm/pull/9572)
+- Added rendering of `supports_web_search` on model hub [PR](https://github.com/BerriAI/litellm/pull/9469)
+
+### Request Logs
+- Show API base and model ID on request logs [PR](https://github.com/BerriAI/litellm/pull/9572)
+- Allow viewing key info on request logs [PR](https://github.com/BerriAI/litellm/pull/9568)
+
+### Usage Tab
+- Added Daily User Spend Aggregate view - allows UI Usage tab to work beyond 1M rows [PR](https://github.com/BerriAI/litellm/pull/9538)
+- Connected UI to "LiteLLM_DailyUserSpend" spend table [PR](https://github.com/BerriAI/litellm/pull/9603)
+
+## Logging Integrations
+- Fixed StandardLoggingPayload for GCS Pub Sub Logging Integration [PR](https://github.com/BerriAI/litellm/pull/9508)
+- Track `litellm_model_name` on `StandardLoggingPayload` [Docs](https://docs.litellm.ai/docs/proxy/logging_spec#standardlogginghiddenparams)
+
+## Performance / Reliability Improvements
+- LiteLLM Redis semantic caching implementation [PR](https://github.com/BerriAI/litellm/pull/9356)
+- Gracefully handle exceptions when DB is having an outage [PR](https://github.com/BerriAI/litellm/pull/9533)
+- Allow Pods to startup + passing /health/readiness when allow_requests_on_db_unavailable: True and DB is down [PR](https://github.com/BerriAI/litellm/pull/9569)
+
+
+## General Improvements
+- Support for exposing MCP tools on litellm proxy [PR](https://github.com/BerriAI/litellm/pull/9426)
+- Support discovering Gemini, Anthropic, xAI models by calling their /v1/models endpoint [PR](https://github.com/BerriAI/litellm/pull/9530)
+- Fixed route check for non-proxy admins on JWT auth [PR](https://github.com/BerriAI/litellm/pull/9454)
+- Added baseline Prisma database migrations [PR](https://github.com/BerriAI/litellm/pull/9565)
+- View all wildcard models on /model/info [PR](https://github.com/BerriAI/litellm/pull/9572)
+
+
+## Security
+- Bumped next from 14.2.21 to 14.2.25 in UI dashboard [PR](https://github.com/BerriAI/litellm/pull/9458)
+
+## Complete Git Diff
+
+[Here's the complete git diff](https://github.com/BerriAI/litellm/compare/v1.63.14-stable.patch1...v1.65.0-stable)
diff --git a/docs/my-website/release_notes/v1.65.0/index.md b/docs/my-website/release_notes/v1.65.0/index.md
new file mode 100644
index 0000000000..84276c997d
--- /dev/null
+++ b/docs/my-website/release_notes/v1.65.0/index.md
@@ -0,0 +1,34 @@
+---
+title: v1.65.0 - Team Model Add - update
+slug: v1.65.0
+date: 2025-03-28T10:00:00
+authors:
+ - name: Krrish Dholakia
+ title: CEO, LiteLLM
+ url: https://www.linkedin.com/in/krish-d/
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
+ - name: Ishaan Jaffer
+ title: CTO, LiteLLM
+ url: https://www.linkedin.com/in/reffajnaahsi/
+ image_url: https://pbs.twimg.com/profile_images/1613813310264340481/lz54oEiB_400x400.jpg
+tags: [management endpoints, team models, ui]
+hide_table_of_contents: false
+---
+
+import Image from '@theme/IdealImage';
+
+v1.65.0 updates the `/model/new` endpoint to prevent non-team admins from creating team models.
+
+This means that only proxy admins or team admins can create team models.
+
+## Additional Changes
+
+- Allows team admins to call `/model/update` to update team models.
+- Allows team admins to call `/model/delete` to delete team models.
+- Introduces new `user_models_only` param to `/v2/model/info` - only return models added by this user (see the example request below).
+
+
+These changes enable team admins to add and manage models for their team on the LiteLLM UI + API.
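+
+For illustration, here's a minimal sketch of the new param (this assumes a proxy running on `localhost:4000`, a valid key, and that `user_models_only` is passed as a query parameter):
+
+```shell title="/v2/model/info with user_models_only" showLineNumbers
+curl -L -X GET 'http://localhost:4000/v2/model/info?user_models_only=true' \
+-H 'Authorization: Bearer sk-...'
+```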
+
+
+
\ No newline at end of file
diff --git a/docs/my-website/release_notes/v1.65.4-stable/index.md b/docs/my-website/release_notes/v1.65.4-stable/index.md
new file mode 100644
index 0000000000..872024a47a
--- /dev/null
+++ b/docs/my-website/release_notes/v1.65.4-stable/index.md
@@ -0,0 +1,176 @@
+---
+title: v1.65.4-stable
+slug: v1.65.4-stable
+date: 2025-04-05T10:00:00
+authors:
+ - name: Krrish Dholakia
+ title: CEO, LiteLLM
+ url: https://www.linkedin.com/in/krish-d/
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
+ - name: Ishaan Jaffer
+ title: CTO, LiteLLM
+ url: https://www.linkedin.com/in/reffajnaahsi/
+ image_url: https://pbs.twimg.com/profile_images/1613813310264340481/lz54oEiB_400x400.jpg
+
+tags: []
+hide_table_of_contents: false
+---
+
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+## Deploy this version
+
+
+
+
+```shell showLineNumbers title="docker run litellm"
+docker run \
+-e STORE_MODEL_IN_DB=True \
+-p 4000:4000 \
+ghcr.io/berriai/litellm:main-v1.65.4-stable
+```
+
+
+
+
+```shell showLineNumbers title="pip install litellm"
+pip install litellm==1.65.4.post1
+```
+
+
+
+v1.65.4-stable is live. Here are the improvements since v1.65.0-stable.
+
+## Key Highlights
+- **Preventing DB Deadlocks**: Fixes a high-traffic issue when multiple instances were writing to the DB at the same time.
+- **New Usage Tab**: Enables viewing spend by model and customizing date range
+
+Let's dive in.
+
+### Preventing DB Deadlocks
+
+
+
+This release fixes the DB deadlocking issue that users faced in high traffic (10K+ RPS). This ensures user/key/team spend tracking works reliably at that scale.
+
+Read more about the new architecture [here](https://docs.litellm.ai/docs/proxy/db_deadlocks)
+
+
+### New Usage Tab
+
+
+
+The new Usage tab lets you track daily spend by model. Combined with the ability to view successful requests and token usage, this makes it easier to catch any spend tracking or token counting errors.
+
+To test this out, just go to Experimental > New Usage > Activity.
+
+
+## New Models / Updated Models
+
+1. Databricks - claude-3-7-sonnet cost tracking [PR](https://github.com/BerriAI/litellm/blob/52b35cd8093b9ad833987b24f494586a1e923209/model_prices_and_context_window.json#L10350)
+2. VertexAI - `gemini-2.5-pro-exp-03-25` cost tracking [PR](https://github.com/BerriAI/litellm/blob/52b35cd8093b9ad833987b24f494586a1e923209/model_prices_and_context_window.json#L4492)
+3. VertexAI - `gemini-2.0-flash` cost tracking [PR](https://github.com/BerriAI/litellm/blob/52b35cd8093b9ad833987b24f494586a1e923209/model_prices_and_context_window.json#L4689)
+4. Groq - add whisper ASR models to model cost map [PR](https://github.com/BerriAI/litellm/blob/52b35cd8093b9ad833987b24f494586a1e923209/model_prices_and_context_window.json#L3324)
+5. IBM - Add watsonx/ibm/granite-3-8b-instruct to model cost map [PR](https://github.com/BerriAI/litellm/blob/52b35cd8093b9ad833987b24f494586a1e923209/model_prices_and_context_window.json#L91)
+6. Google AI Studio - add gemini/gemini-2.5-pro-preview-03-25 to model cost map [PR](https://github.com/BerriAI/litellm/blob/52b35cd8093b9ad833987b24f494586a1e923209/model_prices_and_context_window.json#L4850)
+
+## LLM Translation
+1. Vertex AI - Support anyOf param for OpenAI json schema translation [Get Started](https://docs.litellm.ai/docs/providers/vertex#json-schema)
+2. Anthropic - response_format + thinking param support (works across Anthropic API, Bedrock, Vertex) [Get Started](https://docs.litellm.ai/docs/reasoning_content)
+3. Anthropic - if thinking tokens are specified and max tokens is not, ensure the max tokens sent to Anthropic is higher than the thinking tokens (works across Anthropic API, Bedrock, Vertex) [PR](https://github.com/BerriAI/litellm/pull/9594)
+4. Bedrock - latency optimized inference support [Get Started](https://docs.litellm.ai/docs/providers/bedrock#usage---latency-optimized-inference)
+5. Sagemaker - handle special tokens + multibyte character code in response [Get Started](https://docs.litellm.ai/docs/providers/aws_sagemaker)
+6. MCP - add support for using SSE MCP servers [Get Started](https://docs.litellm.ai/docs/mcp#usage)
+7. Anthropic - new `litellm.messages.create` interface for calling Anthropic `/v1/messages` via passthrough [Get Started](https://docs.litellm.ai/docs/anthropic_unified#usage)
+8. Anthropic - support 'file' content type in message param (works across Anthropic API, Bedrock, Vertex) [Get Started](https://docs.litellm.ai/docs/providers/anthropic#usage---pdf)
+9. Anthropic - map openai 'reasoning_effort' to anthropic 'thinking' param (works across Anthropic API, Bedrock, Vertex) [Get Started](https://docs.litellm.ai/docs/providers/anthropic#usage---thinking--reasoning_content)
+10. Google AI Studio (Gemini) - [BETA] `/v1/files` upload support [Get Started](../../docs/providers/google_ai_studio/files)
+11. Azure - fix o-series tool calling [Get Started](../../docs/providers/azure#tool-calling--function-calling)
+12. Unified file id - [ALPHA] allow calling multiple providers with same file id [PR](https://github.com/BerriAI/litellm/pull/9718)
+ - This is experimental, and not recommended for production use.
+ - We plan to have a production-ready implementation by next week.
+13. Google AI Studio (Gemini) - return logprobs [PR](https://github.com/BerriAI/litellm/pull/9713)
+14. Anthropic - Support prompt caching for Anthropic tool calls [Get Started](https://docs.litellm.ai/docs/completion/prompt_caching)
+15. OpenRouter - unwrap extra body on open router calls [PR](https://github.com/BerriAI/litellm/pull/9747)
+16. VertexAI - fix credential caching issue [PR](https://github.com/BerriAI/litellm/pull/9756)
+17. XAI - filter out 'name' param for XAI [PR](https://github.com/BerriAI/litellm/pull/9761)
+18. Gemini - image generation output support [Get Started](../../docs/providers/gemini#image-generation)
+19. Databricks - support claude-3-7-sonnet w/ thinking + response_format [Get Started](../../docs/providers/databricks#usage---thinking--reasoning_content)
+
+## Spend Tracking Improvements
+1. Reliability fix - Check sent and received model for cost calculation [PR](https://github.com/BerriAI/litellm/pull/9669)
+2. Vertex AI - Multimodal embedding cost tracking [Get Started](https://docs.litellm.ai/docs/providers/vertex#multi-modal-embeddings), [PR](https://github.com/BerriAI/litellm/pull/9623)
+
+## Management Endpoints / UI
+
+
+
+1. New Usage Tab
+ - Report 'total_tokens' + report success/failure calls
+ - Remove double bars on scroll
+ - Ensure ‘daily spend’ chart ordered from earliest to latest date
+ - showing spend per model per day
+ - show key alias on usage tab
+ - Allow non-admins to view their activity
+ - Add date picker to new usage tab
+2. Virtual Keys Tab
+ - remove 'default key' on user signup
+ - fix showing user models available for personal key creation
+3. Test Key Tab
+ - Allow testing image generation models
+4. Models Tab
+ - Fix bulk adding models
+ - support reusable credentials for passthrough endpoints
+ - Allow team members to see team models
+5. Teams Tab
+ - Fix json serialization error on update team metadata
+6. Request Logs Tab
+ - Add reasoning_content token tracking across all providers on streaming
+7. API
+ - return key alias on /user/daily/activity [Get Started](../../docs/proxy/cost_tracking#daily-spend-breakdown-api)
+8. SSO
+ - Allow assigning SSO users to teams on MSFT SSO [PR](https://github.com/BerriAI/litellm/pull/9745)
+
+## Logging / Guardrail Integrations
+
+1. Console Logs - Add json formatting for uncaught exceptions [PR](https://github.com/BerriAI/litellm/pull/9619)
+2. Guardrails - AIM Guardrails support for virtual key based policies [Get Started](../../docs/proxy/guardrails/aim_security)
+3. Logging - fix completion start time tracking [PR](https://github.com/BerriAI/litellm/pull/9688)
+4. Prometheus
+ - Allow adding authentication on Prometheus /metrics endpoints [PR](https://github.com/BerriAI/litellm/pull/9766)
+ - Distinguish LLM Provider Exception vs. LiteLLM Exception in metric naming [PR](https://github.com/BerriAI/litellm/pull/9760)
+ - Emit operational metrics for new DB Transaction architecture [PR](https://github.com/BerriAI/litellm/pull/9719)
+
+## Performance / Loadbalancing / Reliability improvements
+1. Preventing Deadlocks
+ - Reduce DB Deadlocks by storing spend updates in Redis and then committing to DB [PR](https://github.com/BerriAI/litellm/pull/9608)
+ - Ensure no deadlocks occur when updating DailyUserSpendTransaction [PR](https://github.com/BerriAI/litellm/pull/9690)
+ - High Traffic fix - ensure new DB + Redis architecture accurately tracks spend [PR](https://github.com/BerriAI/litellm/pull/9673)
+ - Use Redis for PodLock Manager instead of PG (ensures no deadlocks occur) [PR](https://github.com/BerriAI/litellm/pull/9715)
+ - v2 DB Deadlock Reduction Architecture – Add Max Size for In-Memory Queue + Backpressure Mechanism [PR](https://github.com/BerriAI/litellm/pull/9759)
+
+2. Prisma Migrations [Get Started](../../docs/proxy/prod#9-use-prisma-migrate-deploy)
+ - connects litellm proxy to litellm's prisma migration files
+ - Handle db schema updates from new `litellm-proxy-extras` sdk
+3. Redis - support password for sync sentinel clients [PR](https://github.com/BerriAI/litellm/pull/9622)
+4. Fix "Circular reference detected" error when max_parallel_requests = 0 [PR](https://github.com/BerriAI/litellm/pull/9671)
+5. Code QA - Ban hardcoded numbers [PR](https://github.com/BerriAI/litellm/pull/9709)
+
+## Helm
+1. fix: wrong indentation of ttlSecondsAfterFinished in chart [PR](https://github.com/BerriAI/litellm/pull/9611)
+
+## General Proxy Improvements
+1. Fix - only apply service_account_settings.enforced_params on service accounts [PR](https://github.com/BerriAI/litellm/pull/9683)
+2. Fix - handle metadata null on `/chat/completion` [PR](https://github.com/BerriAI/litellm/issues/9717)
+3. Fix - Move daily user transaction logging outside of 'disable_spend_logs' flag, as they’re unrelated [PR](https://github.com/BerriAI/litellm/pull/9772)
+
+## Demo
+
+Try this on the demo instance [today](https://docs.litellm.ai/docs/proxy/demo)
+
+## Complete Git Diff
+
+See the complete git diff since v1.65.0-stable [here](https://github.com/BerriAI/litellm/releases/tag/v1.65.4-stable)
+
diff --git a/docs/my-website/release_notes/v1.66.0-stable/index.md b/docs/my-website/release_notes/v1.66.0-stable/index.md
new file mode 100644
index 0000000000..939322e031
--- /dev/null
+++ b/docs/my-website/release_notes/v1.66.0-stable/index.md
@@ -0,0 +1,197 @@
+---
+title: v1.66.0-stable - Realtime API Cost Tracking
+slug: v1.66.0-stable
+date: 2025-04-12T10:00:00
+authors:
+ - name: Krrish Dholakia
+ title: CEO, LiteLLM
+ url: https://www.linkedin.com/in/krish-d/
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
+ - name: Ishaan Jaffer
+ title: CTO, LiteLLM
+ url: https://www.linkedin.com/in/reffajnaahsi/
+ image_url: https://pbs.twimg.com/profile_images/1613813310264340481/lz54oEiB_400x400.jpg
+
+tags: ["sso", "unified_file_id", "cost_tracking", "security"]
+hide_table_of_contents: false
+---
+
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+## Deploy this version
+
+
+
+
+```shell showLineNumbers title="docker run litellm"
+docker run \
+-e STORE_MODEL_IN_DB=True \
+-p 4000:4000 \
+ghcr.io/berriai/litellm:main-v1.66.0-stable
+```
+
+
+
+
+```shell showLineNumbers title="pip install litellm"
+pip install litellm==1.66.0.post1
+```
+
+
+
+v1.66.0-stable is live now. Here are the key highlights of this release.
+
+## Key Highlights
+- **Realtime API Cost Tracking**: Track cost of realtime API calls
+- **Microsoft SSO Auto-sync**: Auto-sync groups and group members from Azure Entra ID to LiteLLM
+- **xAI grok-3**: Added support for `xai/grok-3` models
+- **Security Fixes**: Fixed [CVE-2025-0330](https://www.cve.org/CVERecord?id=CVE-2025-0330) and [CVE-2024-6825](https://www.cve.org/CVERecord?id=CVE-2024-6825) vulnerabilities
+
+Let's dive in.
+
+## Realtime API Cost Tracking
+
+
+
+
+This release adds Realtime API logging + cost tracking.
+- **Logging**: LiteLLM now logs the complete response from realtime calls to all logging integrations (DB, S3, Langfuse, etc.)
+- **Cost Tracking**: You can now set 'base_model' and custom pricing for realtime models. [Custom Pricing](../../docs/proxy/custom_pricing)
+- **Budgets**: Your key/user/team budgets now work for realtime models as well.
+
+Start [here](https://docs.litellm.ai/docs/realtime)
+
+
+
+## Microsoft SSO Auto-sync
+
+
+
+ Auto-sync groups and members from Azure Entra ID to LiteLLM
+
+
+This release adds support for auto-syncing groups and members on Microsoft Entra ID with LiteLLM. This means LiteLLM proxy administrators can spend less time managing teams and members, since LiteLLM handles the following:
+
+- Auto-create teams that exist on Microsoft Entra ID
+- Sync team members on Microsoft Entra ID with LiteLLM teams
+
+Get started with this [here](https://docs.litellm.ai/docs/tutorials/msft_sso)
+
+
+## New Models / Updated Models
+
+- **xAI**
+ 1. Added reasoning_effort support for `xai/grok-3-mini-beta` [Get Started](https://docs.litellm.ai/docs/providers/xai#reasoning-usage)
+ 2. Added cost tracking for `xai/grok-3` models [PR](https://github.com/BerriAI/litellm/pull/9920)
+
+- **Hugging Face**
+ 1. Added inference providers support [Get Started](https://docs.litellm.ai/docs/providers/huggingface#serverless-inference-providers)
+
+- **VertexAI**
+ 1. Added enterpriseWebSearch tool support [Get Started](https://docs.litellm.ai/docs/providers/vertex#grounding---web-search)
+ 2. Moved to only passing keys accepted by the Vertex AI response schema [PR](https://github.com/BerriAI/litellm/pull/8992)
+
+- **Google AI Studio**
+ 1. Added cost tracking for `gemini-2.5-pro` [PR](https://github.com/BerriAI/litellm/pull/9837)
+ 2. Fixed pricing for 'gemini/gemini-2.5-pro-preview-03-25' [PR](https://github.com/BerriAI/litellm/pull/9896)
+ 3. Fixed handling file_data being passed in [PR](https://github.com/BerriAI/litellm/pull/9786)
+
+- **Azure**
+ 1. Updated Azure Phi-4 pricing [PR](https://github.com/BerriAI/litellm/pull/9862)
+ 2. Added azure/gpt-4o-realtime-audio cost tracking [PR](https://github.com/BerriAI/litellm/pull/9893)
+
+- **Databricks**
+ 1. Removed reasoning_effort from parameters [PR](https://github.com/BerriAI/litellm/pull/9811)
+ 2. Fixed custom endpoint check for Databricks [PR](https://github.com/BerriAI/litellm/pull/9925)
+
+- **General**
+ 1. Added litellm.supports_reasoning() util to track if an llm supports reasoning [Get Started](https://docs.litellm.ai/docs/providers/anthropic#reasoning)
+ 2. Function Calling - Handle pydantic base model in message tool calls, handle tools = [], and support fake streaming on tool calls for meta.llama3-3-70b-instruct-v1:0 [PR](https://github.com/BerriAI/litellm/pull/9774)
+ 3. LiteLLM Proxy - Allow passing `thinking` param to litellm proxy via client sdk [PR](https://github.com/BerriAI/litellm/pull/9386)
+ 4. Fixed correctly translating 'thinking' param for litellm [PR](https://github.com/BerriAI/litellm/pull/9904)
+
+
+## Spend Tracking Improvements
+- **OpenAI, Azure**
+ 1. Realtime API Cost tracking with token usage metrics in spend logs [Get Started](https://docs.litellm.ai/docs/realtime)
+- **Anthropic**
+ 1. Fixed Claude Haiku cache read pricing per token [PR](https://github.com/BerriAI/litellm/pull/9834)
+ 2. Added cost tracking for Claude responses with base_model [PR](https://github.com/BerriAI/litellm/pull/9897)
+ 3. Fixed Anthropic prompt caching cost calculation and trimmed logged message in db [PR](https://github.com/BerriAI/litellm/pull/9838)
+- **General**
+ 1. Added token tracking and log usage object in spend logs [PR](https://github.com/BerriAI/litellm/pull/9843)
+ 2. Handle custom pricing at deployment level [PR](https://github.com/BerriAI/litellm/pull/9855)
+
+
+## Management Endpoints / UI
+
+- **Test Key Tab**
+ 1. Added rendering of Reasoning content, ttft, usage metrics on test key page [PR](https://github.com/BerriAI/litellm/pull/9931)
+
+
+
+- **Tag / Policy Management**
+ 1. Added Tag/Policy Management. Create routing rules based on request metadata. This allows you to enforce that requests with `tags="private"` only go to specific models. [Get Started](https://docs.litellm.ai/docs/tutorials/tag_management)
+
+
+
+
+
+ Create and manage tags.
+
+- **Redesigned Login Screen**
+ 1. Polished login screen [PR](https://github.com/BerriAI/litellm/pull/9778)
+- **Microsoft SSO Auto-Sync**
+ 1. Added debug route to allow admins to debug SSO JWT fields [PR](https://github.com/BerriAI/litellm/pull/9835)
+ 2. Added ability to use MSFT Graph API to assign users to teams [PR](https://github.com/BerriAI/litellm/pull/9865)
+ 3. Connected litellm to Azure Entra ID Enterprise Application [PR](https://github.com/BerriAI/litellm/pull/9872)
+ 4. Added ability for admins to set `default_team_params` for when litellm SSO creates default teams [PR](https://github.com/BerriAI/litellm/pull/9895)
+ 5. Fixed MSFT SSO to use correct field for user email [PR](https://github.com/BerriAI/litellm/pull/9886)
+ 6. Added UI support for setting Default Team setting when litellm SSO auto creates teams [PR](https://github.com/BerriAI/litellm/pull/9918)
+- **UI Bug Fixes**
+ 1. Prevented team, key, org, model numerical values changing on scrolling [PR](https://github.com/BerriAI/litellm/pull/9776)
+ 2. Instantly reflect key and team updates in UI [PR](https://github.com/BerriAI/litellm/pull/9825)
+
+## Logging / Guardrail Improvements
+
+- **Prometheus**
+ 1. Emit Key and Team Budget metrics on a cron job schedule [Get Started](https://docs.litellm.ai/docs/proxy/prometheus#initialize-budget-metrics-on-startup)
+
+## Security Fixes
+
+- Fixed [CVE-2025-0330](https://www.cve.org/CVERecord?id=CVE-2025-0330) - Leakage of Langfuse API keys in team exception handling [PR](https://github.com/BerriAI/litellm/pull/9830)
+- Fixed [CVE-2024-6825](https://www.cve.org/CVERecord?id=CVE-2024-6825) - Remote code execution in post call rules [PR](https://github.com/BerriAI/litellm/pull/9826)
+
+## Helm
+
+- Added service annotations to litellm-helm chart [PR](https://github.com/BerriAI/litellm/pull/9840)
+- Added extraEnvVars to the helm deployment [PR](https://github.com/BerriAI/litellm/pull/9292)
+
+## Demo
+
+Try this on the demo instance [today](https://docs.litellm.ai/docs/proxy/demo)
+
+## Complete Git Diff
+
+See the complete git diff since v1.65.4-stable [here](https://github.com/BerriAI/litellm/releases/tag/v1.66.0-stable)
+
+
diff --git a/docs/my-website/release_notes/v1.67.0-stable/index.md b/docs/my-website/release_notes/v1.67.0-stable/index.md
new file mode 100644
index 0000000000..cb7938fce5
--- /dev/null
+++ b/docs/my-website/release_notes/v1.67.0-stable/index.md
@@ -0,0 +1,153 @@
+---
+title: v1.67.0-stable - SCIM Integration
+slug: v1.67.0-stable
+date: 2025-04-19T10:00:00
+authors:
+ - name: Krrish Dholakia
+ title: CEO, LiteLLM
+ url: https://www.linkedin.com/in/krish-d/
+ image_url: https://media.licdn.com/dms/image/v2/D4D03AQGrlsJ3aqpHmQ/profile-displayphoto-shrink_400_400/B4DZSAzgP7HYAg-/0/1737327772964?e=1749686400&v=beta&t=Hkl3U8Ps0VtvNxX0BNNq24b4dtX5wQaPFp6oiKCIHD8
+ - name: Ishaan Jaffer
+ title: CTO, LiteLLM
+ url: https://www.linkedin.com/in/reffajnaahsi/
+ image_url: https://pbs.twimg.com/profile_images/1613813310264340481/lz54oEiB_400x400.jpg
+
+tags: ["sso", "unified_file_id", "cost_tracking", "security"]
+hide_table_of_contents: false
+---
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+## Key Highlights
+
+- **SCIM Integration**: Enables identity providers (Okta, Azure AD, OneLogin, etc.) to automate user and team (group) provisioning, updates, and deprovisioning
+- **Team and Tag based usage tracking**: You can now see usage and spend by team and tag at 1M+ spend logs.
+- **Unified Responses API**: Support for calling Anthropic, Gemini, Groq, etc. via OpenAI's new Responses API.
+
+Let's dive in.
+
+## SCIM Integration
+
+
+
+This release adds SCIM support to LiteLLM. This allows your SSO provider (Okta, Azure AD, etc.) to automatically create/delete users, teams, and memberships on LiteLLM. For example, when you remove a team in your SSO provider, the corresponding team on LiteLLM is automatically deleted.
+
+[Read more](../../docs/tutorials/scim_litellm)
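+
+As a quick sanity check once SCIM is configured, you can hit the standard SCIM v2 resource endpoints yourself. This is only a sketch - it assumes the proxy exposes SCIM under `/scim/v2` (see the tutorial linked above) and uses a LiteLLM admin key as the bearer token:
+
+```shell title="List SCIM users" showLineNumbers
+curl -L -X GET 'http://localhost:4000/scim/v2/Users' \
+-H 'Authorization: Bearer sk-...'
+```
+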
+## Team and Tag based usage tracking
+
+
+
+
+This release improves team and tag based usage tracking at 1M+ spend logs, making it easy to monitor your LLM API spend in production. This covers:
+
+- View **daily spend** by teams + tags
+- View **usage / spend by key**, within teams
+- View **spend by multiple tags**
+- Allow **internal users** to view spend of teams they're a member of
+
+[Read more](#management-endpoints--ui)
+
+## Unified Responses API
+
+This release allows you to call Azure OpenAI, Anthropic, AWS Bedrock, and Google Vertex AI models via the POST /v1/responses endpoint on LiteLLM. This means you can now use popular tools like [OpenAI Codex](https://docs.litellm.ai/docs/tutorials/openai_codex) with your own models.
+
+
+
+
+[Read more](https://docs.litellm.ai/docs/response_api)
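+
+Here's a minimal sketch of calling the unified endpoint - the model name is just an example of a model configured on your proxy, and the request body follows the OpenAI Responses API shape:
+
+```shell title="POST /v1/responses" showLineNumbers
+curl -L -X POST 'http://localhost:4000/v1/responses' \
+-H 'Authorization: Bearer sk-...' \
+-H 'Content-Type: application/json' \
+-d '{
+  "model": "anthropic/claude-3-7-sonnet-20250219",
+  "input": "What is the capital of France?"
+}'
+```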
+
+
+## New Models / Updated Models
+
+- **OpenAI**
+ 1. gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, o3, o3-mini, o4-mini pricing - [Get Started](../../docs/providers/openai#usage), [PR](https://github.com/BerriAI/litellm/pull/9990)
+ 2. o4 - correctly map o4 to openai o_series model
+- **Azure AI**
+ 1. Phi-4 output cost per token fix - [PR](https://github.com/BerriAI/litellm/pull/9880)
+ 2. Responses API support [Get Started](../../docs/providers/azure#azure-responses-api), [PR](https://github.com/BerriAI/litellm/pull/10116)
+- **Anthropic**
+ 1. Redacted message thinking support - [Get Started](../../docs/providers/anthropic#usage---thinking--reasoning_content), [PR](https://github.com/BerriAI/litellm/pull/10129)
+- **Cohere**
+ 1. `/v2/chat` Passthrough endpoint support w/ cost tracking - [Get Started](../../docs/pass_through/cohere), [PR](https://github.com/BerriAI/litellm/pull/9997)
+- **Azure**
+ 1. Support azure tenant_id/client_id env vars - [Get Started](../../docs/providers/azure#entra-id---use-tenant_id-client_id-client_secret), [PR](https://github.com/BerriAI/litellm/pull/9993)
+ 2. Fix response_format check for 2025+ api versions - [PR](https://github.com/BerriAI/litellm/pull/9993)
+ 3. Add gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, o3, o3-mini, o4-mini pricing
+- **VLLM**
+ 1. Files - Support 'file' message type for VLLM video URLs - [Get Started](../../docs/providers/vllm#send-video-url-to-vllm), [PR](https://github.com/BerriAI/litellm/pull/10129)
+ 2. Passthrough - new `/vllm/` passthrough endpoint support [Get Started](../../docs/pass_through/vllm), [PR](https://github.com/BerriAI/litellm/pull/10002)
+- **Mistral**
+ 1. new `/mistral` passthrough endpoint support [Get Started](../../docs/pass_through/mistral), [PR](https://github.com/BerriAI/litellm/pull/10002)
+- **AWS**
+ 1. New mapped bedrock regions - [PR](https://github.com/BerriAI/litellm/pull/9430)
+- **VertexAI / Google AI Studio**
+ 1. Gemini - Response format - Retain schema field ordering for google gemini and vertex by specifying propertyOrdering - [Get Started](../../docs/providers/vertex#json-schema), [PR](https://github.com/BerriAI/litellm/pull/9828)
+ 2. Gemini-2.5-flash - return reasoning content [Google AI Studio](../../docs/providers/gemini#usage---thinking--reasoning_content), [Vertex AI](../../docs/providers/vertex#thinking--reasoning_content)
+ 3. Gemini-2.5-flash - pricing + model information [PR](https://github.com/BerriAI/litellm/pull/10125)
+ 4. Passthrough - new `/vertex_ai/discovery` route - enables calling AgentBuilder API routes [Get Started](../../docs/pass_through/vertex_ai#supported-api-endpoints), [PR](https://github.com/BerriAI/litellm/pull/10084)
+- **Fireworks AI**
+ 1. return tool calling responses in `tool_calls` field (fireworks incorrectly returns this as a json str in content) [PR](https://github.com/BerriAI/litellm/pull/10130)
+- **Triton**
+ 1. Remove fixed bad_words / stop words from `/generate` call - [Get Started](../../docs/providers/triton-inference-server#triton-generate---chat-completion), [PR](https://github.com/BerriAI/litellm/pull/10163)
+- **Other**
+ 1. Support for all litellm providers on Responses API (works with Codex) - [Get Started](../../docs/tutorials/openai_codex), [PR](https://github.com/BerriAI/litellm/pull/10132)
+ 2. Fix combining multiple tool calls in streaming response - [Get Started](../../docs/completion/stream#helper-function), [PR](https://github.com/BerriAI/litellm/pull/10040)
+
+
+## Spend Tracking Improvements
+
+- **Cost Control** - inject cache control points in prompt for cost reduction [Get Started](../../docs/tutorials/prompt_caching), [PR](https://github.com/BerriAI/litellm/pull/10000)
+- **Spend Tags** - spend tags in headers - support x-litellm-tags even if tag based routing is not enabled (see the example below) [Get Started](../../docs/proxy/request_headers#litellm-headers), [PR](https://github.com/BerriAI/litellm/pull/10000)
+- **Gemini-2.5-flash** - support cost calculation for reasoning tokens [PR](https://github.com/BerriAI/litellm/pull/10141)
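+
+Here's a minimal sketch of sending tags via the request header (assumes a proxy at `localhost:4000`; the tag values and model are illustrative):
+
+```shell title="x-litellm-tags request header" showLineNumbers
+curl -L -X POST 'http://localhost:4000/v1/chat/completions' \
+-H 'Authorization: Bearer sk-...' \
+-H 'Content-Type: application/json' \
+-H 'x-litellm-tags: private,team-a' \
+-d '{
+  "model": "gpt-4o-mini",
+  "messages": [{"role": "user", "content": "Hello"}]
+}'
+```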
+
+## Management Endpoints / UI
+- **Users**
+ 1. Show created_at and updated_at on users page - [PR](https://github.com/BerriAI/litellm/pull/10033)
+- **Virtual Keys**
+ 1. Filter by key alias - [PR](https://github.com/BerriAI/litellm/pull/10085)
+- **Usage Tab**
+
+ 1. Team based usage
+
+ - New `LiteLLM_DailyTeamSpend` Table for aggregate team based usage logging - [PR](https://github.com/BerriAI/litellm/pull/10039)
+
+ - New Team based usage dashboard + new `/team/daily/activity` API - [PR](https://github.com/BerriAI/litellm/pull/10081)
+ - Return team alias on /team/daily/activity API - [PR](https://github.com/BerriAI/litellm/pull/10157)
+ - Allow internal users to view spend for teams they belong to - [PR](https://github.com/BerriAI/litellm/pull/10157)
+ - allow viewing top keys by team - [PR](https://github.com/BerriAI/litellm/pull/10157)
+
+
+
+ 2. Tag Based Usage
+ - New `LiteLLM_DailyTagSpend` Table for aggregate tag based usage logging - [PR](https://github.com/BerriAI/litellm/pull/10071)
+ - Restrict to only Proxy Admins - [PR](https://github.com/BerriAI/litellm/pull/10157)
+ - allow viewing top keys by tag
+ - Return tags passed in request (i.e. dynamic tags) on `/tag/list` API - [PR](https://github.com/BerriAI/litellm/pull/10157)
+
+ 3. Track prompt caching metrics in daily user, team, tag tables - [PR](https://github.com/BerriAI/litellm/pull/10029)
+ 4. Show usage by key (on all up, team, and tag usage dashboards) - [PR](https://github.com/BerriAI/litellm/pull/10157)
+ 5. Swap the old usage tab with the new usage tab
+- **Models**
+ 1. Make columns resizable/hideable - [PR](https://github.com/BerriAI/litellm/pull/10119)
+- **API Playground**
+ 1. Allow internal user to call api playground - [PR](https://github.com/BerriAI/litellm/pull/10157)
+- **SCIM**
+ 1. Add LiteLLM SCIM Integration for Team and User management - [Get Started](../../docs/tutorials/scim_litellm), [PR](https://github.com/BerriAI/litellm/pull/10072)
+
+
+## Logging / Guardrail Integrations
+- **GCS**
+ 1. Fix gcs pub sub logging with env var GCS_PROJECT_ID - [Get Started](../../docs/observability/gcs_bucket_integration#usage), [PR](https://github.com/BerriAI/litellm/pull/10042)
+- **AIM**
+ 1. Add litellm call id passing to Aim guardrails on pre and post-hook calls - [Get Started](../../docs/proxy/guardrails/aim_security), [PR](https://github.com/BerriAI/litellm/pull/10021)
+- **Azure blob storage**
+ 1. Ensure logging works in high throughput scenarios - [Get Started](../../docs/proxy/logging#azure-blob-storage), [PR](https://github.com/BerriAI/litellm/pull/9962)
+
+## General Proxy Improvements
+
+- **Support setting `litellm.modify_params` via env var** [PR](https://github.com/BerriAI/litellm/pull/9964)
+- **Model Discovery** - Check provider’s `/models` endpoints when calling proxy’s `/v1/models` endpoint (see the example below) - [Get Started](../../docs/proxy/model_discovery), [PR](https://github.com/BerriAI/litellm/pull/9958)
+- **`/utils/token_counter`** - fix retrieving custom tokenizer for db models - [Get Started](../../docs/proxy/configs#set-custom-tokenizer), [PR](https://github.com/BerriAI/litellm/pull/10047)
+- **Prisma migrate** - handle existing columns in db table - [PR](https://github.com/BerriAI/litellm/pull/10138)
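+
+For the model discovery item above, here's a minimal sketch of the call (assuming the feature is enabled as described in the linked doc, and a proxy at `localhost:4000`). With discovery enabled, the response should also list models fetched from the providers' own `/models` endpoints:
+
+```shell title="GET /v1/models" showLineNumbers
+curl -L -X GET 'http://localhost:4000/v1/models' \
+-H 'Authorization: Bearer sk-...'
+```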
+
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index e8029560b0..c8096a21bb 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -53,7 +53,7 @@ const sidebars = {
{
type: "category",
label: "Architecture",
- items: ["proxy/architecture", "proxy/db_info", "router_architecture", "proxy/user_management_heirarchy", "proxy/jwt_auth_arch"],
+ items: ["proxy/architecture", "proxy/db_info", "proxy/db_deadlocks", "router_architecture", "proxy/user_management_heirarchy", "proxy/jwt_auth_arch", "proxy/image_handling"],
},
{
type: "link",
@@ -69,6 +69,7 @@ const sidebars = {
"proxy/clientside_auth",
"proxy/request_headers",
"proxy/response_headers",
+ "proxy/model_discovery",
],
},
{
@@ -101,6 +102,7 @@ const sidebars = {
"proxy/admin_ui_sso",
"proxy/self_serve",
"proxy/public_teams",
+ "tutorials/scim_litellm",
"proxy/custom_sso",
"proxy/ui_credentials",
"proxy/ui_logs"
@@ -137,15 +139,17 @@ const sidebars = {
label: "[Beta] Guardrails",
items: [
"proxy/guardrails/quick_start",
- "proxy/guardrails/aim_security",
- "proxy/guardrails/aporia_api",
- "proxy/guardrails/bedrock",
- "proxy/guardrails/guardrails_ai",
- "proxy/guardrails/lakera_ai",
- "proxy/guardrails/pii_masking_v2",
- "proxy/guardrails/secret_detection",
- "proxy/guardrails/custom_guardrail",
- "prompt_injection"
+ ...[
+ "proxy/guardrails/aim_security",
+ "proxy/guardrails/aporia_api",
+ "proxy/guardrails/bedrock",
+ "proxy/guardrails/guardrails_ai",
+ "proxy/guardrails/lakera_ai",
+ "proxy/guardrails/pii_masking_v2",
+ "proxy/guardrails/secret_detection",
+ "proxy/guardrails/custom_guardrail",
+ "proxy/guardrails/prompt_injection",
+ ].sort(),
],
},
{
@@ -186,7 +190,15 @@ const sidebars = {
"providers/azure_ai",
"providers/aiml",
"providers/vertex",
- "providers/gemini",
+
+ {
+ type: "category",
+ label: "Google AI Studio",
+ items: [
+ "providers/gemini",
+ "providers/google_ai_studio/files",
+ ]
+ },
"providers/anthropic",
"providers/aws_sagemaker",
"providers/bedrock",
@@ -244,7 +256,9 @@ const sidebars = {
"exception_mapping",
"completion/provider_specific_params",
"guides/finetuned_models",
+ "guides/security_settings",
"completion/audio",
+ "completion/web_search",
"completion/document_understanding",
"completion/vision",
"completion/json_mode",
@@ -294,6 +308,7 @@ const sidebars = {
"text_completion",
"embedding/supported_embedding",
"anthropic_unified",
+ "mcp",
{
type: "category",
label: "/images",
@@ -318,6 +333,8 @@ const sidebars = {
"pass_through/vertex_ai",
"pass_through/google_ai_studio",
"pass_through/cohere",
+ "pass_through/vllm",
+ "pass_through/mistral",
"pass_through/openai_passthrough",
"pass_through/anthropic_completion",
"pass_through/bedrock",
@@ -328,7 +345,15 @@ const sidebars = {
},
"rerank",
"assistants",
- "files_endpoints",
+
+ {
+ type: "category",
+ label: "/files",
+ items: [
+ "files_endpoints",
+ "proxy/litellm_managed_files",
+ ],
+ },
"batches",
"realtime",
"fine_tuning",
@@ -366,8 +391,12 @@ const sidebars = {
],
},
{
- type: "doc",
- id: "proxy/prompt_management"
+ type: "category",
+ label: "[Beta] Prompt Management",
+ items: [
+ "proxy/prompt_management",
+ "proxy/custom_prompt_management"
+ ],
},
{
type: "category",
@@ -383,9 +412,10 @@ const sidebars = {
type: "category",
label: "Logging & Observability",
items: [
+ "observability/agentops_integration",
+ "observability/langfuse_integration",
"observability/lunary_integration",
"observability/mlflow",
- "observability/langfuse_integration",
"observability/gcs_bucket_integration",
"observability/langsmith_integration",
"observability/literalai_integration",
@@ -393,6 +423,7 @@ const sidebars = {
"observability/logfire_integration",
"observability/argilla",
"observability/arize_integration",
+ "observability/phoenix_integration",
"debugging/local_debugging",
"observability/raw_request_response",
"observability/custom_callback",
@@ -418,6 +449,10 @@ const sidebars = {
label: "Tutorials",
items: [
"tutorials/openweb_ui",
+ "tutorials/openai_codex",
+ "tutorials/msft_sso",
+ "tutorials/prompt_caching",
+ "tutorials/tag_management",
'tutorials/litellm_proxy_aporia',
{
type: "category",
diff --git a/docs/my-website/src/components/TransformRequestPlayground.tsx b/docs/my-website/src/components/TransformRequestPlayground.tsx
new file mode 100644
index 0000000000..8f22e5e198
--- /dev/null
+++ b/docs/my-website/src/components/TransformRequestPlayground.tsx
@@ -0,0 +1,161 @@
+import React, { useState } from 'react';
+import styles from './transform_request.module.css';
+
+const DEFAULT_REQUEST = {
+ "model": "bedrock/gpt-4",
+ "messages": [
+ {
+ "role": "system",
+ "content": "You are a helpful assistant."
+ },
+ {
+ "role": "user",
+ "content": "Explain quantum computing in simple terms"
+ }
+ ],
+ "temperature": 0.7,
+ "max_tokens": 500,
+ "stream": true
+};
+
+type ViewMode = 'split' | 'request' | 'transformed';
+
+const TransformRequestPlayground: React.FC = () => {
+ const [request, setRequest] = useState(JSON.stringify(DEFAULT_REQUEST, null, 2));
+ const [transformedRequest, setTransformedRequest] = useState('');
+ const [viewMode, setViewMode] = useState('split');
+
+ const handleTransform = async () => {
+ try {
+ // Here you would make the actual API call to transform the request
+ // For now, we'll just set a sample response
+ const sampleResponse = `curl -X POST \\
+ https://api.openai.com/v1/chat/completions \\
+ -H 'Authorization: Bearer sk-xxx' \\
+ -H 'Content-Type: application/json' \\
+ -d '{
+ "model": "gpt-4",
+ "messages": [
+ {
+ "role": "system",
+ "content": "You are a helpful assistant."
+ }
+ ],
+ "temperature": 0.7
+ }'`;
+ setTransformedRequest(sampleResponse);
+ } catch (error) {
+ console.error('Error transforming request:', error);
+ }
+ };
+
+ const handleCopy = () => {
+ navigator.clipboard.writeText(transformedRequest);
+ };
+
+ const renderContent = () => {
+ switch (viewMode) {
+ case 'request':
+ return (
+
+
+
Original Request
+
The request you would send to LiteLLM /chat/completions endpoint.
+
+
+ );
+ case 'transformed':
+ return (
+
+
+
Transformed Request
+
How LiteLLM transforms your request for the specified provider.
+
Note: Sensitive headers are not shown.
+
+
+
{transformedRequest}
+
+
+
+ );
+ default:
+ return (
+ <>
+
+
+
Original Request
+
The request you would send to LiteLLM /chat/completions endpoint.
+
+
+
+
+
Transformed Request
+
How LiteLLM transforms your request for the specified provider.
+
Note: Sensitive headers are not shown.
+
+
+
{transformedRequest}
+
+
+
+ >
+ );
+ }
+ };
+
+ return (
+
+
+
+
+
+
+
+ {renderContent()}
+
+
+ );
+};
+
+export default TransformRequestPlayground;
\ No newline at end of file
diff --git a/enterprise/enterprise_hooks/secret_detection.py b/enterprise/enterprise_hooks/secret_detection.py
index 459fd374d1..158f26efa3 100644
--- a/enterprise/enterprise_hooks/secret_detection.py
+++ b/enterprise/enterprise_hooks/secret_detection.py
@@ -444,9 +444,7 @@ class _ENTERPRISE_SecretDetection(CustomGuardrail):
detected_secrets = []
for file in secrets.files:
-
for found_secret in secrets[file]:
-
if found_secret.secret_value is None:
continue
detected_secrets.append(
@@ -471,14 +469,12 @@ class _ENTERPRISE_SecretDetection(CustomGuardrail):
data: dict,
call_type: str, # "completion", "embeddings", "image_generation", "moderation"
):
-
if await self.should_run_check(user_api_key_dict) is False:
return
if "messages" in data and isinstance(data["messages"], list):
for message in data["messages"]:
if "content" in message and isinstance(message["content"], str):
-
detected_secrets = self.scan_message_for_secrets(message["content"])
for secret in detected_secrets:
diff --git a/litellm-proxy-extras/LICENSE b/litellm-proxy-extras/LICENSE
new file mode 100644
index 0000000000..3bfef5bae9
--- /dev/null
+++ b/litellm-proxy-extras/LICENSE
@@ -0,0 +1,26 @@
+Portions of this software are licensed as follows:
+
+* All content that resides under the "enterprise/" directory of this repository, if that directory exists, is licensed under the license defined in "enterprise/LICENSE".
+* Content outside of the above mentioned directories or restrictions above is available under the MIT license as defined below.
+---
+MIT License
+
+Copyright (c) 2023 Berri AI
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/litellm-proxy-extras/README.md b/litellm-proxy-extras/README.md
new file mode 100644
index 0000000000..29453f65ba
--- /dev/null
+++ b/litellm-proxy-extras/README.md
@@ -0,0 +1,21 @@
+Additional files for the proxy. Reduces the size of the main litellm package.
+
+Currently, this only stores the migration.sql files for litellm-proxy.
+
+To install, run:
+
+```bash
+pip install litellm-proxy-extras
+```
+OR
+
+```bash
+pip install litellm[proxy] # installs litellm-proxy-extras and other proxy dependencies.
+```
+
+To use the migrations, run:
+
+```bash
+litellm --use_prisma_migrate
+```
+
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.0-py3-none-any.whl b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.0-py3-none-any.whl
new file mode 100644
index 0000000000..1aff64ef58
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.0-py3-none-any.whl differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.0.tar.gz b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.0.tar.gz
new file mode 100644
index 0000000000..0bdf828163
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.0.tar.gz differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.1-py3-none-any.whl b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.1-py3-none-any.whl
new file mode 100644
index 0000000000..e2583935a4
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.1-py3-none-any.whl differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.1.tar.gz b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.1.tar.gz
new file mode 100644
index 0000000000..c9111dd9c3
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.1.tar.gz differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.2-py3-none-any.whl b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.2-py3-none-any.whl
new file mode 100644
index 0000000000..a034034c24
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.2-py3-none-any.whl differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.2.tar.gz b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.2.tar.gz
new file mode 100644
index 0000000000..b3157d42cd
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.2.tar.gz differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.3-py3-none-any.whl b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.3-py3-none-any.whl
new file mode 100644
index 0000000000..12f72a933f
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.3-py3-none-any.whl differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.3.tar.gz b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.3.tar.gz
new file mode 100644
index 0000000000..590be31628
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.3.tar.gz differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.4-py3-none-any.whl b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.4-py3-none-any.whl
new file mode 100644
index 0000000000..498d0941ed
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.4-py3-none-any.whl differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.4.tar.gz b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.4.tar.gz
new file mode 100644
index 0000000000..80920457bf
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.4.tar.gz differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.7-py3-none-any.whl b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.7-py3-none-any.whl
new file mode 100644
index 0000000000..cf7b2a1953
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.7-py3-none-any.whl differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.7.tar.gz b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.7.tar.gz
new file mode 100644
index 0000000000..5934d5dfb9
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.7.tar.gz differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.8-py3-none-any.whl b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.8-py3-none-any.whl
new file mode 100644
index 0000000000..b4a2ca73d2
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.8-py3-none-any.whl differ
diff --git a/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.8.tar.gz b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.8.tar.gz
new file mode 100644
index 0000000000..a254112d2b
Binary files /dev/null and b/litellm-proxy-extras/dist/litellm_proxy_extras-0.1.8.tar.gz differ
diff --git a/litellm-proxy-extras/litellm_proxy_extras/__init__.py b/litellm-proxy-extras/litellm_proxy_extras/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/litellm-proxy-extras/litellm_proxy_extras/_logging.py b/litellm-proxy-extras/litellm_proxy_extras/_logging.py
new file mode 100644
index 0000000000..118caecf48
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/_logging.py
@@ -0,0 +1,12 @@
+import logging
+
+# Set up package logger
+logger = logging.getLogger("litellm_proxy_extras")
+if not logger.handlers: # Only add handler if none exists
+ handler = logging.StreamHandler()
+ formatter = logging.Formatter(
+ "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ )
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+ logger.setLevel(logging.INFO)
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250326162113_baseline/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250326162113_baseline/migration.sql
new file mode 100644
index 0000000000..fb8a44814f
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250326162113_baseline/migration.sql
@@ -0,0 +1,360 @@
+-- CreateTable
+CREATE TABLE "LiteLLM_BudgetTable" (
+ "budget_id" TEXT NOT NULL,
+ "max_budget" DOUBLE PRECISION,
+ "soft_budget" DOUBLE PRECISION,
+ "max_parallel_requests" INTEGER,
+ "tpm_limit" BIGINT,
+ "rpm_limit" BIGINT,
+ "model_max_budget" JSONB,
+ "budget_duration" TEXT,
+ "budget_reset_at" TIMESTAMP(3),
+ "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "created_by" TEXT NOT NULL,
+ "updated_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "updated_by" TEXT NOT NULL,
+
+ CONSTRAINT "LiteLLM_BudgetTable_pkey" PRIMARY KEY ("budget_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_CredentialsTable" (
+ "credential_id" TEXT NOT NULL,
+ "credential_name" TEXT NOT NULL,
+ "credential_values" JSONB NOT NULL,
+ "credential_info" JSONB,
+ "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "created_by" TEXT NOT NULL,
+ "updated_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "updated_by" TEXT NOT NULL,
+
+ CONSTRAINT "LiteLLM_CredentialsTable_pkey" PRIMARY KEY ("credential_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_ProxyModelTable" (
+ "model_id" TEXT NOT NULL,
+ "model_name" TEXT NOT NULL,
+ "litellm_params" JSONB NOT NULL,
+ "model_info" JSONB,
+ "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "created_by" TEXT NOT NULL,
+ "updated_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "updated_by" TEXT NOT NULL,
+
+ CONSTRAINT "LiteLLM_ProxyModelTable_pkey" PRIMARY KEY ("model_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_OrganizationTable" (
+ "organization_id" TEXT NOT NULL,
+ "organization_alias" TEXT NOT NULL,
+ "budget_id" TEXT NOT NULL,
+ "metadata" JSONB NOT NULL DEFAULT '{}',
+ "models" TEXT[],
+ "spend" DOUBLE PRECISION NOT NULL DEFAULT 0.0,
+ "model_spend" JSONB NOT NULL DEFAULT '{}',
+ "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "created_by" TEXT NOT NULL,
+ "updated_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "updated_by" TEXT NOT NULL,
+
+ CONSTRAINT "LiteLLM_OrganizationTable_pkey" PRIMARY KEY ("organization_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_ModelTable" (
+ "id" SERIAL NOT NULL,
+ "aliases" JSONB,
+ "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "created_by" TEXT NOT NULL,
+ "updated_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "updated_by" TEXT NOT NULL,
+
+ CONSTRAINT "LiteLLM_ModelTable_pkey" PRIMARY KEY ("id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_TeamTable" (
+ "team_id" TEXT NOT NULL,
+ "team_alias" TEXT,
+ "organization_id" TEXT,
+ "admins" TEXT[],
+ "members" TEXT[],
+ "members_with_roles" JSONB NOT NULL DEFAULT '{}',
+ "metadata" JSONB NOT NULL DEFAULT '{}',
+ "max_budget" DOUBLE PRECISION,
+ "spend" DOUBLE PRECISION NOT NULL DEFAULT 0.0,
+ "models" TEXT[],
+ "max_parallel_requests" INTEGER,
+ "tpm_limit" BIGINT,
+ "rpm_limit" BIGINT,
+ "budget_duration" TEXT,
+ "budget_reset_at" TIMESTAMP(3),
+ "blocked" BOOLEAN NOT NULL DEFAULT false,
+ "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "updated_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "model_spend" JSONB NOT NULL DEFAULT '{}',
+ "model_max_budget" JSONB NOT NULL DEFAULT '{}',
+ "model_id" INTEGER,
+
+ CONSTRAINT "LiteLLM_TeamTable_pkey" PRIMARY KEY ("team_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_UserTable" (
+ "user_id" TEXT NOT NULL,
+ "user_alias" TEXT,
+ "team_id" TEXT,
+ "sso_user_id" TEXT,
+ "organization_id" TEXT,
+ "password" TEXT,
+ "teams" TEXT[] DEFAULT ARRAY[]::TEXT[],
+ "user_role" TEXT,
+ "max_budget" DOUBLE PRECISION,
+ "spend" DOUBLE PRECISION NOT NULL DEFAULT 0.0,
+ "user_email" TEXT,
+ "models" TEXT[],
+ "metadata" JSONB NOT NULL DEFAULT '{}',
+ "max_parallel_requests" INTEGER,
+ "tpm_limit" BIGINT,
+ "rpm_limit" BIGINT,
+ "budget_duration" TEXT,
+ "budget_reset_at" TIMESTAMP(3),
+ "allowed_cache_controls" TEXT[] DEFAULT ARRAY[]::TEXT[],
+ "model_spend" JSONB NOT NULL DEFAULT '{}',
+ "model_max_budget" JSONB NOT NULL DEFAULT '{}',
+ "created_at" TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP,
+ "updated_at" TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP,
+
+ CONSTRAINT "LiteLLM_UserTable_pkey" PRIMARY KEY ("user_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_VerificationToken" (
+ "token" TEXT NOT NULL,
+ "key_name" TEXT,
+ "key_alias" TEXT,
+ "soft_budget_cooldown" BOOLEAN NOT NULL DEFAULT false,
+ "spend" DOUBLE PRECISION NOT NULL DEFAULT 0.0,
+ "expires" TIMESTAMP(3),
+ "models" TEXT[],
+ "aliases" JSONB NOT NULL DEFAULT '{}',
+ "config" JSONB NOT NULL DEFAULT '{}',
+ "user_id" TEXT,
+ "team_id" TEXT,
+ "permissions" JSONB NOT NULL DEFAULT '{}',
+ "max_parallel_requests" INTEGER,
+ "metadata" JSONB NOT NULL DEFAULT '{}',
+ "blocked" BOOLEAN,
+ "tpm_limit" BIGINT,
+ "rpm_limit" BIGINT,
+ "max_budget" DOUBLE PRECISION,
+ "budget_duration" TEXT,
+ "budget_reset_at" TIMESTAMP(3),
+ "allowed_cache_controls" TEXT[] DEFAULT ARRAY[]::TEXT[],
+ "model_spend" JSONB NOT NULL DEFAULT '{}',
+ "model_max_budget" JSONB NOT NULL DEFAULT '{}',
+ "budget_id" TEXT,
+ "organization_id" TEXT,
+ "created_at" TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP,
+ "created_by" TEXT,
+ "updated_at" TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP,
+ "updated_by" TEXT,
+
+ CONSTRAINT "LiteLLM_VerificationToken_pkey" PRIMARY KEY ("token")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_EndUserTable" (
+ "user_id" TEXT NOT NULL,
+ "alias" TEXT,
+ "spend" DOUBLE PRECISION NOT NULL DEFAULT 0.0,
+ "allowed_model_region" TEXT,
+ "default_model" TEXT,
+ "budget_id" TEXT,
+ "blocked" BOOLEAN NOT NULL DEFAULT false,
+
+ CONSTRAINT "LiteLLM_EndUserTable_pkey" PRIMARY KEY ("user_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_Config" (
+ "param_name" TEXT NOT NULL,
+ "param_value" JSONB,
+
+ CONSTRAINT "LiteLLM_Config_pkey" PRIMARY KEY ("param_name")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_SpendLogs" (
+ "request_id" TEXT NOT NULL,
+ "call_type" TEXT NOT NULL,
+ "api_key" TEXT NOT NULL DEFAULT '',
+ "spend" DOUBLE PRECISION NOT NULL DEFAULT 0.0,
+ "total_tokens" INTEGER NOT NULL DEFAULT 0,
+ "prompt_tokens" INTEGER NOT NULL DEFAULT 0,
+ "completion_tokens" INTEGER NOT NULL DEFAULT 0,
+ "startTime" TIMESTAMP(3) NOT NULL,
+ "endTime" TIMESTAMP(3) NOT NULL,
+ "completionStartTime" TIMESTAMP(3),
+ "model" TEXT NOT NULL DEFAULT '',
+ "model_id" TEXT DEFAULT '',
+ "model_group" TEXT DEFAULT '',
+ "custom_llm_provider" TEXT DEFAULT '',
+ "api_base" TEXT DEFAULT '',
+ "user" TEXT DEFAULT '',
+ "metadata" JSONB DEFAULT '{}',
+ "cache_hit" TEXT DEFAULT '',
+ "cache_key" TEXT DEFAULT '',
+ "request_tags" JSONB DEFAULT '[]',
+ "team_id" TEXT,
+ "end_user" TEXT,
+ "requester_ip_address" TEXT,
+ "messages" JSONB DEFAULT '{}',
+ "response" JSONB DEFAULT '{}',
+
+ CONSTRAINT "LiteLLM_SpendLogs_pkey" PRIMARY KEY ("request_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_ErrorLogs" (
+ "request_id" TEXT NOT NULL,
+ "startTime" TIMESTAMP(3) NOT NULL,
+ "endTime" TIMESTAMP(3) NOT NULL,
+ "api_base" TEXT NOT NULL DEFAULT '',
+ "model_group" TEXT NOT NULL DEFAULT '',
+ "litellm_model_name" TEXT NOT NULL DEFAULT '',
+ "model_id" TEXT NOT NULL DEFAULT '',
+ "request_kwargs" JSONB NOT NULL DEFAULT '{}',
+ "exception_type" TEXT NOT NULL DEFAULT '',
+ "exception_string" TEXT NOT NULL DEFAULT '',
+ "status_code" TEXT NOT NULL DEFAULT '',
+
+ CONSTRAINT "LiteLLM_ErrorLogs_pkey" PRIMARY KEY ("request_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_UserNotifications" (
+ "request_id" TEXT NOT NULL,
+ "user_id" TEXT NOT NULL,
+ "models" TEXT[],
+ "justification" TEXT NOT NULL,
+ "status" TEXT NOT NULL,
+
+ CONSTRAINT "LiteLLM_UserNotifications_pkey" PRIMARY KEY ("request_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_TeamMembership" (
+ "user_id" TEXT NOT NULL,
+ "team_id" TEXT NOT NULL,
+ "spend" DOUBLE PRECISION NOT NULL DEFAULT 0.0,
+ "budget_id" TEXT,
+
+ CONSTRAINT "LiteLLM_TeamMembership_pkey" PRIMARY KEY ("user_id","team_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_OrganizationMembership" (
+ "user_id" TEXT NOT NULL,
+ "organization_id" TEXT NOT NULL,
+ "user_role" TEXT,
+ "spend" DOUBLE PRECISION DEFAULT 0.0,
+ "budget_id" TEXT,
+ "created_at" TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP,
+ "updated_at" TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP,
+
+ CONSTRAINT "LiteLLM_OrganizationMembership_pkey" PRIMARY KEY ("user_id","organization_id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_InvitationLink" (
+ "id" TEXT NOT NULL,
+ "user_id" TEXT NOT NULL,
+ "is_accepted" BOOLEAN NOT NULL DEFAULT false,
+ "accepted_at" TIMESTAMP(3),
+ "expires_at" TIMESTAMP(3) NOT NULL,
+ "created_at" TIMESTAMP(3) NOT NULL,
+ "created_by" TEXT NOT NULL,
+ "updated_at" TIMESTAMP(3) NOT NULL,
+ "updated_by" TEXT NOT NULL,
+
+ CONSTRAINT "LiteLLM_InvitationLink_pkey" PRIMARY KEY ("id")
+);
+
+-- CreateTable
+CREATE TABLE "LiteLLM_AuditLog" (
+ "id" TEXT NOT NULL,
+ "updated_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "changed_by" TEXT NOT NULL DEFAULT '',
+ "changed_by_api_key" TEXT NOT NULL DEFAULT '',
+ "action" TEXT NOT NULL,
+ "table_name" TEXT NOT NULL,
+ "object_id" TEXT NOT NULL,
+ "before_value" JSONB,
+ "updated_values" JSONB,
+
+ CONSTRAINT "LiteLLM_AuditLog_pkey" PRIMARY KEY ("id")
+);
+
+-- CreateIndex
+CREATE UNIQUE INDEX "LiteLLM_CredentialsTable_credential_name_key" ON "LiteLLM_CredentialsTable"("credential_name");
+
+-- CreateIndex
+CREATE UNIQUE INDEX "LiteLLM_TeamTable_model_id_key" ON "LiteLLM_TeamTable"("model_id");
+
+-- CreateIndex
+CREATE UNIQUE INDEX "LiteLLM_UserTable_sso_user_id_key" ON "LiteLLM_UserTable"("sso_user_id");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_SpendLogs_startTime_idx" ON "LiteLLM_SpendLogs"("startTime");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_SpendLogs_end_user_idx" ON "LiteLLM_SpendLogs"("end_user");
+
+-- CreateIndex
+CREATE UNIQUE INDEX "LiteLLM_OrganizationMembership_user_id_organization_id_key" ON "LiteLLM_OrganizationMembership"("user_id", "organization_id");
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_OrganizationTable" ADD CONSTRAINT "LiteLLM_OrganizationTable_budget_id_fkey" FOREIGN KEY ("budget_id") REFERENCES "LiteLLM_BudgetTable"("budget_id") ON DELETE RESTRICT ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_TeamTable" ADD CONSTRAINT "LiteLLM_TeamTable_organization_id_fkey" FOREIGN KEY ("organization_id") REFERENCES "LiteLLM_OrganizationTable"("organization_id") ON DELETE SET NULL ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_TeamTable" ADD CONSTRAINT "LiteLLM_TeamTable_model_id_fkey" FOREIGN KEY ("model_id") REFERENCES "LiteLLM_ModelTable"("id") ON DELETE SET NULL ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_UserTable" ADD CONSTRAINT "LiteLLM_UserTable_organization_id_fkey" FOREIGN KEY ("organization_id") REFERENCES "LiteLLM_OrganizationTable"("organization_id") ON DELETE SET NULL ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_VerificationToken" ADD CONSTRAINT "LiteLLM_VerificationToken_budget_id_fkey" FOREIGN KEY ("budget_id") REFERENCES "LiteLLM_BudgetTable"("budget_id") ON DELETE SET NULL ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_VerificationToken" ADD CONSTRAINT "LiteLLM_VerificationToken_organization_id_fkey" FOREIGN KEY ("organization_id") REFERENCES "LiteLLM_OrganizationTable"("organization_id") ON DELETE SET NULL ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_EndUserTable" ADD CONSTRAINT "LiteLLM_EndUserTable_budget_id_fkey" FOREIGN KEY ("budget_id") REFERENCES "LiteLLM_BudgetTable"("budget_id") ON DELETE SET NULL ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_TeamMembership" ADD CONSTRAINT "LiteLLM_TeamMembership_budget_id_fkey" FOREIGN KEY ("budget_id") REFERENCES "LiteLLM_BudgetTable"("budget_id") ON DELETE SET NULL ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_OrganizationMembership" ADD CONSTRAINT "LiteLLM_OrganizationMembership_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "LiteLLM_UserTable"("user_id") ON DELETE RESTRICT ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_OrganizationMembership" ADD CONSTRAINT "LiteLLM_OrganizationMembership_organization_id_fkey" FOREIGN KEY ("organization_id") REFERENCES "LiteLLM_OrganizationTable"("organization_id") ON DELETE RESTRICT ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_OrganizationMembership" ADD CONSTRAINT "LiteLLM_OrganizationMembership_budget_id_fkey" FOREIGN KEY ("budget_id") REFERENCES "LiteLLM_BudgetTable"("budget_id") ON DELETE SET NULL ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_InvitationLink" ADD CONSTRAINT "LiteLLM_InvitationLink_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "LiteLLM_UserTable"("user_id") ON DELETE RESTRICT ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_InvitationLink" ADD CONSTRAINT "LiteLLM_InvitationLink_created_by_fkey" FOREIGN KEY ("created_by") REFERENCES "LiteLLM_UserTable"("user_id") ON DELETE RESTRICT ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "LiteLLM_InvitationLink" ADD CONSTRAINT "LiteLLM_InvitationLink_updated_by_fkey" FOREIGN KEY ("updated_by") REFERENCES "LiteLLM_UserTable"("user_id") ON DELETE RESTRICT ON UPDATE CASCADE;
+
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250326171002_add_daily_user_table/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250326171002_add_daily_user_table/migration.sql
new file mode 100644
index 0000000000..3379d8e9fd
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250326171002_add_daily_user_table/migration.sql
@@ -0,0 +1,33 @@
+-- CreateTable
+CREATE TABLE "LiteLLM_DailyUserSpend" (
+ "id" TEXT NOT NULL,
+ "user_id" TEXT NOT NULL,
+ "date" TEXT NOT NULL,
+ "api_key" TEXT NOT NULL,
+ "model" TEXT NOT NULL,
+ "model_group" TEXT,
+ "custom_llm_provider" TEXT,
+ "prompt_tokens" INTEGER NOT NULL DEFAULT 0,
+ "completion_tokens" INTEGER NOT NULL DEFAULT 0,
+ "spend" DOUBLE PRECISION NOT NULL DEFAULT 0.0,
+ "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "updated_at" TIMESTAMP(3) NOT NULL,
+
+ CONSTRAINT "LiteLLM_DailyUserSpend_pkey" PRIMARY KEY ("id")
+);
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyUserSpend_date_idx" ON "LiteLLM_DailyUserSpend"("date");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyUserSpend_user_id_idx" ON "LiteLLM_DailyUserSpend"("user_id");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyUserSpend_api_key_idx" ON "LiteLLM_DailyUserSpend"("api_key");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyUserSpend_model_idx" ON "LiteLLM_DailyUserSpend"("model");
+
+-- CreateIndex
+CREATE UNIQUE INDEX "LiteLLM_DailyUserSpend_user_id_date_api_key_model_custom_ll_key" ON "LiteLLM_DailyUserSpend"("user_id", "date", "api_key", "model", "custom_llm_provider");
+
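The composite unique index above is what makes the daily aggregation idempotent: one row per (user, day, key, model, provider), incremented in place. Below is a minimal sketch of that write pattern, assuming a plain Postgres connection via psycopg2 — the driver, DSN, and sample values are not part of this change.

```python
# Illustrative only: idempotent upsert against LiteLLM_DailyUserSpend,
# keyed on the composite unique index created above.
import uuid

import psycopg2  # assumed driver, not part of this migration

UPSERT_DAILY_USER_SPEND = """
INSERT INTO "LiteLLM_DailyUserSpend"
    ("id", "user_id", "date", "api_key", "model", "custom_llm_provider",
     "prompt_tokens", "completion_tokens", "spend", "updated_at")
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, NOW())
ON CONFLICT ("user_id", "date", "api_key", "model", "custom_llm_provider")
DO UPDATE SET
    "prompt_tokens"     = "LiteLLM_DailyUserSpend"."prompt_tokens" + EXCLUDED."prompt_tokens",
    "completion_tokens" = "LiteLLM_DailyUserSpend"."completion_tokens" + EXCLUDED."completion_tokens",
    "spend"             = "LiteLLM_DailyUserSpend"."spend" + EXCLUDED."spend",
    "updated_at"        = NOW();
"""


def record_daily_user_spend(conn, user_id, date, api_key, model, provider,
                            prompt_tokens, completion_tokens, spend):
    """Insert a new daily row or increment the existing one."""
    with conn.cursor() as cur:
        cur.execute(
            UPSERT_DAILY_USER_SPEND,
            (str(uuid.uuid4()), user_id, date, api_key, model, provider,
             prompt_tokens, completion_tokens, spend),
        )
    conn.commit()


if __name__ == "__main__":
    connection = psycopg2.connect("postgresql://user:pass@localhost:5432/litellm")  # placeholder DSN
    record_daily_user_spend(connection, "user-123", "2025-03-26", "hashed-key", "gpt-4o", "openai", 120, 30, 0.0042)
```

One caveat worth noting: `custom_llm_provider` is nullable, and Postgres treats NULLs as distinct in a unique index, so rows written with a NULL provider will not collide under this constraint.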
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250327180120_add_api_requests_to_daily_user_table/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250327180120_add_api_requests_to_daily_user_table/migration.sql
new file mode 100644
index 0000000000..e7c5ab566a
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250327180120_add_api_requests_to_daily_user_table/migration.sql
@@ -0,0 +1,3 @@
+-- AlterTable
+ALTER TABLE "LiteLLM_DailyUserSpend" ADD COLUMN "api_requests" INTEGER NOT NULL DEFAULT 0;
+
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250329084805_new_cron_job_table/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250329084805_new_cron_job_table/migration.sql
new file mode 100644
index 0000000000..e7ea2e9015
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250329084805_new_cron_job_table/migration.sql
@@ -0,0 +1,14 @@
+-- CreateEnum
+CREATE TYPE "JobStatus" AS ENUM ('ACTIVE', 'INACTIVE');
+
+-- CreateTable
+CREATE TABLE "LiteLLM_CronJob" (
+ "cronjob_id" TEXT NOT NULL,
+ "pod_id" TEXT NOT NULL,
+ "status" "JobStatus" NOT NULL DEFAULT 'INACTIVE',
+ "last_updated" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "ttl" TIMESTAMP(3) NOT NULL,
+
+ CONSTRAINT "LiteLLM_CronJob_pkey" PRIMARY KEY ("cronjob_id")
+);
+
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250331215456_track_success_and_failed_requests_daily_agg_table/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250331215456_track_success_and_failed_requests_daily_agg_table/migration.sql
new file mode 100644
index 0000000000..9f1693500d
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250331215456_track_success_and_failed_requests_daily_agg_table/migration.sql
@@ -0,0 +1,4 @@
+-- AlterTable
+ALTER TABLE "LiteLLM_DailyUserSpend" ADD COLUMN "failed_requests" INTEGER NOT NULL DEFAULT 0,
+ADD COLUMN "successful_requests" INTEGER NOT NULL DEFAULT 0;
+
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250411215431_add_managed_file_table/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250411215431_add_managed_file_table/migration.sql
new file mode 100644
index 0000000000..d14a629458
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250411215431_add_managed_file_table/migration.sql
@@ -0,0 +1,18 @@
+-- CreateTable
+CREATE TABLE "LiteLLM_ManagedFileTable" (
+ "id" TEXT NOT NULL,
+ "unified_file_id" TEXT NOT NULL,
+ "file_object" JSONB NOT NULL,
+ "model_mappings" JSONB NOT NULL,
+ "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "updated_at" TIMESTAMP(3) NOT NULL,
+
+ CONSTRAINT "LiteLLM_ManagedFileTable_pkey" PRIMARY KEY ("id")
+);
+
+-- CreateIndex
+CREATE UNIQUE INDEX "LiteLLM_ManagedFileTable_unified_file_id_key" ON "LiteLLM_ManagedFileTable"("unified_file_id");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_ManagedFileTable_unified_file_id_idx" ON "LiteLLM_ManagedFileTable"("unified_file_id");
+
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250412081753_team_member_permissions/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250412081753_team_member_permissions/migration.sql
new file mode 100644
index 0000000000..c07df81379
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250412081753_team_member_permissions/migration.sql
@@ -0,0 +1,3 @@
+-- AlterTable
+ALTER TABLE "LiteLLM_TeamTable" ADD COLUMN "team_member_permissions" TEXT[] DEFAULT ARRAY[]::TEXT[];
+
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250415151647_add_cache_read_write_tokens_daily_spend_transactions/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250415151647_add_cache_read_write_tokens_daily_spend_transactions/migration.sql
new file mode 100644
index 0000000000..f47e1c2e91
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250415151647_add_cache_read_write_tokens_daily_spend_transactions/migration.sql
@@ -0,0 +1,4 @@
+-- AlterTable
+ALTER TABLE "LiteLLM_DailyUserSpend" ADD COLUMN "cache_creation_input_tokens" INTEGER NOT NULL DEFAULT 0,
+ADD COLUMN "cache_read_input_tokens" INTEGER NOT NULL DEFAULT 0;
+
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250415191926_add_daily_team_table/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250415191926_add_daily_team_table/migration.sql
new file mode 100644
index 0000000000..a6eb461bc2
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250415191926_add_daily_team_table/migration.sql
@@ -0,0 +1,36 @@
+-- CreateTable
+CREATE TABLE "LiteLLM_DailyTeamSpend" (
+ "id" TEXT NOT NULL,
+ "team_id" TEXT NOT NULL,
+ "date" TEXT NOT NULL,
+ "api_key" TEXT NOT NULL,
+ "model" TEXT NOT NULL,
+ "model_group" TEXT,
+ "custom_llm_provider" TEXT,
+ "prompt_tokens" INTEGER NOT NULL DEFAULT 0,
+ "completion_tokens" INTEGER NOT NULL DEFAULT 0,
+ "spend" DOUBLE PRECISION NOT NULL DEFAULT 0.0,
+ "api_requests" INTEGER NOT NULL DEFAULT 0,
+ "successful_requests" INTEGER NOT NULL DEFAULT 0,
+ "failed_requests" INTEGER NOT NULL DEFAULT 0,
+ "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "updated_at" TIMESTAMP(3) NOT NULL,
+
+ CONSTRAINT "LiteLLM_DailyTeamSpend_pkey" PRIMARY KEY ("id")
+);
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyTeamSpend_date_idx" ON "LiteLLM_DailyTeamSpend"("date");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyTeamSpend_team_id_idx" ON "LiteLLM_DailyTeamSpend"("team_id");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyTeamSpend_api_key_idx" ON "LiteLLM_DailyTeamSpend"("api_key");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyTeamSpend_model_idx" ON "LiteLLM_DailyTeamSpend"("model");
+
+-- CreateIndex
+CREATE UNIQUE INDEX "LiteLLM_DailyTeamSpend_team_id_date_api_key_model_custom_ll_key" ON "LiteLLM_DailyTeamSpend"("team_id", "date", "api_key", "model", "custom_llm_provider");
+
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416115320_add_tag_table_to_db/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416115320_add_tag_table_to_db/migration.sql
new file mode 100644
index 0000000000..8c3cea7093
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416115320_add_tag_table_to_db/migration.sql
@@ -0,0 +1,45 @@
+-- AlterTable
+ALTER TABLE "LiteLLM_DailyTeamSpend" ADD COLUMN "cache_creation_input_tokens" INTEGER NOT NULL DEFAULT 0,
+ADD COLUMN "cache_read_input_tokens" INTEGER NOT NULL DEFAULT 0;
+
+-- CreateTable
+CREATE TABLE "LiteLLM_DailyTagSpend" (
+ "id" TEXT NOT NULL,
+ "tag" TEXT NOT NULL,
+ "date" TEXT NOT NULL,
+ "api_key" TEXT NOT NULL,
+ "model" TEXT NOT NULL,
+ "model_group" TEXT,
+ "custom_llm_provider" TEXT,
+ "prompt_tokens" INTEGER NOT NULL DEFAULT 0,
+ "completion_tokens" INTEGER NOT NULL DEFAULT 0,
+ "cache_read_input_tokens" INTEGER NOT NULL DEFAULT 0,
+ "cache_creation_input_tokens" INTEGER NOT NULL DEFAULT 0,
+ "spend" DOUBLE PRECISION NOT NULL DEFAULT 0.0,
+ "api_requests" INTEGER NOT NULL DEFAULT 0,
+ "successful_requests" INTEGER NOT NULL DEFAULT 0,
+ "failed_requests" INTEGER NOT NULL DEFAULT 0,
+ "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "updated_at" TIMESTAMP(3) NOT NULL,
+
+ CONSTRAINT "LiteLLM_DailyTagSpend_pkey" PRIMARY KEY ("id")
+);
+
+-- CreateIndex
+CREATE UNIQUE INDEX "LiteLLM_DailyTagSpend_tag_key" ON "LiteLLM_DailyTagSpend"("tag");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyTagSpend_date_idx" ON "LiteLLM_DailyTagSpend"("date");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyTagSpend_tag_idx" ON "LiteLLM_DailyTagSpend"("tag");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyTagSpend_api_key_idx" ON "LiteLLM_DailyTagSpend"("api_key");
+
+-- CreateIndex
+CREATE INDEX "LiteLLM_DailyTagSpend_model_idx" ON "LiteLLM_DailyTagSpend"("model");
+
+-- CreateIndex
+CREATE UNIQUE INDEX "LiteLLM_DailyTagSpend_tag_date_api_key_model_custom_llm_pro_key" ON "LiteLLM_DailyTagSpend"("tag", "date", "api_key", "model", "custom_llm_provider");
+
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416151339_drop_tag_uniqueness_requirement/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416151339_drop_tag_uniqueness_requirement/migration.sql
new file mode 100644
index 0000000000..5c27b84efb
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416151339_drop_tag_uniqueness_requirement/migration.sql
@@ -0,0 +1,3 @@
+-- DropIndex
+DROP INDEX "LiteLLM_DailyTagSpend_tag_key";
+
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416185146_add_allowed_routes_litellm_verification_token/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416185146_add_allowed_routes_litellm_verification_token/migration.sql
new file mode 100644
index 0000000000..2ee7838dcf
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416185146_add_allowed_routes_litellm_verification_token/migration.sql
@@ -0,0 +1,3 @@
+-- AlterTable
+ALTER TABLE "LiteLLM_VerificationToken" ADD COLUMN "allowed_routes" TEXT[] DEFAULT ARRAY[]::TEXT[];
+
diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/migration_lock.toml b/litellm-proxy-extras/litellm_proxy_extras/migrations/migration_lock.toml
new file mode 100644
index 0000000000..2fe25d87cc
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/migration_lock.toml
@@ -0,0 +1 @@
+provider = "postgresql"
diff --git a/litellm-proxy-extras/litellm_proxy_extras/schema.prisma b/litellm-proxy-extras/litellm_proxy_extras/schema.prisma
new file mode 100644
index 0000000000..68e9382d75
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/schema.prisma
@@ -0,0 +1,425 @@
+datasource client {
+ provider = "postgresql"
+ url = env("DATABASE_URL")
+}
+
+generator client {
+ provider = "prisma-client-py"
+}
+
+// Budget / Rate Limits for an org
+model LiteLLM_BudgetTable {
+ budget_id String @id @default(uuid())
+ max_budget Float?
+ soft_budget Float?
+ max_parallel_requests Int?
+ tpm_limit BigInt?
+ rpm_limit BigInt?
+ model_max_budget Json?
+ budget_duration String?
+ budget_reset_at DateTime?
+ created_at DateTime @default(now()) @map("created_at")
+ created_by String
+ updated_at DateTime @default(now()) @updatedAt @map("updated_at")
+ updated_by String
+ organization LiteLLM_OrganizationTable[] // multiple orgs can have the same budget
+ keys LiteLLM_VerificationToken[] // multiple keys can have the same budget
+ end_users LiteLLM_EndUserTable[] // multiple end-users can have the same budget
+ team_membership LiteLLM_TeamMembership[] // budgets of Users within a Team
+ organization_membership LiteLLM_OrganizationMembership[] // budgets of Users within an Organization
+}
+
+// Provider credentials stored on the proxy
+model LiteLLM_CredentialsTable {
+ credential_id String @id @default(uuid())
+ credential_name String @unique
+ credential_values Json
+ credential_info Json?
+ created_at DateTime @default(now()) @map("created_at")
+ created_by String
+ updated_at DateTime @default(now()) @updatedAt @map("updated_at")
+ updated_by String
+}
+
+// Models on proxy
+model LiteLLM_ProxyModelTable {
+ model_id String @id @default(uuid())
+ model_name String
+ litellm_params Json
+ model_info Json?
+ created_at DateTime @default(now()) @map("created_at")
+ created_by String
+ updated_at DateTime @default(now()) @updatedAt @map("updated_at")
+ updated_by String
+}
+
+model LiteLLM_OrganizationTable {
+ organization_id String @id @default(uuid())
+ organization_alias String
+ budget_id String
+ metadata Json @default("{}")
+ models String[]
+ spend Float @default(0.0)
+ model_spend Json @default("{}")
+ created_at DateTime @default(now()) @map("created_at")
+ created_by String
+ updated_at DateTime @default(now()) @updatedAt @map("updated_at")
+ updated_by String
+ litellm_budget_table LiteLLM_BudgetTable? @relation(fields: [budget_id], references: [budget_id])
+ teams LiteLLM_TeamTable[]
+ users LiteLLM_UserTable[]
+ keys LiteLLM_VerificationToken[]
+ members LiteLLM_OrganizationMembership[] @relation("OrganizationToMembership")
+}
+
+// Model info for teams, just has model aliases for now.
+model LiteLLM_ModelTable {
+ id Int @id @default(autoincrement())
+ model_aliases Json? @map("aliases")
+ created_at DateTime @default(now()) @map("created_at")
+ created_by String
+ updated_at DateTime @default(now()) @updatedAt @map("updated_at")
+ updated_by String
+ team LiteLLM_TeamTable?
+}
+
+
+// Assign prod keys to groups, not individuals
+model LiteLLM_TeamTable {
+ team_id String @id @default(uuid())
+ team_alias String?
+ organization_id String?
+ admins String[]
+ members String[]
+ members_with_roles Json @default("{}")
+ metadata Json @default("{}")
+ max_budget Float?
+ spend Float @default(0.0)
+ models String[]
+ max_parallel_requests Int?
+ tpm_limit BigInt?
+ rpm_limit BigInt?
+ budget_duration String?
+ budget_reset_at DateTime?
+ blocked Boolean @default(false)
+ created_at DateTime @default(now()) @map("created_at")
+ updated_at DateTime @default(now()) @updatedAt @map("updated_at")
+ model_spend Json @default("{}")
+ model_max_budget Json @default("{}")
+ team_member_permissions String[] @default([])
+ model_id Int? @unique // id for LiteLLM_ModelTable -> stores team-level model aliases
+ litellm_organization_table LiteLLM_OrganizationTable? @relation(fields: [organization_id], references: [organization_id])
+ litellm_model_table LiteLLM_ModelTable? @relation(fields: [model_id], references: [id])
+}
+
+// Track spend, rate limit, budget Users
+model LiteLLM_UserTable {
+ user_id String @id
+ user_alias String?
+ team_id String?
+ sso_user_id String? @unique
+ organization_id String?
+ password String?
+ teams String[] @default([])
+ user_role String?
+ max_budget Float?
+ spend Float @default(0.0)
+ user_email String?
+ models String[]
+ metadata Json @default("{}")
+ max_parallel_requests Int?
+ tpm_limit BigInt?
+ rpm_limit BigInt?
+ budget_duration String?
+ budget_reset_at DateTime?
+ allowed_cache_controls String[] @default([])
+ model_spend Json @default("{}")
+ model_max_budget Json @default("{}")
+ created_at DateTime? @default(now()) @map("created_at")
+ updated_at DateTime? @default(now()) @updatedAt @map("updated_at")
+
+ // relations
+ litellm_organization_table LiteLLM_OrganizationTable? @relation(fields: [organization_id], references: [organization_id])
+ organization_memberships LiteLLM_OrganizationMembership[]
+ invitations_created LiteLLM_InvitationLink[] @relation("CreatedBy")
+ invitations_updated LiteLLM_InvitationLink[] @relation("UpdatedBy")
+ invitations_user LiteLLM_InvitationLink[] @relation("UserId")
+}
+
+// Generate Tokens for Proxy
+model LiteLLM_VerificationToken {
+ token String @id
+ key_name String?
+ key_alias String?
+ soft_budget_cooldown Boolean @default(false) // key-level flag: whether budget alerts for this key are in a cooldown period
+ spend Float @default(0.0)
+ expires DateTime?
+ models String[]
+ aliases Json @default("{}")
+ config Json @default("{}")
+ user_id String?
+ team_id String?
+ permissions Json @default("{}")
+ max_parallel_requests Int?
+ metadata Json @default("{}")
+ blocked Boolean?
+ tpm_limit BigInt?
+ rpm_limit BigInt?
+ max_budget Float?
+ budget_duration String?
+ budget_reset_at DateTime?
+ allowed_cache_controls String[] @default([])
+ allowed_routes String[] @default([])
+ model_spend Json @default("{}")
+ model_max_budget Json @default("{}")
+ budget_id String?
+ organization_id String?
+ created_at DateTime? @default(now()) @map("created_at")
+ created_by String?
+ updated_at DateTime? @default(now()) @updatedAt @map("updated_at")
+ updated_by String?
+ litellm_budget_table LiteLLM_BudgetTable? @relation(fields: [budget_id], references: [budget_id])
+ litellm_organization_table LiteLLM_OrganizationTable? @relation(fields: [organization_id], references: [organization_id])
+}
+
+model LiteLLM_EndUserTable {
+ user_id String @id
+ alias String? // admin-facing alias
+ spend Float @default(0.0)
+ allowed_model_region String? // require all user requests to use models in this specific region
+ default_model String? // use along with 'allowed_model_region'. if no available model in region, default to this model.
+ budget_id String?
+ litellm_budget_table LiteLLM_BudgetTable? @relation(fields: [budget_id], references: [budget_id])
+ blocked Boolean @default(false)
+}
+
+// store proxy config.yaml
+model LiteLLM_Config {
+ param_name String @id
+ param_value Json?
+}
+
+// View spend, model, api_key per request
+model LiteLLM_SpendLogs {
+ request_id String @id
+ call_type String
+ api_key String @default ("") // Hashed API Token. Not the actual Virtual Key. Equivalent to 'token' column in LiteLLM_VerificationToken
+ spend Float @default(0.0)
+ total_tokens Int @default(0)
+ prompt_tokens Int @default(0)
+ completion_tokens Int @default(0)
+ startTime DateTime // Assuming start_time is a DateTime field
+ endTime DateTime // Assuming end_time is a DateTime field
+ completionStartTime DateTime? // Assuming completionStartTime is a DateTime field
+ model String @default("")
+ model_id String? @default("") // the model id stored in proxy model db
+ model_group String? @default("") // public model_name / model_group
+ custom_llm_provider String? @default("") // litellm used custom_llm_provider
+ api_base String? @default("")
+ user String? @default("")
+ metadata Json? @default("{}")
+ cache_hit String? @default("")
+ cache_key String? @default("")
+ request_tags Json? @default("[]")
+ team_id String?
+ end_user String?
+ requester_ip_address String?
+ messages Json? @default("{}")
+ response Json? @default("{}")
+ @@index([startTime])
+ @@index([end_user])
+}
+
+// View spend, model, api_key per request
+model LiteLLM_ErrorLogs {
+ request_id String @id @default(uuid())
+ startTime DateTime // Assuming start_time is a DateTime field
+ endTime DateTime // Assuming end_time is a DateTime field
+ api_base String @default("")
+ model_group String @default("") // public model_name / model_group
+ litellm_model_name String @default("") // model passed to litellm
+ model_id String @default("") // ID of model in ProxyModelTable
+ request_kwargs Json @default("{}")
+ exception_type String @default("")
+ exception_string String @default("")
+ status_code String @default("")
+}
+
+// Beta - allow team members to request access to a model
+model LiteLLM_UserNotifications {
+ request_id String @id
+ user_id String
+ models String[]
+ justification String
+ status String // approved, disapproved, pending
+}
+
+model LiteLLM_TeamMembership {
+ // Use this table to track an Internal User's spend within a Team, and to set budgets / rpm limits for that user within the team
+ user_id String
+ team_id String
+ spend Float @default(0.0)
+ budget_id String?
+ litellm_budget_table LiteLLM_BudgetTable? @relation(fields: [budget_id], references: [budget_id])
+ @@id([user_id, team_id])
+}
+
+model LiteLLM_OrganizationMembership {
+ // Use this table to track Internal User and Organization membership. Helps track a user's role within an Organization
+ user_id String
+ organization_id String
+ user_role String?
+ spend Float? @default(0.0)
+ budget_id String?
+ created_at DateTime? @default(now()) @map("created_at")
+ updated_at DateTime? @default(now()) @updatedAt @map("updated_at")
+
+ // relations
+ user LiteLLM_UserTable @relation(fields: [user_id], references: [user_id])
+ organization LiteLLM_OrganizationTable @relation("OrganizationToMembership", fields: [organization_id], references: [organization_id])
+ litellm_budget_table LiteLLM_BudgetTable? @relation(fields: [budget_id], references: [budget_id])
+
+
+
+ @@id([user_id, organization_id])
+ @@unique([user_id, organization_id])
+}
+
+model LiteLLM_InvitationLink {
+ // use this table to track invite links sent by admin for people to join the proxy
+ id String @id @default(uuid())
+ user_id String
+ is_accepted Boolean @default(false)
+ accepted_at DateTime? // when link is claimed (user successfully onboards via link)
+ expires_at DateTime // till when is link valid
+ created_at DateTime // when did admin create the link
+ created_by String // who created the link
+ updated_at DateTime // when was invite status updated
+ updated_by String // who updated the status (admin/user who accepted invite)
+
+ // Relations
+ liteLLM_user_table_user LiteLLM_UserTable @relation("UserId", fields: [user_id], references: [user_id])
+ liteLLM_user_table_created LiteLLM_UserTable @relation("CreatedBy", fields: [created_by], references: [user_id])
+ liteLLM_user_table_updated LiteLLM_UserTable @relation("UpdatedBy", fields: [updated_by], references: [user_id])
+}
+
+
+model LiteLLM_AuditLog {
+ id String @id @default(uuid())
+ updated_at DateTime @default(now())
+ changed_by String @default("") // user or system that performed the action
+ changed_by_api_key String @default("") // api key hash that performed the action
+ action String // create, update, delete
+ table_name String // one of LitellmTableNames.TEAM_TABLE_NAME, LitellmTableNames.USER_TABLE_NAME, LitellmTableNames.PROXY_MODEL_TABLE_NAME,
+ object_id String // id of the object being audited. This can be the key id, team id, user id, model id
+ before_value Json? // value of the row
+ updated_values Json? // value of the row after change
+}
+
+// Track daily user spend metrics per model and key
+model LiteLLM_DailyUserSpend {
+ id String @id @default(uuid())
+ user_id String
+ date String
+ api_key String
+ model String
+ model_group String?
+ custom_llm_provider String?
+ prompt_tokens Int @default(0)
+ completion_tokens Int @default(0)
+ cache_read_input_tokens Int @default(0)
+ cache_creation_input_tokens Int @default(0)
+ spend Float @default(0.0)
+ api_requests Int @default(0)
+ successful_requests Int @default(0)
+ failed_requests Int @default(0)
+ created_at DateTime @default(now())
+ updated_at DateTime @updatedAt
+
+ @@unique([user_id, date, api_key, model, custom_llm_provider])
+ @@index([date])
+ @@index([user_id])
+ @@index([api_key])
+ @@index([model])
+}
+
+// Track daily team spend metrics per model and key
+model LiteLLM_DailyTeamSpend {
+ id String @id @default(uuid())
+ team_id String
+ date String
+ api_key String
+ model String
+ model_group String?
+ custom_llm_provider String?
+ prompt_tokens Int @default(0)
+ completion_tokens Int @default(0)
+ cache_read_input_tokens Int @default(0)
+ cache_creation_input_tokens Int @default(0)
+ spend Float @default(0.0)
+ api_requests Int @default(0)
+ successful_requests Int @default(0)
+ failed_requests Int @default(0)
+ created_at DateTime @default(now())
+ updated_at DateTime @updatedAt
+
+ @@unique([team_id, date, api_key, model, custom_llm_provider])
+ @@index([date])
+ @@index([team_id])
+ @@index([api_key])
+ @@index([model])
+}
+
+// Track daily tag spend metrics per model and key
+model LiteLLM_DailyTagSpend {
+ id String @id @default(uuid())
+ tag String
+ date String
+ api_key String
+ model String
+ model_group String?
+ custom_llm_provider String?
+ prompt_tokens Int @default(0)
+ completion_tokens Int @default(0)
+ cache_read_input_tokens Int @default(0)
+ cache_creation_input_tokens Int @default(0)
+ spend Float @default(0.0)
+ api_requests Int @default(0)
+ successful_requests Int @default(0)
+ failed_requests Int @default(0)
+ created_at DateTime @default(now())
+ updated_at DateTime @updatedAt
+
+ @@unique([tag, date, api_key, model, custom_llm_provider])
+ @@index([date])
+ @@index([tag])
+ @@index([api_key])
+ @@index([model])
+}
+
+
+// Track the status of cron jobs running. Only allow one pod to run the job at a time
+model LiteLLM_CronJob {
+ cronjob_id String @id @default(cuid()) // Unique ID for the record
+ pod_id String // Unique identifier for the pod acting as the leader
+ status JobStatus @default(INACTIVE) // Status of the cron job (active or inactive)
+ last_updated DateTime @default(now()) // Timestamp for the last update of the cron job record
+ ttl DateTime // Time when the leader's lease expires
+}
+
+enum JobStatus {
+ ACTIVE
+ INACTIVE
+}
+
+model LiteLLM_ManagedFileTable {
+ id String @id @default(uuid())
+ unified_file_id String @unique // The base64 encoded unified file ID
+ file_object Json // Stores the OpenAIFileObject
+ model_mappings Json // Stores the mapping of model_id -> provider_file_id
+ created_at DateTime @default(now())
+ updated_at DateTime @updatedAt
+
+ @@index([unified_file_id])
+}
+
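For orientation, here is a sketch of how this schema is consumed from Python with prisma-client-py (the generator declared at the top of the file) once `prisma generate` has run. The lowercased accessor name and the example user id are assumptions, not something this change defines.

```python
# Illustrative only: querying the generated client for the schema above.
import asyncio

from prisma import Prisma  # provided by prisma-client-py after `prisma generate`


async def main() -> None:
    db = Prisma()
    await db.connect()
    # Accessors are the lowercased model names, e.g. LiteLLM_UserTable -> litellm_usertable
    user = await db.litellm_usertable.find_unique(where={"user_id": "user-123"})
    print(user.teams if user else "no such user")
    await db.disconnect()


asyncio.run(main())
```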
diff --git a/litellm-proxy-extras/litellm_proxy_extras/utils.py b/litellm-proxy-extras/litellm_proxy_extras/utils.py
new file mode 100644
index 0000000000..e771e48e45
--- /dev/null
+++ b/litellm-proxy-extras/litellm_proxy_extras/utils.py
@@ -0,0 +1,252 @@
+import glob
+import os
+import random
+import re
+import subprocess
+import time
+from pathlib import Path
+from typing import Optional
+
+from litellm_proxy_extras._logging import logger
+
+
+def str_to_bool(value: Optional[str]) -> bool:
+ if value is None:
+ return False
+ return value.lower() in ("true", "1", "t", "y", "yes")
+
+
+class ProxyExtrasDBManager:
+ @staticmethod
+ def _get_prisma_dir() -> str:
+ """Get the path to the migrations directory"""
+ migrations_dir = os.path.dirname(__file__)
+ return migrations_dir
+
+ @staticmethod
+ def _create_baseline_migration(schema_path: str) -> bool:
+ """Create a baseline migration for an existing database"""
+ prisma_dir = ProxyExtrasDBManager._get_prisma_dir()
+ prisma_dir_path = Path(prisma_dir)
+ init_dir = prisma_dir_path / "migrations" / "0_init"
+
+ # Create migrations/0_init directory
+ init_dir.mkdir(parents=True, exist_ok=True)
+
+ # Generate migration SQL file
+ migration_file = init_dir / "migration.sql"
+
+ try:
+ # Generate migration diff with increased timeout
+ subprocess.run(
+ [
+ "prisma",
+ "migrate",
+ "diff",
+ "--from-empty",
+ "--to-schema-datamodel",
+ str(schema_path),
+ "--script",
+ ],
+ stdout=open(migration_file, "w"),
+ check=True,
+ timeout=30,
+ ) # 30 second timeout
+
+ # Mark migration as applied with increased timeout
+ subprocess.run(
+ [
+ "prisma",
+ "migrate",
+ "resolve",
+ "--applied",
+ "0_init",
+ ],
+ check=True,
+ timeout=30,
+ )
+
+ return True
+ except subprocess.TimeoutExpired:
+ logger.warning(
+ "Migration timed out - the database might be under heavy load."
+ )
+ return False
+ except subprocess.CalledProcessError as e:
+ logger.warning(f"Error creating baseline migration: {e}")
+ return False
+
+ @staticmethod
+ def _get_migration_names(migrations_dir: str) -> list:
+ """Get all migration directory names from the migrations folder"""
+ migration_paths = glob.glob(f"{migrations_dir}/migrations/*/migration.sql")
+ logger.info(f"Found {len(migration_paths)} migrations at {migrations_dir}")
+ return [Path(p).parent.name for p in migration_paths]
+
+ @staticmethod
+ def _roll_back_migration(migration_name: str):
+ """Mark a specific migration as rolled back"""
+ subprocess.run(
+ ["prisma", "migrate", "resolve", "--rolled-back", migration_name],
+ timeout=60,
+ check=True,
+ capture_output=True,
+ )
+
+ @staticmethod
+ def _resolve_specific_migration(migration_name: str):
+ """Mark a specific migration as applied"""
+ subprocess.run(
+ ["prisma", "migrate", "resolve", "--applied", migration_name],
+ timeout=60,
+ check=True,
+ capture_output=True,
+ )
+
+ @staticmethod
+ def _resolve_all_migrations(migrations_dir: str):
+ """Mark all existing migrations as applied"""
+ migration_names = ProxyExtrasDBManager._get_migration_names(migrations_dir)
+ logger.info(f"Resolving {len(migration_names)} migrations")
+ for migration_name in migration_names:
+ try:
+ logger.info(f"Resolving migration: {migration_name}")
+ subprocess.run(
+ ["prisma", "migrate", "resolve", "--applied", migration_name],
+ timeout=60,
+ check=True,
+ capture_output=True,
+ text=True,
+ )
+ logger.debug(f"Resolved migration: {migration_name}")
+ except subprocess.CalledProcessError as e:
+ if "is already recorded as applied in the database." not in e.stderr:
+ logger.warning(
+ f"Failed to resolve migration {migration_name}: {e.stderr}"
+ )
+
+ @staticmethod
+ def setup_database(schema_path: str, use_migrate: bool = False) -> bool:
+ """
+ Set up the database using either prisma migrate or prisma db push
+ Uses migrations from litellm-proxy-extras package
+
+ Args:
+ schema_path (str): Path to the Prisma schema file
+ use_migrate (bool): Whether to use prisma migrate instead of db push
+
+ Returns:
+ bool: True if setup was successful, False otherwise
+ """
+ use_migrate = str_to_bool(os.getenv("USE_PRISMA_MIGRATE")) or use_migrate
+ for attempt in range(4):
+ original_dir = os.getcwd()
+ migrations_dir = ProxyExtrasDBManager._get_prisma_dir()
+ os.chdir(migrations_dir)
+
+ try:
+ if use_migrate:
+ logger.info("Running prisma migrate deploy")
+ try:
+ # Set migrations directory for Prisma
+ result = subprocess.run(
+ ["prisma", "migrate", "deploy"],
+ timeout=60,
+ check=True,
+ capture_output=True,
+ text=True,
+ )
+ logger.info(f"prisma migrate deploy stdout: {result.stdout}")
+
+ logger.info("prisma migrate deploy completed")
+ return True
+ except subprocess.CalledProcessError as e:
+ logger.info(f"prisma db error: {e.stderr}, e: {e.stdout}")
+ if "P3009" in e.stderr:
+ # Extract the failed migration name from the error message
+ migration_match = re.search(
+ r"`(\d+_.*)` migration", e.stderr
+ )
+ if migration_match:
+ failed_migration = migration_match.group(1)
+ logger.info(
+ f"Found failed migration: {failed_migration}, marking as rolled back"
+ )
+ # Mark the failed migration as rolled back
+ subprocess.run(
+ [
+ "prisma",
+ "migrate",
+ "resolve",
+ "--rolled-back",
+ failed_migration,
+ ],
+ timeout=60,
+ check=True,
+ capture_output=True,
+ text=True,
+ )
+ logger.info(
+ f"✅ Migration {failed_migration} marked as rolled back... retrying"
+ )
+ elif (
+ "P3005" in e.stderr
+ and "database schema is not empty" in e.stderr
+ ):
+ logger.info(
+ "Database schema is not empty, creating baseline migration"
+ )
+ ProxyExtrasDBManager._create_baseline_migration(schema_path)
+ logger.info(
+ "Baseline migration created, resolving all migrations"
+ )
+ ProxyExtrasDBManager._resolve_all_migrations(migrations_dir)
+ logger.info("✅ All migrations resolved.")
+ return True
+ elif (
+ "P3018" in e.stderr
+ ): # P3018: Prisma "migration failed to apply" error, e.g. a column that already exists
+ logger.info(
+ "Migration already exists, resolving specific migration"
+ )
+ # Extract the migration name from the error message
+ migration_match = re.search(
+ r"Migration name: (\d+_.*)", e.stderr
+ )
+ if migration_match:
+ migration_name = migration_match.group(1)
+ logger.info(f"Rolling back migration {migration_name}")
+ ProxyExtrasDBManager._roll_back_migration(
+ migration_name
+ )
+ logger.info(
+ f"Resolving migration {migration_name} that failed due to existing columns"
+ )
+ ProxyExtrasDBManager._resolve_specific_migration(
+ migration_name
+ )
+ logger.info("✅ Migration resolved.")
+ else:
+ # Use prisma db push with increased timeout
+ subprocess.run(
+ ["prisma", "db", "push", "--accept-data-loss"],
+ timeout=60,
+ check=True,
+ )
+ return True
+ except subprocess.TimeoutExpired:
+ logger.info(f"Attempt {attempt + 1} timed out")
+ time.sleep(random.randrange(5, 15))
+ except subprocess.CalledProcessError as e:
+ attempts_left = 3 - attempt
+ retry_msg = (
+ f" Retrying... ({attempts_left} attempts left)"
+ if attempts_left > 0
+ else ""
+ )
+ logger.info(f"The process failed to execute. Details: {e}.{retry_msg}")
+ time.sleep(random.randrange(5, 15))
+ finally:
+ os.chdir(original_dir)
+ return False
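A short usage sketch for the manager above. The DATABASE_URL and schema path are placeholders; `setup_database` also reads USE_PRISMA_MIGRATE from the environment, so the explicit flag is optional.

```python
# Illustrative only: driving the migration helper defined above.
import os

from litellm_proxy_extras.utils import ProxyExtrasDBManager

os.environ.setdefault("DATABASE_URL", "postgresql://user:pass@localhost:5432/litellm")  # placeholder

ok = ProxyExtrasDBManager.setup_database(
    schema_path="litellm-proxy-extras/litellm_proxy_extras/schema.prisma",  # adjust to your checkout
    use_migrate=True,  # False falls back to `prisma db push --accept-data-loss`
)
print("database ready" if ok else "database setup failed after retries")
```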
diff --git a/litellm-proxy-extras/poetry.lock b/litellm-proxy-extras/poetry.lock
new file mode 100644
index 0000000000..bb436a168c
--- /dev/null
+++ b/litellm-proxy-extras/poetry.lock
@@ -0,0 +1,7 @@
+# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
+package = []
+
+[metadata]
+lock-version = "2.1"
+python-versions = ">=3.8.1,<4.0, !=3.9.7"
+content-hash = "2cf39473e67ff0615f0a61c9d2ac9f02b38cc08cbb1bdb893d89bee002646623"
diff --git a/litellm-proxy-extras/pyproject.toml b/litellm-proxy-extras/pyproject.toml
new file mode 100644
index 0000000000..75e2ef9a5c
--- /dev/null
+++ b/litellm-proxy-extras/pyproject.toml
@@ -0,0 +1,30 @@
+[tool.poetry]
+name = "litellm-proxy-extras"
+version = "0.1.11"
+description = "Additional files for the LiteLLM Proxy. Reduces the size of the main litellm package."
+authors = ["BerriAI"]
+readme = "README.md"
+
+
+[tool.poetry.urls]
+homepage = "https://litellm.ai"
+Homepage = "https://litellm.ai"
+repository = "https://github.com/BerriAI/litellm"
+Repository = "https://github.com/BerriAI/litellm"
+documentation = "https://docs.litellm.ai"
+Documentation = "https://docs.litellm.ai"
+
+[tool.poetry.dependencies]
+python = ">=3.8.1,<4.0, !=3.9.7"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.commitizen]
+version = "0.1.11"
+version_files = [
+ "pyproject.toml:version",
+ "../requirements.txt:litellm-proxy-extras==",
+ "../pyproject.toml:litellm-proxy-extras = {version = \""
+]
\ No newline at end of file
diff --git a/litellm-proxy-extras/tests/__init__.py b/litellm-proxy-extras/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/litellm/__init__.py b/litellm/__init__.py
index a9c975419e..65c5775c04 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -2,7 +2,7 @@
import warnings
warnings.filterwarnings("ignore", message=".*conflict with protected namespace.*")
-### INIT VARIABLES #########
+### INIT VARIABLES ###########
import threading
import os
from typing import Callable, List, Optional, Dict, Union, Any, Literal, get_args
@@ -57,6 +57,9 @@ from litellm.constants import (
bedrock_embedding_models,
known_tokenizer_config,
BEDROCK_INVOKE_PROVIDERS_LITERAL,
+ DEFAULT_MAX_TOKENS,
+ DEFAULT_SOFT_BUDGET,
+ DEFAULT_ALLOWED_FAILS,
)
from litellm.types.guardrails import GuardrailItem
from litellm.proxy._types import (
@@ -64,6 +67,7 @@ from litellm.proxy._types import (
KeyManagementSettings,
LiteLLM_UpperboundKeyGenerateParams,
)
+from litellm.types.proxy.management_endpoints.ui_sso import DefaultTeamSSOParams
from litellm.types.utils import StandardKeyGenerationConfig, LlmProviders
from litellm.integrations.custom_logger import CustomLogger
from litellm.litellm_core_utils.logging_callback_manager import LoggingCallbackManager
@@ -110,6 +114,8 @@ _custom_logger_compatible_callbacks_literal = Literal[
"pagerduty",
"humanloop",
"gcs_pubsub",
+ "agentops",
+ "anthropic_cache_control_hook",
]
logged_real_time_event_types: Optional[Union[List[str], Literal["*"]]] = None
_known_custom_logger_compatible_callbacks: List = list(
@@ -121,8 +127,12 @@ callbacks: List[
langfuse_default_tags: Optional[List[str]] = None
langsmith_batch_size: Optional[int] = None
prometheus_initialize_budget_metrics: Optional[bool] = False
+require_auth_for_metrics_endpoint: Optional[bool] = False
argilla_batch_size: Optional[int] = None
datadog_use_v1: Optional[bool] = False # if you want to use v1 datadog logged payload
+gcs_pub_sub_use_v1: Optional[bool] = (
+ False # if you want to use v1 gcs pubsub logged payload
+)
argilla_transformation_object: Optional[Dict[str, Any]] = None
_async_input_callback: List[Union[str, Callable, CustomLogger]] = (
[]
@@ -153,9 +163,9 @@ token: Optional[str] = (
None # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
)
telemetry = True
-max_tokens = 256 # OpenAI Defaults
+max_tokens: int = DEFAULT_MAX_TOKENS # OpenAI Defaults
drop_params = bool(os.getenv("LITELLM_DROP_PARAMS", False))
-modify_params = False
+modify_params = bool(os.getenv("LITELLM_MODIFY_PARAMS", False))
retry = True
### AUTH ###
api_key: Optional[str] = None
@@ -247,7 +257,7 @@ budget_duration: Optional[str] = (
None # proxy only - resets budget after fixed duration. You can set duration as seconds ("30s"), minutes ("30m"), hours ("30h"), days ("30d").
)
default_soft_budget: float = (
- 50.0 # by default all litellm proxy keys have a soft budget of 50.0
+ DEFAULT_SOFT_BUDGET # by default all litellm proxy keys have a soft budget of 50.0
)
forward_traceparent_to_llm_provider: bool = False
@@ -271,6 +281,7 @@ default_key_generate_params: Optional[Dict] = None
upperbound_key_generate_params: Optional[LiteLLM_UpperboundKeyGenerateParams] = None
key_generation_settings: Optional[StandardKeyGenerationConfig] = None
default_internal_user_params: Optional[Dict] = None
+default_team_params: Optional[Union[DefaultTeamSSOParams, Dict]] = None
default_team_settings: Optional[List] = None
max_user_budget: Optional[float] = None
default_max_internal_user_budget: Optional[float] = None
@@ -317,6 +328,7 @@ from litellm.litellm_core_utils.get_model_cost_map import get_model_cost_map
model_cost = get_model_cost_map(url=model_cost_map_url)
custom_prompt_dict: Dict[str, dict] = {}
+check_provider_endpoint = False
####### THREAD-SPECIFIC DATA ####################
@@ -406,6 +418,7 @@ deepseek_models: List = []
azure_ai_models: List = []
jina_ai_models: List = []
voyage_models: List = []
+infinity_models: List = []
databricks_models: List = []
cloudflare_models: List = []
codestral_models: List = []
@@ -548,6 +561,8 @@ def add_known_models():
azure_ai_models.append(key)
elif value.get("litellm_provider") == "voyage":
voyage_models.append(key)
+ elif value.get("litellm_provider") == "infinity":
+ infinity_models.append(key)
elif value.get("litellm_provider") == "databricks":
databricks_models.append(key)
elif value.get("litellm_provider") == "cloudflare":
@@ -636,6 +651,7 @@ model_list = (
+ deepseek_models
+ azure_ai_models
+ voyage_models
+ + infinity_models
+ databricks_models
+ cloudflare_models
+ codestral_models
@@ -692,6 +708,7 @@ models_by_provider: dict = {
"mistral": mistral_chat_models,
"azure_ai": azure_ai_models,
"voyage": voyage_models,
+ "infinity": infinity_models,
"databricks": databricks_models,
"cloudflare": cloudflare_models,
"codestral": codestral_models,
@@ -761,12 +778,14 @@ from .utils import (
create_pretrained_tokenizer,
create_tokenizer,
supports_function_calling,
+ supports_web_search,
supports_response_schema,
supports_parallel_function_calling,
supports_vision,
supports_audio_input,
supports_audio_output,
supports_system_messages,
+ supports_reasoning,
get_litellm_params,
acreate,
get_max_tokens,
@@ -807,13 +826,13 @@ from .llms.aiohttp_openai.chat.transformation import AiohttpOpenAIChatConfig
from .llms.galadriel.chat.transformation import GaladrielChatConfig
from .llms.github.chat.transformation import GithubChatConfig
from .llms.empower.chat.transformation import EmpowerChatConfig
-from .llms.huggingface.chat.transformation import (
- HuggingfaceChatConfig as HuggingfaceConfig,
-)
+from .llms.huggingface.chat.transformation import HuggingFaceChatConfig
+from .llms.huggingface.embedding.transformation import HuggingFaceEmbeddingConfig
from .llms.oobabooga.chat.transformation import OobaboogaConfig
from .llms.maritalk import MaritalkConfig
from .llms.openrouter.chat.transformation import OpenrouterConfig
from .llms.anthropic.chat.transformation import AnthropicConfig
+from .llms.anthropic.common_utils import AnthropicModelInfo
from .llms.groq.stt.transformation import GroqSTTConfig
from .llms.anthropic.completion.transformation import AnthropicTextConfig
from .llms.triton.completion.transformation import TritonConfig
@@ -849,6 +868,7 @@ from .llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
VertexGeminiConfig,
VertexGeminiConfig as VertexAIConfig,
)
+from .llms.gemini.common_utils import GeminiModelInfo
from .llms.gemini.chat.transformation import (
GoogleAIStudioGeminiConfig,
GoogleAIStudioGeminiConfig as GeminiConfig, # aliased to maintain backwards compatibility
@@ -944,9 +964,11 @@ from .llms.topaz.image_variations.transformation import TopazImageVariationConfi
from litellm.llms.openai.completion.transformation import OpenAITextCompletionConfig
from .llms.groq.chat.transformation import GroqChatConfig
from .llms.voyage.embedding.transformation import VoyageEmbeddingConfig
+from .llms.infinity.embedding.transformation import InfinityEmbeddingConfig
from .llms.azure_ai.chat.transformation import AzureAIStudioConfig
from .llms.mistral.mistral_chat_transformation import MistralConfig
from .llms.openai.responses.transformation import OpenAIResponsesAPIConfig
+from .llms.azure.responses.transformation import AzureOpenAIResponsesAPIConfig
from .llms.openai.chat.o_series_transformation import (
OpenAIOSeriesConfig as OpenAIO1Config, # maintain backwards compatibility
OpenAIOSeriesConfig,
@@ -958,6 +980,12 @@ openaiOSeriesConfig = OpenAIOSeriesConfig()
from .llms.openai.chat.gpt_transformation import (
OpenAIGPTConfig,
)
+from .llms.openai.transcriptions.whisper_transformation import (
+ OpenAIWhisperAudioTranscriptionConfig,
+)
+from .llms.openai.transcriptions.gpt_transformation import (
+ OpenAIGPTAudioTranscriptionConfig,
+)
openAIGPTConfig = OpenAIGPTConfig()
from .llms.openai.chat.gpt_audio_transformation import (
@@ -986,6 +1014,7 @@ from .llms.fireworks_ai.embed.fireworks_ai_transformation import (
from .llms.friendliai.chat.transformation import FriendliaiChatConfig
from .llms.jina_ai.embedding.transformation import JinaAIEmbeddingConfig
from .llms.xai.chat.transformation import XAIChatConfig
+from .llms.xai.common_utils import XAIModelInfo
from .llms.volcengine import VolcEngineConfig
from .llms.codestral.completion.transformation import CodestralTextCompletionConfig
from .llms.azure.azure import (
@@ -1047,6 +1076,7 @@ from .cost_calculator import response_cost_calculator, cost_per_token
### ADAPTERS ###
from .types.adapter import AdapterItem
+import litellm.anthropic_interface as anthropic
adapters: List[AdapterItem] = []
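Among the additions above are the `supports_web_search` and `supports_reasoning` re-exports; a small sketch of probing them follows. The model names are illustrative, and the answers depend on the bundled model cost map.

```python
# Illustrative only: capability checks via the newly re-exported helpers.
import litellm

for model_name in ["gpt-4o-mini", "anthropic/claude-3-7-sonnet-20250219"]:
    print(
        model_name,
        "web_search:", litellm.supports_web_search(model=model_name),
        "reasoning:", litellm.supports_reasoning(model=model_name),
    )
```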
diff --git a/litellm/_logging.py b/litellm/_logging.py
index 151ae6003d..d7e2c9e778 100644
--- a/litellm/_logging.py
+++ b/litellm/_logging.py
@@ -1,6 +1,7 @@
import json
import logging
import os
+import sys
from datetime import datetime
from logging import Formatter
@@ -40,9 +41,56 @@ class JsonFormatter(Formatter):
return json.dumps(json_record)
+# Function to set up exception handlers for JSON logging
+def _setup_json_exception_handlers(formatter):
+ # Create a handler with JSON formatting for exceptions
+ error_handler = logging.StreamHandler()
+ error_handler.setFormatter(formatter)
+
+ # Setup excepthook for uncaught exceptions
+ def json_excepthook(exc_type, exc_value, exc_traceback):
+ record = logging.LogRecord(
+ name="LiteLLM",
+ level=logging.ERROR,
+ pathname="",
+ lineno=0,
+ msg=str(exc_value),
+ args=(),
+ exc_info=(exc_type, exc_value, exc_traceback),
+ )
+ error_handler.handle(record)
+
+ sys.excepthook = json_excepthook
+
+ # Configure asyncio exception handler if possible
+ try:
+ import asyncio
+
+ def async_json_exception_handler(loop, context):
+ exception = context.get("exception")
+ if exception:
+ record = logging.LogRecord(
+ name="LiteLLM",
+ level=logging.ERROR,
+ pathname="",
+ lineno=0,
+ msg=str(exception),
+ args=(),
+ exc_info=None,
+ )
+ error_handler.handle(record)
+ else:
+ loop.default_exception_handler(context)
+
+ asyncio.get_event_loop().set_exception_handler(async_json_exception_handler)
+ except Exception:
+ pass
+
+
# Create a formatter and set it for the handler
if json_logs:
handler.setFormatter(JsonFormatter())
+ _setup_json_exception_handlers(JsonFormatter())
else:
formatter = logging.Formatter(
"\033[92m%(asctime)s - %(name)s:%(levelname)s\033[0m: %(filename)s:%(lineno)s - %(message)s",
@@ -65,18 +113,24 @@ def _turn_on_json():
handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())
- # Define a list of the loggers to update
- loggers = [verbose_router_logger, verbose_proxy_logger, verbose_logger]
+ # Define all loggers to update, including root logger
+ loggers = [logging.getLogger()] + [
+ verbose_router_logger,
+ verbose_proxy_logger,
+ verbose_logger,
+ ]
# Iterate through each logger and update its handlers
for logger in loggers:
# Remove all existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
-
# Add the new handler
logger.addHandler(handler)
+ # Set up exception handlers
+ _setup_json_exception_handlers(JsonFormatter())
+
def _turn_on_debug():
verbose_logger.setLevel(level=logging.DEBUG) # set package log to debug
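With the new hooks, JSON logging also covers uncaught and asyncio exceptions. A sketch of both ways to turn it on: the JSON_LOGS environment variable is read once at import time, while `_turn_on_json()` flips the formatter at runtime.

```python
# Illustrative only: JSON-formatted logs plus the new exception hooks.
import os

os.environ["JSON_LOGS"] = "True"  # must be set before litellm is first imported

from litellm._logging import _turn_on_json, verbose_logger  # noqa: E402

_turn_on_json()  # runtime switch: re-points litellm loggers (and root) at the JSON formatter
verbose_logger.warning("this line is emitted as a single JSON record")
raise RuntimeError("uncaught errors are serialized too, via the installed sys.excepthook")
```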
diff --git a/litellm/_redis.py b/litellm/_redis.py
index 1e03993c20..14813c436e 100644
--- a/litellm/_redis.py
+++ b/litellm/_redis.py
@@ -18,6 +18,7 @@ import redis # type: ignore
import redis.asyncio as async_redis # type: ignore
from litellm import get_secret, get_secret_str
+from litellm.constants import REDIS_CONNECTION_POOL_TIMEOUT, REDIS_SOCKET_TIMEOUT
from ._logging import verbose_logger
@@ -182,9 +183,7 @@ def init_redis_cluster(redis_kwargs) -> redis.RedisCluster:
"REDIS_CLUSTER_NODES environment variable is not valid JSON. Please ensure it's properly formatted."
)
- verbose_logger.debug(
- "init_redis_cluster: startup nodes are being initialized."
- )
+ verbose_logger.debug("init_redis_cluster: startup nodes are being initialized.")
from redis.cluster import ClusterNode
args = _get_redis_cluster_kwargs()
@@ -204,6 +203,7 @@ def init_redis_cluster(redis_kwargs) -> redis.RedisCluster:
def _init_redis_sentinel(redis_kwargs) -> redis.Redis:
sentinel_nodes = redis_kwargs.get("sentinel_nodes")
+ sentinel_password = redis_kwargs.get("sentinel_password")
service_name = redis_kwargs.get("service_name")
if not sentinel_nodes or not service_name:
@@ -214,7 +214,11 @@ def _init_redis_sentinel(redis_kwargs) -> redis.Redis:
verbose_logger.debug("init_redis_sentinel: sentinel nodes are being initialized.")
# Set up the Sentinel client
- sentinel = redis.Sentinel(sentinel_nodes, socket_timeout=0.1)
+ sentinel = redis.Sentinel(
+ sentinel_nodes,
+ socket_timeout=REDIS_SOCKET_TIMEOUT,
+ password=sentinel_password,
+ )
# Return the master instance for the given service
@@ -236,7 +240,7 @@ def _init_async_redis_sentinel(redis_kwargs) -> async_redis.Redis:
# Set up the Sentinel client
sentinel = async_redis.Sentinel(
sentinel_nodes,
- socket_timeout=0.1,
+ socket_timeout=REDIS_SOCKET_TIMEOUT,
password=sentinel_password,
)
@@ -307,7 +311,6 @@ def get_redis_async_client(
return _init_async_redis_sentinel(redis_kwargs)
return async_redis.Redis(
- socket_timeout=5,
**redis_kwargs,
)
@@ -317,7 +320,7 @@ def get_redis_connection_pool(**env_overrides):
verbose_logger.debug("get_redis_connection_pool: redis_kwargs", redis_kwargs)
if "url" in redis_kwargs and redis_kwargs["url"] is not None:
return async_redis.BlockingConnectionPool.from_url(
- timeout=5, url=redis_kwargs["url"]
+ timeout=REDIS_CONNECTION_POOL_TIMEOUT, url=redis_kwargs["url"]
)
connection_class = async_redis.Connection
if "ssl" in redis_kwargs:
@@ -325,4 +328,6 @@ def get_redis_connection_pool(**env_overrides):
redis_kwargs.pop("ssl", None)
redis_kwargs["connection_class"] = connection_class
redis_kwargs.pop("startup_nodes", None)
- return async_redis.BlockingConnectionPool(timeout=5, **redis_kwargs)
+ return async_redis.BlockingConnectionPool(
+ timeout=REDIS_CONNECTION_POOL_TIMEOUT, **redis_kwargs
+ )
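The sentinel path now forwards a password and uses the shared REDIS_SOCKET_TIMEOUT constant. Here is a sketch of the kwargs it expects, with placeholder hosts and credentials.

```python
# Illustrative only: Redis Sentinel kwargs as consumed by _init_redis_sentinel above.
from litellm._redis import _init_redis_sentinel

# Returns the master client for the named service
redis_client = _init_redis_sentinel(
    {
        "sentinel_nodes": [("sentinel-1.internal", 26379), ("sentinel-2.internal", 26379)],
        "sentinel_password": "example-password",  # newly supported field; placeholder value
        "service_name": "mymaster",
    }
)
redis_client.ping()
```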
diff --git a/litellm/_service_logger.py b/litellm/_service_logger.py
index 0b4f22e210..7a60359d54 100644
--- a/litellm/_service_logger.py
+++ b/litellm/_service_logger.py
@@ -15,7 +15,7 @@ from .types.services import ServiceLoggerPayload, ServiceTypes
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span
- Span = _Span
+ Span = Union[_Span, Any]
OTELClass = OpenTelemetry
else:
Span = Any
@@ -124,6 +124,7 @@ class ServiceLogging(CustomLogger):
service=service,
duration=duration,
call_type=call_type,
+ event_metadata=event_metadata,
)
for callback in litellm.service_callback:
@@ -229,6 +230,7 @@ class ServiceLogging(CustomLogger):
service=service,
duration=duration,
call_type=call_type,
+ event_metadata=event_metadata,
)
for callback in litellm.service_callback:
diff --git a/litellm/_version.py b/litellm/_version.py
index 9f85eb3f8a..2034cc4f33 100644
--- a/litellm/_version.py
+++ b/litellm/_version.py
@@ -3,4 +3,4 @@ import importlib_metadata
try:
version = importlib_metadata.version("litellm")
except Exception:
- pass
+ version = "unknown"
diff --git a/litellm/anthropic_interface/__init__.py b/litellm/anthropic_interface/__init__.py
new file mode 100644
index 0000000000..9902fdc553
--- /dev/null
+++ b/litellm/anthropic_interface/__init__.py
@@ -0,0 +1,6 @@
+"""
+Anthropic module for LiteLLM
+"""
+from .messages import acreate, create
+
+__all__ = ["acreate", "create"]
diff --git a/litellm/anthropic_interface/messages/__init__.py b/litellm/anthropic_interface/messages/__init__.py
new file mode 100644
index 0000000000..f3249f981b
--- /dev/null
+++ b/litellm/anthropic_interface/messages/__init__.py
@@ -0,0 +1,117 @@
+"""
+Interface for Anthropic's messages API
+
+Use this to call LLMs in Anthropic /messages Request/Response format
+
+This is an __init__.py file to allow the following interface
+
+- litellm.messages.acreate
+- litellm.messages.create
+
+"""
+
+from typing import AsyncIterator, Dict, Iterator, List, Optional, Union
+
+from litellm.llms.anthropic.experimental_pass_through.messages.handler import (
+ anthropic_messages as _async_anthropic_messages,
+)
+from litellm.types.llms.anthropic_messages.anthropic_response import (
+ AnthropicMessagesResponse,
+)
+
+
+async def acreate(
+ max_tokens: int,
+ messages: List[Dict],
+ model: str,
+ metadata: Optional[Dict] = None,
+ stop_sequences: Optional[List[str]] = None,
+ stream: Optional[bool] = False,
+ system: Optional[str] = None,
+ temperature: Optional[float] = 1.0,
+ thinking: Optional[Dict] = None,
+ tool_choice: Optional[Dict] = None,
+ tools: Optional[List[Dict]] = None,
+ top_k: Optional[int] = None,
+ top_p: Optional[float] = None,
+ **kwargs
+) -> Union[AnthropicMessagesResponse, AsyncIterator]:
+ """
+ Async wrapper for Anthropic's messages API
+
+ Args:
+ max_tokens (int): Maximum tokens to generate (required)
+ messages (List[Dict]): List of message objects with role and content (required)
+ model (str): Model name to use (required)
+ metadata (Dict, optional): Request metadata
+ stop_sequences (List[str], optional): Custom stop sequences
+ stream (bool, optional): Whether to stream the response
+ system (str, optional): System prompt
+ temperature (float, optional): Sampling temperature (0.0 to 1.0)
+ thinking (Dict, optional): Extended thinking configuration
+ tool_choice (Dict, optional): Tool choice configuration
+ tools (List[Dict], optional): List of tool definitions
+ top_k (int, optional): Top K sampling parameter
+ top_p (float, optional): Nucleus sampling parameter
+ **kwargs: Additional arguments
+
+ Returns:
+        AnthropicMessagesResponse or AsyncIterator: Response from the API (an async iterator of chunks when stream=True)
+ """
+ return await _async_anthropic_messages(
+ max_tokens=max_tokens,
+ messages=messages,
+ model=model,
+ metadata=metadata,
+ stop_sequences=stop_sequences,
+ stream=stream,
+ system=system,
+ temperature=temperature,
+ thinking=thinking,
+ tool_choice=tool_choice,
+ tools=tools,
+ top_k=top_k,
+ top_p=top_p,
+ **kwargs,
+ )
+
+
+async def create(
+ max_tokens: int,
+ messages: List[Dict],
+ model: str,
+ metadata: Optional[Dict] = None,
+ stop_sequences: Optional[List[str]] = None,
+ stream: Optional[bool] = False,
+ system: Optional[str] = None,
+ temperature: Optional[float] = 1.0,
+ thinking: Optional[Dict] = None,
+ tool_choice: Optional[Dict] = None,
+ tools: Optional[List[Dict]] = None,
+ top_k: Optional[int] = None,
+ top_p: Optional[float] = None,
+ **kwargs
+) -> Union[AnthropicMessagesResponse, Iterator]:
+ """
+ Async wrapper for Anthropic's messages API
+
+ Args:
+ max_tokens (int): Maximum tokens to generate (required)
+ messages (List[Dict]): List of message objects with role and content (required)
+ model (str): Model name to use (required)
+ metadata (Dict, optional): Request metadata
+ stop_sequences (List[str], optional): Custom stop sequences
+ stream (bool, optional): Whether to stream the response
+ system (str, optional): System prompt
+ temperature (float, optional): Sampling temperature (0.0 to 1.0)
+ thinking (Dict, optional): Extended thinking configuration
+ tool_choice (Dict, optional): Tool choice configuration
+ tools (List[Dict], optional): List of tool definitions
+ top_k (int, optional): Top K sampling parameter
+ top_p (float, optional): Nucleus sampling parameter
+ **kwargs: Additional arguments
+
+ Returns:
+        AnthropicMessagesResponse or Iterator: Response from the API (an iterator of chunks when stream=True)
+ """
+ raise NotImplementedError("This function is not implemented")
diff --git a/litellm/anthropic_interface/readme.md b/litellm/anthropic_interface/readme.md
new file mode 100644
index 0000000000..01c5f1b7c3
--- /dev/null
+++ b/litellm/anthropic_interface/readme.md
@@ -0,0 +1,116 @@
+## Use LLM API endpoints in the Anthropic `/v1/messages` interface
+
+Note: This module is named `anthropic_interface` because `anthropic` is an existing Python package, and reusing that name was failing mypy type checking.
+
+
+## Usage
+---
+
+### LiteLLM Python SDK
+
+#### Non-streaming example
+```python showLineNumbers title="Example using LiteLLM Python SDK"
+import os
+
+import litellm
+
+response = await litellm.anthropic.messages.acreate(
+    messages=[{"role": "user", "content": "Hello, can you tell me a short joke?"}],
+    api_key=os.getenv("ANTHROPIC_API_KEY"),
+ model="anthropic/claude-3-haiku-20240307",
+ max_tokens=100,
+)
+```
+
+Example response:
+```json
+{
+ "content": [
+ {
+ "text": "Hi! this is a very short joke",
+ "type": "text"
+ }
+ ],
+ "id": "msg_013Zva2CMHLNnXjNJJKqJ2EF",
+ "model": "claude-3-7-sonnet-20250219",
+ "role": "assistant",
+ "stop_reason": "end_turn",
+ "stop_sequence": null,
+ "type": "message",
+ "usage": {
+ "input_tokens": 2095,
+ "output_tokens": 503,
+ "cache_creation_input_tokens": 2095,
+ "cache_read_input_tokens": 0
+ }
+}
+```
+
+#### Streaming example
+```python showLineNumbers title="Example using LiteLLM Python SDK"
+import os
+
+import litellm
+
+response = await litellm.anthropic.messages.acreate(
+    messages=[{"role": "user", "content": "Hello, can you tell me a short joke?"}],
+    api_key=os.getenv("ANTHROPIC_API_KEY"),
+ model="anthropic/claude-3-haiku-20240307",
+ max_tokens=100,
+ stream=True,
+)
+async for chunk in response:
+ print(chunk)
+```
+
+### LiteLLM Proxy Server
+
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+ - model_name: anthropic-claude
+ litellm_params:
+ model: claude-3-7-sonnet-latest
+```
+
+2. Start proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+3. Test it!
+
+```python showLineNumbers title="Example using LiteLLM Proxy Server"
+import anthropic
+
+# point anthropic sdk to litellm proxy
+client = anthropic.Anthropic(
+ base_url="http://0.0.0.0:4000",
+ api_key="sk-1234",
+)
+
+response = client.messages.create(
+ messages=[{"role": "user", "content": "Hello, can you tell me a short joke?"}],
+    model="anthropic-claude",  # model_name registered in the proxy config.yaml
+ max_tokens=100,
+)
+```
+
+```bash showLineNumbers title="Example using LiteLLM Proxy Server"
+curl -L -X POST 'http://0.0.0.0:4000/v1/messages' \
+-H 'content-type: application/json' \
+-H "x-api-key: $LITELLM_API_KEY" \
+-H 'anthropic-version: 2023-06-01' \
+-d '{
+ "model": "anthropic-claude",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Hello, can you tell me a short joke?"
+ }
+ ],
+ "max_tokens": 100
+}'
+```
\ No newline at end of file
diff --git a/litellm/assistants/main.py b/litellm/assistants/main.py
index 28f4518f15..928b6e8ac2 100644
--- a/litellm/assistants/main.py
+++ b/litellm/assistants/main.py
@@ -304,6 +304,11 @@ def create_assistants(
"response_format": response_format,
}
+ # only send params that are not None
+ create_assistant_data = {
+ k: v for k, v in create_assistant_data.items() if v is not None
+ }
+
response: Optional[Union[Coroutine[Any, Any, Assistant], Assistant]] = None
if custom_llm_provider == "openai":
api_base = (
diff --git a/litellm/batches/main.py b/litellm/batches/main.py
index 1ddcafce4c..f4f74c72fb 100644
--- a/litellm/batches/main.py
+++ b/litellm/batches/main.py
@@ -153,7 +153,6 @@ def create_batch(
)
api_base: Optional[str] = None
if custom_llm_provider == "openai":
-
# for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
api_base = (
optional_params.api_base
@@ -358,7 +357,6 @@ def retrieve_batch(
_is_async = kwargs.pop("aretrieve_batch", False) is True
api_base: Optional[str] = None
if custom_llm_provider == "openai":
-
# for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
api_base = (
optional_params.api_base
diff --git a/litellm/budget_manager.py b/litellm/budget_manager.py
index e664c4f44f..b25967579e 100644
--- a/litellm/budget_manager.py
+++ b/litellm/budget_manager.py
@@ -14,6 +14,12 @@ import time
from typing import Literal, Optional
import litellm
+from litellm.constants import (
+ DAYS_IN_A_MONTH,
+ DAYS_IN_A_WEEK,
+ DAYS_IN_A_YEAR,
+ HOURS_IN_A_DAY,
+)
from litellm.utils import ModelResponse
@@ -81,11 +87,11 @@ class BudgetManager:
if duration == "daily":
duration_in_days = 1
elif duration == "weekly":
- duration_in_days = 7
+ duration_in_days = DAYS_IN_A_WEEK
elif duration == "monthly":
- duration_in_days = 28
+ duration_in_days = DAYS_IN_A_MONTH
elif duration == "yearly":
- duration_in_days = 365
+ duration_in_days = DAYS_IN_A_YEAR
else:
raise ValueError(
"""duration needs to be one of ["daily", "weekly", "monthly", "yearly"]"""
@@ -182,7 +188,9 @@ class BudgetManager:
current_time = time.time()
# Convert duration from days to seconds
- duration_in_seconds = self.user_dict[user]["duration"] * 24 * 60 * 60
+ duration_in_seconds = (
+ self.user_dict[user]["duration"] * HOURS_IN_A_DAY * 60 * 60
+ )
# Check if duration has elapsed
if current_time - last_updated_at >= duration_in_seconds:
diff --git a/litellm/caching/base_cache.py b/litellm/caching/base_cache.py
index 7109951d15..5140b390f7 100644
--- a/litellm/caching/base_cache.py
+++ b/litellm/caching/base_cache.py
@@ -9,12 +9,12 @@ Has 4 methods:
"""
from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any, Optional, Union
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span
- Span = _Span
+ Span = Union[_Span, Any]
else:
Span = Any
diff --git a/litellm/caching/caching.py b/litellm/caching/caching.py
index 415c49edff..6a7c93e3fe 100644
--- a/litellm/caching/caching.py
+++ b/litellm/caching/caching.py
@@ -19,6 +19,7 @@ from pydantic import BaseModel
import litellm
from litellm._logging import verbose_logger
+from litellm.constants import CACHED_STREAMING_CHUNK_DELAY
from litellm.litellm_core_utils.model_param_helper import ModelParamHelper
from litellm.types.caching import *
from litellm.types.utils import all_litellm_params
@@ -88,16 +89,16 @@ class Cache:
s3_aws_session_token: Optional[str] = None,
s3_config: Optional[Any] = None,
s3_path: Optional[str] = None,
- redis_semantic_cache_use_async=False,
- redis_semantic_cache_embedding_model="text-embedding-ada-002",
+ redis_semantic_cache_embedding_model: str = "text-embedding-ada-002",
+ redis_semantic_cache_index_name: Optional[str] = None,
redis_flush_size: Optional[int] = None,
redis_startup_nodes: Optional[List] = None,
- disk_cache_dir=None,
+ disk_cache_dir: Optional[str] = None,
qdrant_api_base: Optional[str] = None,
qdrant_api_key: Optional[str] = None,
qdrant_collection_name: Optional[str] = None,
qdrant_quantization_config: Optional[str] = None,
- qdrant_semantic_cache_embedding_model="text-embedding-ada-002",
+ qdrant_semantic_cache_embedding_model: str = "text-embedding-ada-002",
**kwargs,
):
"""
@@ -170,8 +171,8 @@ class Cache:
port=port,
password=password,
similarity_threshold=similarity_threshold,
- use_async=redis_semantic_cache_use_async,
embedding_model=redis_semantic_cache_embedding_model,
+ index_name=redis_semantic_cache_index_name,
**kwargs,
)
elif type == LiteLLMCacheType.QDRANT_SEMANTIC:
@@ -406,7 +407,7 @@ class Cache:
}
]
}
- time.sleep(0.02)
+ time.sleep(CACHED_STREAMING_CHUNK_DELAY)
def _get_cache_logic(
self,
diff --git a/litellm/caching/caching_handler.py b/litellm/caching/caching_handler.py
index 2a958c9eee..14278de9cd 100644
--- a/litellm/caching/caching_handler.py
+++ b/litellm/caching/caching_handler.py
@@ -66,9 +66,7 @@ class CachingHandlerResponse(BaseModel):
cached_result: Optional[Any] = None
final_embedding_cached_response: Optional[EmbeddingResponse] = None
- embedding_all_elements_cache_hit: bool = (
- False # this is set to True when all elements in the list have a cache hit in the embedding cache, if true return the final_embedding_cached_response no need to make an API call
- )
+ embedding_all_elements_cache_hit: bool = False # this is set to True when all elements in the list have a cache hit in the embedding cache, if true return the final_embedding_cached_response no need to make an API call
class LLMCachingHandler:
@@ -738,7 +736,6 @@ class LLMCachingHandler:
if self._should_store_result_in_cache(
original_function=self.original_function, kwargs=new_kwargs
):
-
litellm.cache.add_cache(result, **new_kwargs)
return
@@ -790,6 +787,7 @@ class LLMCachingHandler:
- Else append the chunk to self.async_streaming_chunks
"""
+
complete_streaming_response: Optional[
Union[ModelResponse, TextCompletionResponse]
] = _assemble_complete_response_from_streaming_chunks(
@@ -800,7 +798,6 @@ class LLMCachingHandler:
streaming_chunks=self.async_streaming_chunks,
is_async=True,
)
-
# if a complete_streaming_response is assembled, add it to the cache
if complete_streaming_response is not None:
await self.async_set_cache(
@@ -865,9 +862,9 @@ class LLMCachingHandler:
}
if litellm.cache is not None:
- litellm_params["preset_cache_key"] = (
- litellm.cache._get_preset_cache_key_from_kwargs(**kwargs)
- )
+ litellm_params[
+ "preset_cache_key"
+ ] = litellm.cache._get_preset_cache_key_from_kwargs(**kwargs)
else:
litellm_params["preset_cache_key"] = None
diff --git a/litellm/caching/disk_cache.py b/litellm/caching/disk_cache.py
index abf3203f50..413ac2932d 100644
--- a/litellm/caching/disk_cache.py
+++ b/litellm/caching/disk_cache.py
@@ -1,12 +1,12 @@
import json
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any, Optional, Union
from .base_cache import BaseCache
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span
- Span = _Span
+ Span = Union[_Span, Any]
else:
Span = Any
diff --git a/litellm/caching/dual_cache.py b/litellm/caching/dual_cache.py
index 5f598f7d70..8bef333758 100644
--- a/litellm/caching/dual_cache.py
+++ b/litellm/caching/dual_cache.py
@@ -12,7 +12,7 @@ import asyncio
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
-from typing import TYPE_CHECKING, Any, List, Optional
+from typing import TYPE_CHECKING, Any, List, Optional, Union
import litellm
from litellm._logging import print_verbose, verbose_logger
@@ -24,7 +24,7 @@ from .redis_cache import RedisCache
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span
- Span = _Span
+ Span = Union[_Span, Any]
else:
Span = Any
diff --git a/litellm/caching/in_memory_cache.py b/litellm/caching/in_memory_cache.py
index 9fca969226..e3d757d08d 100644
--- a/litellm/caching/in_memory_cache.py
+++ b/litellm/caching/in_memory_cache.py
@@ -9,8 +9,13 @@ Has 4 methods:
"""
import json
+import sys
import time
-from typing import List, Optional
+from typing import Any, List, Optional
+
+from pydantic import BaseModel
+
+from litellm.constants import MAX_SIZE_PER_ITEM_IN_MEMORY_CACHE_IN_KB
from .base_cache import BaseCache
@@ -22,6 +27,7 @@ class InMemoryCache(BaseCache):
default_ttl: Optional[
int
] = 600, # default ttl is 10 minutes. At maximum litellm rate limiting logic requires objects to be in memory for 1 minute
+ max_size_per_item: Optional[int] = 1024, # 1MB = 1024KB
):
"""
max_size_in_memory [int]: Maximum number of items in cache. done to prevent memory leaks. Use 200 items as a default
@@ -30,11 +36,54 @@ class InMemoryCache(BaseCache):
max_size_in_memory or 200
) # set an upper bound of 200 items in-memory
self.default_ttl = default_ttl or 600
+ self.max_size_per_item = (
+ max_size_per_item or MAX_SIZE_PER_ITEM_IN_MEMORY_CACHE_IN_KB
+ ) # 1MB = 1024KB
# in-memory cache
self.cache_dict: dict = {}
self.ttl_dict: dict = {}
+ def check_value_size(self, value: Any):
+ """
+ Check if value size exceeds max_size_per_item (1MB)
+ Returns True if value size is acceptable, False otherwise
+ """
+ try:
+ # Fast path for common primitive types that are typically small
+ if (
+ isinstance(value, (bool, int, float, str))
+ and len(str(value))
+ < self.max_size_per_item * MAX_SIZE_PER_ITEM_IN_MEMORY_CACHE_IN_KB
+ ): # Conservative estimate
+ return True
+
+ # Direct size check for bytes objects
+ if isinstance(value, bytes):
+ return sys.getsizeof(value) / 1024 <= self.max_size_per_item
+
+ # Handle special types without full conversion when possible
+ if hasattr(value, "__sizeof__"): # Use __sizeof__ if available
+ size = value.__sizeof__() / 1024
+ return size <= self.max_size_per_item
+
+ # Fallback for complex types
+ if isinstance(value, BaseModel) and hasattr(
+ value, "model_dump"
+ ): # Pydantic v2
+ value = value.model_dump()
+ elif hasattr(value, "isoformat"): # datetime objects
+ return True # datetime strings are always small
+
+ # Only convert to JSON if absolutely necessary
+ if not isinstance(value, (str, bytes)):
+ value = json.dumps(value, default=str)
+
+ return sys.getsizeof(value) / 1024 <= self.max_size_per_item
+
+ except Exception:
+ return False
+
def evict_cache(self):
"""
Eviction policy:
@@ -61,6 +110,8 @@ class InMemoryCache(BaseCache):
if len(self.cache_dict) >= self.max_size_in_memory:
# only evict when cache is full
self.evict_cache()
+ if not self.check_value_size(value):
+ return
self.cache_dict[key] = value
if "ttl" in kwargs and kwargs["ttl"] is not None:
diff --git a/litellm/caching/llm_caching_handler.py b/litellm/caching/llm_caching_handler.py
index 429634b7b1..3bf1f80d08 100644
--- a/litellm/caching/llm_caching_handler.py
+++ b/litellm/caching/llm_caching_handler.py
@@ -8,7 +8,6 @@ from .in_memory_cache import InMemoryCache
class LLMClientCache(InMemoryCache):
-
def update_cache_key_with_event_loop(self, key):
"""
Add the event loop to the cache key, to prevent event loop closed errors.
diff --git a/litellm/caching/qdrant_semantic_cache.py b/litellm/caching/qdrant_semantic_cache.py
index bdfd3770ae..32d4d8b0fd 100644
--- a/litellm/caching/qdrant_semantic_cache.py
+++ b/litellm/caching/qdrant_semantic_cache.py
@@ -11,10 +11,12 @@ Has 4 methods:
import ast
import asyncio
import json
-from typing import Any
+from typing import Any, cast
import litellm
from litellm._logging import print_verbose
+from litellm.constants import QDRANT_SCALAR_QUANTILE, QDRANT_VECTOR_SIZE
+from litellm.types.utils import EmbeddingResponse
from .base_cache import BaseCache
@@ -118,7 +120,11 @@ class QdrantSemanticCache(BaseCache):
}
elif quantization_config == "scalar":
quantization_params = {
- "scalar": {"type": "int8", "quantile": 0.99, "always_ram": False}
+ "scalar": {
+ "type": "int8",
+ "quantile": QDRANT_SCALAR_QUANTILE,
+ "always_ram": False,
+ }
}
elif quantization_config == "product":
quantization_params = {
@@ -132,7 +138,7 @@ class QdrantSemanticCache(BaseCache):
new_collection_status = self.sync_client.put(
url=f"{self.qdrant_api_base}/collections/{self.collection_name}",
json={
- "vectors": {"size": 1536, "distance": "Cosine"},
+ "vectors": {"size": QDRANT_VECTOR_SIZE, "distance": "Cosine"},
"quantization_config": quantization_params,
},
headers=self.headers,
@@ -171,10 +177,13 @@ class QdrantSemanticCache(BaseCache):
prompt += message["content"]
# create an embedding for prompt
- embedding_response = litellm.embedding(
- model=self.embedding_model,
- input=prompt,
- cache={"no-store": True, "no-cache": True},
+ embedding_response = cast(
+ EmbeddingResponse,
+ litellm.embedding(
+ model=self.embedding_model,
+ input=prompt,
+ cache={"no-store": True, "no-cache": True},
+ ),
)
# get the embedding
@@ -212,10 +221,13 @@ class QdrantSemanticCache(BaseCache):
prompt += message["content"]
# convert to embedding
- embedding_response = litellm.embedding(
- model=self.embedding_model,
- input=prompt,
- cache={"no-store": True, "no-cache": True},
+ embedding_response = cast(
+ EmbeddingResponse,
+ litellm.embedding(
+ model=self.embedding_model,
+ input=prompt,
+ cache={"no-store": True, "no-cache": True},
+ ),
)
# get the embedding
diff --git a/litellm/caching/redis_cache.py b/litellm/caching/redis_cache.py
index 66245e7476..31e11abf97 100644
--- a/litellm/caching/redis_cache.py
+++ b/litellm/caching/redis_cache.py
@@ -34,7 +34,7 @@ if TYPE_CHECKING:
cluster_pipeline = ClusterPipeline
async_redis_client = Redis
async_redis_cluster_client = RedisCluster
- Span = _Span
+ Span = Union[_Span, Any]
else:
pipeline = Any
cluster_pipeline = Any
@@ -54,9 +54,9 @@ class RedisCache(BaseCache):
redis_flush_size: Optional[int] = 100,
namespace: Optional[str] = None,
startup_nodes: Optional[List] = None, # for redis-cluster
+ socket_timeout: Optional[float] = 5.0, # default 5 second timeout
**kwargs,
):
-
from litellm._service_logger import ServiceLogging
from .._redis import get_redis_client, get_redis_connection_pool
@@ -70,6 +70,9 @@ class RedisCache(BaseCache):
redis_kwargs["password"] = password
if startup_nodes is not None:
redis_kwargs["startup_nodes"] = startup_nodes
+ if socket_timeout is not None:
+ redis_kwargs["socket_timeout"] = socket_timeout
+
### HEALTH MONITORING OBJECT ###
if kwargs.get("service_logger_obj", None) is not None and isinstance(
kwargs["service_logger_obj"], ServiceLogging
@@ -301,12 +304,18 @@ class RedisCache(BaseCache):
key = self.check_and_fix_namespace(key=key)
ttl = self.get_ttl(**kwargs)
+ nx = kwargs.get("nx", False)
print_verbose(f"Set ASYNC Redis Cache: key: {key}\nValue {value}\nttl={ttl}")
try:
if not hasattr(_redis_client, "set"):
raise Exception("Redis client cannot set cache. Attribute not found.")
- await _redis_client.set(name=key, value=json.dumps(value), ex=ttl)
+ result = await _redis_client.set(
+ name=key,
+ value=json.dumps(value),
+ nx=nx,
+ ex=ttl,
+ )
print_verbose(
f"Successfully Set ASYNC Redis Cache: key: {key}\nValue {value}\nttl={ttl}"
)
@@ -323,6 +332,7 @@ class RedisCache(BaseCache):
event_metadata={"key": key},
)
)
+ return result
except Exception as e:
end_time = time.time()
_duration = end_time - start_time
@@ -556,6 +566,7 @@ class RedisCache(BaseCache):
## LOGGING ##
end_time = time.time()
_duration = end_time - start_time
+
asyncio.create_task(
self.service_logger_obj.async_service_success_hook(
service=ServiceTypes.REDIS,
@@ -927,7 +938,7 @@ class RedisCache(BaseCache):
# typed as Any, redis python lib has incomplete type stubs for RedisCluster and does not include `delete`
_redis_client: Any = self.init_async_client()
# keys is str
- await _redis_client.delete(key)
+ return await _redis_client.delete(key)
def delete_cache(self, key):
self.redis_client.delete(key)
@@ -1040,3 +1051,109 @@ class RedisCache(BaseCache):
except Exception as e:
verbose_logger.debug(f"Redis TTL Error: {e}")
return None
+
+ async def async_rpush(
+ self,
+ key: str,
+ values: List[Any],
+ parent_otel_span: Optional[Span] = None,
+ **kwargs,
+ ) -> int:
+ """
+ Append one or multiple values to a list stored at key
+
+ Args:
+ key: The Redis key of the list
+ values: One or more values to append to the list
+ parent_otel_span: Optional parent OpenTelemetry span
+
+ Returns:
+ int: The length of the list after the push operation
+ """
+ _redis_client: Any = self.init_async_client()
+ start_time = time.time()
+ try:
+ response = await _redis_client.rpush(key, *values)
+ ## LOGGING ##
+ end_time = time.time()
+ _duration = end_time - start_time
+ asyncio.create_task(
+ self.service_logger_obj.async_service_success_hook(
+ service=ServiceTypes.REDIS,
+ duration=_duration,
+ call_type="async_rpush",
+ )
+ )
+ return response
+ except Exception as e:
+ # NON blocking - notify users Redis is throwing an exception
+ ## LOGGING ##
+ end_time = time.time()
+ _duration = end_time - start_time
+ asyncio.create_task(
+ self.service_logger_obj.async_service_failure_hook(
+ service=ServiceTypes.REDIS,
+ duration=_duration,
+ error=e,
+ call_type="async_rpush",
+ )
+ )
+ verbose_logger.error(
+ f"LiteLLM Redis Cache RPUSH: - Got exception from REDIS : {str(e)}"
+ )
+ raise e
+
+ async def async_lpop(
+ self,
+ key: str,
+ count: Optional[int] = None,
+ parent_otel_span: Optional[Span] = None,
+ **kwargs,
+ ) -> Union[Any, List[Any]]:
+ _redis_client: Any = self.init_async_client()
+ start_time = time.time()
+ print_verbose(f"LPOP from Redis list: key: {key}, count: {count}")
+ try:
+ result = await _redis_client.lpop(key, count)
+ ## LOGGING ##
+ end_time = time.time()
+ _duration = end_time - start_time
+ asyncio.create_task(
+ self.service_logger_obj.async_service_success_hook(
+ service=ServiceTypes.REDIS,
+ duration=_duration,
+ call_type="async_lpop",
+ )
+ )
+
+ # Handle result parsing if needed
+ if isinstance(result, bytes):
+ try:
+ return result.decode("utf-8")
+ except Exception:
+ return result
+ elif isinstance(result, list) and all(
+ isinstance(item, bytes) for item in result
+ ):
+ try:
+ return [item.decode("utf-8") for item in result]
+ except Exception:
+ return result
+ return result
+ except Exception as e:
+ # NON blocking - notify users Redis is throwing an exception
+ ## LOGGING ##
+ end_time = time.time()
+ _duration = end_time - start_time
+ asyncio.create_task(
+ self.service_logger_obj.async_service_failure_hook(
+ service=ServiceTypes.REDIS,
+ duration=_duration,
+ error=e,
+ call_type="async_lpop",
+ )
+ )
+ verbose_logger.error(
+ f"LiteLLM Redis Cache LPOP: - Got exception from REDIS : {str(e)}"
+ )
+ raise e
diff --git a/litellm/caching/redis_cluster_cache.py b/litellm/caching/redis_cluster_cache.py
index 2e7d1de17f..21c3ab0366 100644
--- a/litellm/caching/redis_cluster_cache.py
+++ b/litellm/caching/redis_cluster_cache.py
@@ -5,7 +5,7 @@ Key differences:
- RedisClient NEEDs to be re-used across requests, adds 3000ms latency if it's re-created
"""
-from typing import TYPE_CHECKING, Any, List, Optional
+from typing import TYPE_CHECKING, Any, List, Optional, Union
from litellm.caching.redis_cache import RedisCache
@@ -16,7 +16,7 @@ if TYPE_CHECKING:
pipeline = Pipeline
async_redis_client = Redis
- Span = _Span
+ Span = Union[_Span, Any]
else:
pipeline = Any
async_redis_client = Any
diff --git a/litellm/caching/redis_semantic_cache.py b/litellm/caching/redis_semantic_cache.py
index b609286a55..c76f27377d 100644
--- a/litellm/caching/redis_semantic_cache.py
+++ b/litellm/caching/redis_semantic_cache.py
@@ -1,337 +1,450 @@
"""
-Redis Semantic Cache implementation
+Redis Semantic Cache implementation for LiteLLM
-Has 4 methods:
- - set_cache
- - get_cache
- - async_set_cache
- - async_get_cache
+The RedisSemanticCache provides semantic caching functionality using Redis as a backend.
+This cache stores responses based on the semantic similarity of prompts rather than
+exact matching, allowing for more flexible caching of LLM responses.
+
+This implementation uses RedisVL's SemanticCache to find semantically similar prompts
+and their cached responses.
"""
import ast
import asyncio
import json
-from typing import Any
+import os
+from typing import Any, Dict, List, Optional, Tuple, cast
import litellm
from litellm._logging import print_verbose
+from litellm.litellm_core_utils.prompt_templates.common_utils import (
+ get_str_from_messages,
+)
+from litellm.types.utils import EmbeddingResponse
from .base_cache import BaseCache
class RedisSemanticCache(BaseCache):
+ """
+ Redis-backed semantic cache for LLM responses.
+
+ This cache uses vector similarity to find semantically similar prompts that have been
+ previously sent to the LLM, allowing for cache hits even when prompts are not identical
+ but carry similar meaning.
+ """
+
+ DEFAULT_REDIS_INDEX_NAME: str = "litellm_semantic_cache_index"
+
def __init__(
self,
- host=None,
- port=None,
- password=None,
- redis_url=None,
- similarity_threshold=None,
- use_async=False,
- embedding_model="text-embedding-ada-002",
+ host: Optional[str] = None,
+ port: Optional[str] = None,
+ password: Optional[str] = None,
+ redis_url: Optional[str] = None,
+ similarity_threshold: Optional[float] = None,
+ embedding_model: str = "text-embedding-ada-002",
+ index_name: Optional[str] = None,
**kwargs,
):
- from redisvl.index import SearchIndex
-
- print_verbose(
- "redis semantic-cache initializing INDEX - litellm_semantic_cache_index"
- )
- if similarity_threshold is None:
- raise Exception("similarity_threshold must be provided, passed None")
- self.similarity_threshold = similarity_threshold
- self.embedding_model = embedding_model
- schema = {
- "index": {
- "name": "litellm_semantic_cache_index",
- "prefix": "litellm",
- "storage_type": "hash",
- },
- "fields": {
- "text": [{"name": "response"}],
- "vector": [
- {
- "name": "litellm_embedding",
- "dims": 1536,
- "distance_metric": "cosine",
- "algorithm": "flat",
- "datatype": "float32",
- }
- ],
- },
- }
- if redis_url is None:
- # if no url passed, check if host, port and password are passed, if not raise an Exception
- if host is None or port is None or password is None:
- # try checking env for host, port and password
- import os
-
- host = os.getenv("REDIS_HOST")
- port = os.getenv("REDIS_PORT")
- password = os.getenv("REDIS_PASSWORD")
- if host is None or port is None or password is None:
- raise Exception("Redis host, port, and password must be provided")
-
- redis_url = "redis://:" + password + "@" + host + ":" + port
- print_verbose(f"redis semantic-cache redis_url: {redis_url}")
- if use_async is False:
- self.index = SearchIndex.from_dict(schema)
- self.index.connect(redis_url=redis_url)
- try:
- self.index.create(overwrite=False) # don't overwrite existing index
- except Exception as e:
- print_verbose(f"Got exception creating semantic cache index: {str(e)}")
- elif use_async is True:
- schema["index"]["name"] = "litellm_semantic_cache_index_async"
- self.index = SearchIndex.from_dict(schema)
- self.index.connect(redis_url=redis_url, use_async=True)
-
- #
- def _get_cache_logic(self, cached_response: Any):
"""
- Common 'get_cache_logic' across sync + async redis client implementations
+ Initialize the Redis Semantic Cache.
+
+ Args:
+ host: Redis host address
+ port: Redis port
+ password: Redis password
+ redis_url: Full Redis URL (alternative to separate host/port/password)
+ similarity_threshold: Threshold for semantic similarity (0.0 to 1.0)
+ where 1.0 requires exact matches and 0.0 accepts any match
+ embedding_model: Model to use for generating embeddings
+ index_name: Name for the Redis index
+ **kwargs: Additional arguments passed to the Redis client
+
+ Raises:
+ Exception: If similarity_threshold is not provided or required Redis
+ connection information is missing
+ """
+ from redisvl.extensions.llmcache import SemanticCache
+ from redisvl.utils.vectorize import CustomTextVectorizer
+
+ if index_name is None:
+ index_name = self.DEFAULT_REDIS_INDEX_NAME
+
+ print_verbose(f"Redis semantic-cache initializing index - {index_name}")
+
+ # Validate similarity threshold
+ if similarity_threshold is None:
+ raise ValueError("similarity_threshold must be provided, passed None")
+
+ # Store configuration
+ self.similarity_threshold = similarity_threshold
+
+ # Convert similarity threshold [0,1] to distance threshold [0,2]
+ # For cosine distance: 0 = most similar, 2 = least similar
+ # While similarity: 1 = most similar, 0 = least similar
+ self.distance_threshold = 1 - similarity_threshold
+ self.embedding_model = embedding_model
+
+ # Set up Redis connection
+ if redis_url is None:
+ try:
+ # Attempt to use provided parameters or fallback to environment variables
+ host = host or os.environ["REDIS_HOST"]
+ port = port or os.environ["REDIS_PORT"]
+ password = password or os.environ["REDIS_PASSWORD"]
+ except KeyError as e:
+ # Raise a more informative exception if any of the required keys are missing
+ missing_var = e.args[0]
+ raise ValueError(
+ f"Missing required Redis configuration: {missing_var}. "
+ f"Provide {missing_var} or redis_url."
+ ) from e
+
+ redis_url = f"redis://:{password}@{host}:{port}"
+
+ print_verbose(f"Redis semantic-cache redis_url: {redis_url}")
+
+ # Initialize the Redis vectorizer and cache
+ cache_vectorizer = CustomTextVectorizer(self._get_embedding)
+
+ self.llmcache = SemanticCache(
+ name=index_name,
+ redis_url=redis_url,
+ vectorizer=cache_vectorizer,
+ distance_threshold=self.distance_threshold,
+ overwrite=False,
+ )
+
+ def _get_ttl(self, **kwargs) -> Optional[int]:
+ """
+ Get the TTL (time-to-live) value for cache entries.
+
+ Args:
+ **kwargs: Keyword arguments that may contain a custom TTL
+
+ Returns:
+ Optional[int]: The TTL value in seconds, or None if no TTL should be applied
+ """
+ ttl = kwargs.get("ttl")
+ if ttl is not None:
+ ttl = int(ttl)
+ return ttl
+
+ def _get_embedding(self, prompt: str) -> List[float]:
+ """
+ Generate an embedding vector for the given prompt using the configured embedding model.
+
+ Args:
+ prompt: The text to generate an embedding for
+
+ Returns:
+ List[float]: The embedding vector
+ """
+ # Create an embedding from prompt
+ embedding_response = cast(
+ EmbeddingResponse,
+ litellm.embedding(
+ model=self.embedding_model,
+ input=prompt,
+ cache={"no-store": True, "no-cache": True},
+ ),
+ )
+ embedding = embedding_response["data"][0]["embedding"]
+ return embedding
+
+ def _get_cache_logic(self, cached_response: Any) -> Any:
+ """
+ Process the cached response to prepare it for use.
+
+ Args:
+ cached_response: The raw cached response
+
+ Returns:
+ The processed cache response, or None if input was None
"""
if cached_response is None:
return cached_response
- # check if cached_response is bytes
+ # Convert bytes to string if needed
if isinstance(cached_response, bytes):
cached_response = cached_response.decode("utf-8")
+ # Convert string representation to Python object
try:
- cached_response = json.loads(
- cached_response
- ) # Convert string to dictionary
- except Exception:
- cached_response = ast.literal_eval(cached_response)
- return cached_response
-
- def set_cache(self, key, value, **kwargs):
- import numpy as np
-
- print_verbose(f"redis semantic-cache set_cache, kwargs: {kwargs}")
-
- # get the prompt
- messages = kwargs["messages"]
- prompt = "".join(message["content"] for message in messages)
-
- # create an embedding for prompt
- embedding_response = litellm.embedding(
- model=self.embedding_model,
- input=prompt,
- cache={"no-store": True, "no-cache": True},
- )
-
- # get the embedding
- embedding = embedding_response["data"][0]["embedding"]
-
- # make the embedding a numpy array, convert to bytes
- embedding_bytes = np.array(embedding, dtype=np.float32).tobytes()
- value = str(value)
- assert isinstance(value, str)
-
- new_data = [
- {"response": value, "prompt": prompt, "litellm_embedding": embedding_bytes}
- ]
-
- # Add more data
- self.index.load(new_data)
-
- return
-
- def get_cache(self, key, **kwargs):
- print_verbose(f"sync redis semantic-cache get_cache, kwargs: {kwargs}")
- from redisvl.query import VectorQuery
-
- # query
- # get the messages
- messages = kwargs["messages"]
- prompt = "".join(message["content"] for message in messages)
-
- # convert to embedding
- embedding_response = litellm.embedding(
- model=self.embedding_model,
- input=prompt,
- cache={"no-store": True, "no-cache": True},
- )
-
- # get the embedding
- embedding = embedding_response["data"][0]["embedding"]
-
- query = VectorQuery(
- vector=embedding,
- vector_field_name="litellm_embedding",
- return_fields=["response", "prompt", "vector_distance"],
- num_results=1,
- )
-
- results = self.index.query(query)
- if results is None:
- return None
- if isinstance(results, list):
- if len(results) == 0:
+ cached_response = json.loads(cached_response)
+ except json.JSONDecodeError:
+ try:
+ cached_response = ast.literal_eval(cached_response)
+ except (ValueError, SyntaxError) as e:
+ print_verbose(f"Error parsing cached response: {str(e)}")
return None
- vector_distance = results[0]["vector_distance"]
- vector_distance = float(vector_distance)
- similarity = 1 - vector_distance
- cached_prompt = results[0]["prompt"]
+ return cached_response
- # check similarity, if more than self.similarity_threshold, return results
- print_verbose(
- f"semantic cache: similarity threshold: {self.similarity_threshold}, similarity: {similarity}, prompt: {prompt}, closest_cached_prompt: {cached_prompt}"
- )
- if similarity > self.similarity_threshold:
- # cache hit !
- cached_value = results[0]["response"]
+ def set_cache(self, key: str, value: Any, **kwargs) -> None:
+ """
+ Store a value in the semantic cache.
+
+ Args:
+ key: The cache key (not directly used in semantic caching)
+ value: The response value to cache
+ **kwargs: Additional arguments including 'messages' for the prompt
+ and optional 'ttl' for time-to-live
+ """
+ print_verbose(f"Redis semantic-cache set_cache, kwargs: {kwargs}")
+
+ value_str: Optional[str] = None
+ try:
+ # Extract the prompt from messages
+ messages = kwargs.get("messages", [])
+ if not messages:
+ print_verbose("No messages provided for semantic caching")
+ return
+
+ prompt = get_str_from_messages(messages)
+ value_str = str(value)
+
+ # Get TTL and store in Redis semantic cache
+ ttl = self._get_ttl(**kwargs)
+ if ttl is not None:
+ self.llmcache.store(prompt, value_str, ttl=int(ttl))
+ else:
+ self.llmcache.store(prompt, value_str)
+ except Exception as e:
print_verbose(
- f"got a cache hit, similarity: {similarity}, Current prompt: {prompt}, cached_prompt: {cached_prompt}"
+ f"Error setting {value_str or value} in the Redis semantic cache: {str(e)}"
)
- return self._get_cache_logic(cached_response=cached_value)
- else:
- # cache miss !
- return None
- pass
+ def get_cache(self, key: str, **kwargs) -> Any:
+ """
+ Retrieve a semantically similar cached response.
- async def async_set_cache(self, key, value, **kwargs):
- import numpy as np
+ Args:
+ key: The cache key (not directly used in semantic caching)
+ **kwargs: Additional arguments including 'messages' for the prompt
- from litellm.proxy.proxy_server import llm_model_list, llm_router
+ Returns:
+ The cached response if a semantically similar prompt is found, else None
+ """
+ print_verbose(f"Redis semantic-cache get_cache, kwargs: {kwargs}")
try:
- await self.index.acreate(overwrite=False) # don't overwrite existing index
+ # Extract the prompt from messages
+ messages = kwargs.get("messages", [])
+ if not messages:
+ print_verbose("No messages provided for semantic cache lookup")
+ return None
+
+ prompt = get_str_from_messages(messages)
+ # Check the cache for semantically similar prompts
+ results = self.llmcache.check(prompt=prompt)
+
+ # Return None if no similar prompts found
+ if not results:
+ return None
+
+ # Process the best matching result
+ cache_hit = results[0]
+ vector_distance = float(cache_hit["vector_distance"])
+
+ # Convert vector distance back to similarity score
+ # For cosine distance: 0 = most similar, 2 = least similar
+ # While similarity: 1 = most similar, 0 = least similar
+ similarity = 1 - vector_distance
+
+ cached_prompt = cache_hit["prompt"]
+ cached_response = cache_hit["response"]
+
+ print_verbose(
+ f"Cache hit: similarity threshold: {self.similarity_threshold}, "
+ f"actual similarity: {similarity}, "
+ f"current prompt: {prompt}, "
+ f"cached prompt: {cached_prompt}"
+ )
+
+ return self._get_cache_logic(cached_response=cached_response)
except Exception as e:
- print_verbose(f"Got exception creating semantic cache index: {str(e)}")
- print_verbose(f"async redis semantic-cache set_cache, kwargs: {kwargs}")
+ print_verbose(f"Error retrieving from Redis semantic cache: {str(e)}")
- # get the prompt
- messages = kwargs["messages"]
- prompt = "".join(message["content"] for message in messages)
- # create an embedding for prompt
- router_model_names = (
- [m["model_name"] for m in llm_model_list]
- if llm_model_list is not None
- else []
- )
- if llm_router is not None and self.embedding_model in router_model_names:
- user_api_key = kwargs.get("metadata", {}).get("user_api_key", "")
- embedding_response = await llm_router.aembedding(
- model=self.embedding_model,
- input=prompt,
- cache={"no-store": True, "no-cache": True},
- metadata={
- "user_api_key": user_api_key,
- "semantic-cache-embedding": True,
- "trace_id": kwargs.get("metadata", {}).get("trace_id", None),
- },
- )
- else:
- # convert to embedding
- embedding_response = await litellm.aembedding(
- model=self.embedding_model,
- input=prompt,
- cache={"no-store": True, "no-cache": True},
- )
+ async def _get_async_embedding(self, prompt: str, **kwargs) -> List[float]:
+ """
+ Asynchronously generate an embedding for the given prompt.
- # get the embedding
- embedding = embedding_response["data"][0]["embedding"]
-
- # make the embedding a numpy array, convert to bytes
- embedding_bytes = np.array(embedding, dtype=np.float32).tobytes()
- value = str(value)
- assert isinstance(value, str)
-
- new_data = [
- {"response": value, "prompt": prompt, "litellm_embedding": embedding_bytes}
- ]
-
- # Add more data
- await self.index.aload(new_data)
- return
-
- async def async_get_cache(self, key, **kwargs):
- print_verbose(f"async redis semantic-cache get_cache, kwargs: {kwargs}")
- from redisvl.query import VectorQuery
+ Args:
+ prompt: The text to generate an embedding for
+ **kwargs: Additional arguments that may contain metadata
+ Returns:
+ List[float]: The embedding vector
+ """
from litellm.proxy.proxy_server import llm_model_list, llm_router
- # query
- # get the messages
- messages = kwargs["messages"]
- prompt = "".join(message["content"] for message in messages)
-
+ # Route the embedding request through the proxy if appropriate
router_model_names = (
[m["model_name"] for m in llm_model_list]
if llm_model_list is not None
else []
)
- if llm_router is not None and self.embedding_model in router_model_names:
- user_api_key = kwargs.get("metadata", {}).get("user_api_key", "")
- embedding_response = await llm_router.aembedding(
- model=self.embedding_model,
- input=prompt,
- cache={"no-store": True, "no-cache": True},
- metadata={
- "user_api_key": user_api_key,
- "semantic-cache-embedding": True,
- "trace_id": kwargs.get("metadata", {}).get("trace_id", None),
- },
- )
- else:
- # convert to embedding
- embedding_response = await litellm.aembedding(
- model=self.embedding_model,
- input=prompt,
- cache={"no-store": True, "no-cache": True},
- )
- # get the embedding
- embedding = embedding_response["data"][0]["embedding"]
+ try:
+ if llm_router is not None and self.embedding_model in router_model_names:
+ # Use the router for embedding generation
+ user_api_key = kwargs.get("metadata", {}).get("user_api_key", "")
+ embedding_response = await llm_router.aembedding(
+ model=self.embedding_model,
+ input=prompt,
+ cache={"no-store": True, "no-cache": True},
+ metadata={
+ "user_api_key": user_api_key,
+ "semantic-cache-embedding": True,
+ "trace_id": kwargs.get("metadata", {}).get("trace_id", None),
+ },
+ )
+ else:
+ # Generate embedding directly
+ embedding_response = await litellm.aembedding(
+ model=self.embedding_model,
+ input=prompt,
+ cache={"no-store": True, "no-cache": True},
+ )
- query = VectorQuery(
- vector=embedding,
- vector_field_name="litellm_embedding",
- return_fields=["response", "prompt", "vector_distance"],
- )
- results = await self.index.aquery(query)
- if results is None:
- kwargs.setdefault("metadata", {})["semantic-similarity"] = 0.0
- return None
- if isinstance(results, list):
- if len(results) == 0:
+ # Extract and return the embedding vector
+ return embedding_response["data"][0]["embedding"]
+ except Exception as e:
+ print_verbose(f"Error generating async embedding: {str(e)}")
+ raise ValueError(f"Failed to generate embedding: {str(e)}") from e
+
+ async def async_set_cache(self, key: str, value: Any, **kwargs) -> None:
+ """
+ Asynchronously store a value in the semantic cache.
+
+ Args:
+ key: The cache key (not directly used in semantic caching)
+ value: The response value to cache
+ **kwargs: Additional arguments including 'messages' for the prompt
+ and optional 'ttl' for time-to-live
+ """
+ print_verbose(f"Async Redis semantic-cache set_cache, kwargs: {kwargs}")
+
+ try:
+ # Extract the prompt from messages
+ messages = kwargs.get("messages", [])
+ if not messages:
+ print_verbose("No messages provided for semantic caching")
+ return
+
+ prompt = get_str_from_messages(messages)
+ value_str = str(value)
+
+            # Generate an embedding for the prompt
+ prompt_embedding = await self._get_async_embedding(prompt, **kwargs)
+
+ # Get TTL and store in Redis semantic cache
+ ttl = self._get_ttl(**kwargs)
+ if ttl is not None:
+ await self.llmcache.astore(
+ prompt,
+ value_str,
+ vector=prompt_embedding, # Pass through custom embedding
+ ttl=ttl,
+ )
+ else:
+ await self.llmcache.astore(
+ prompt,
+ value_str,
+ vector=prompt_embedding, # Pass through custom embedding
+ )
+ except Exception as e:
+ print_verbose(f"Error in async_set_cache: {str(e)}")
+
+ async def async_get_cache(self, key: str, **kwargs) -> Any:
+ """
+ Asynchronously retrieve a semantically similar cached response.
+
+ Args:
+ key: The cache key (not directly used in semantic caching)
+ **kwargs: Additional arguments including 'messages' for the prompt
+
+ Returns:
+ The cached response if a semantically similar prompt is found, else None
+ """
+ print_verbose(f"Async Redis semantic-cache get_cache, kwargs: {kwargs}")
+
+ try:
+ # Extract the prompt from messages
+ messages = kwargs.get("messages", [])
+ if not messages:
+ print_verbose("No messages provided for semantic cache lookup")
kwargs.setdefault("metadata", {})["semantic-similarity"] = 0.0
return None
- vector_distance = results[0]["vector_distance"]
- vector_distance = float(vector_distance)
- similarity = 1 - vector_distance
- cached_prompt = results[0]["prompt"]
+ prompt = get_str_from_messages(messages)
- # check similarity, if more than self.similarity_threshold, return results
- print_verbose(
- f"semantic cache: similarity threshold: {self.similarity_threshold}, similarity: {similarity}, prompt: {prompt}, closest_cached_prompt: {cached_prompt}"
- )
+ # Generate embedding for the prompt
+ prompt_embedding = await self._get_async_embedding(prompt, **kwargs)
- # update kwargs["metadata"] with similarity, don't rewrite the original metadata
- kwargs.setdefault("metadata", {})["semantic-similarity"] = similarity
+ # Check the cache for semantically similar prompts
+ results = await self.llmcache.acheck(prompt=prompt, vector=prompt_embedding)
+
+ # handle results / cache hit
+ if not results:
+ kwargs.setdefault("metadata", {})[
+ "semantic-similarity"
+ ] = 0.0 # TODO why here but not above??
+ return None
+
+ cache_hit = results[0]
+ vector_distance = float(cache_hit["vector_distance"])
+
+ # Convert vector distance back to similarity
+ # For cosine distance: 0 = most similar, 2 = least similar
+ # While similarity: 1 = most similar, 0 = least similar
+ similarity = 1 - vector_distance
+
+ cached_prompt = cache_hit["prompt"]
+ cached_response = cache_hit["response"]
+
+ # update kwargs["metadata"] with similarity, don't rewrite the original metadata
+ kwargs.setdefault("metadata", {})["semantic-similarity"] = similarity
- if similarity > self.similarity_threshold:
- # cache hit !
- cached_value = results[0]["response"]
print_verbose(
- f"got a cache hit, similarity: {similarity}, Current prompt: {prompt}, cached_prompt: {cached_prompt}"
+ f"Cache hit: similarity threshold: {self.similarity_threshold}, "
+ f"actual similarity: {similarity}, "
+ f"current prompt: {prompt}, "
+ f"cached prompt: {cached_prompt}"
)
- return self._get_cache_logic(cached_response=cached_value)
- else:
- # cache miss !
- return None
- pass
- async def _index_info(self):
- return await self.index.ainfo()
+ return self._get_cache_logic(cached_response=cached_response)
+ except Exception as e:
+ print_verbose(f"Error in async_get_cache: {str(e)}")
+ kwargs.setdefault("metadata", {})["semantic-similarity"] = 0.0
- async def async_set_cache_pipeline(self, cache_list, **kwargs):
- tasks = []
- for val in cache_list:
- tasks.append(self.async_set_cache(val[0], val[1], **kwargs))
- await asyncio.gather(*tasks)
+ async def _index_info(self) -> Dict[str, Any]:
+ """
+ Get information about the Redis index.
+
+ Returns:
+ Dict[str, Any]: Information about the Redis index
+ """
+ aindex = await self.llmcache._get_async_index()
+ return await aindex.info()
+
+ async def async_set_cache_pipeline(
+ self, cache_list: List[Tuple[str, Any]], **kwargs
+ ) -> None:
+ """
+ Asynchronously store multiple values in the semantic cache.
+
+ Args:
+ cache_list: List of (key, value) tuples to cache
+ **kwargs: Additional arguments
+ """
+ try:
+ tasks = []
+ for val in cache_list:
+ tasks.append(self.async_set_cache(val[0], val[1], **kwargs))
+ await asyncio.gather(*tasks)
+ except Exception as e:
+ print_verbose(f"Error in async_set_cache_pipeline: {str(e)}")
diff --git a/litellm/caching/s3_cache.py b/litellm/caching/s3_cache.py
index 301591c64f..c02e109136 100644
--- a/litellm/caching/s3_cache.py
+++ b/litellm/caching/s3_cache.py
@@ -123,7 +123,7 @@ class S3Cache(BaseCache):
) # Convert string to dictionary
except Exception:
cached_response = ast.literal_eval(cached_response)
- if type(cached_response) is not dict:
+ if not isinstance(cached_response, dict):
cached_response = dict(cached_response)
verbose_logger.debug(
f"Got S3 Cache: key: {key}, cached_response {cached_response}. Type Response {type(cached_response)}"
diff --git a/litellm/constants.py b/litellm/constants.py
index 17eece363b..8f50ac63da 100644
--- a/litellm/constants.py
+++ b/litellm/constants.py
@@ -4,21 +4,98 @@ ROUTER_MAX_FALLBACKS = 5
DEFAULT_BATCH_SIZE = 512
DEFAULT_FLUSH_INTERVAL_SECONDS = 5
DEFAULT_MAX_RETRIES = 2
+DEFAULT_MAX_RECURSE_DEPTH = 10
DEFAULT_FAILURE_THRESHOLD_PERCENT = (
0.5 # default cooldown a deployment if 50% of requests fail in a given minute
)
+DEFAULT_ALLOWED_FAILS = 3
+DEFAULT_REDIS_SYNC_INTERVAL = 1
DEFAULT_COOLDOWN_TIME_SECONDS = 5
DEFAULT_REPLICATE_POLLING_RETRIES = 5
DEFAULT_REPLICATE_POLLING_DELAY_SECONDS = 1
DEFAULT_IMAGE_TOKEN_COUNT = 250
DEFAULT_IMAGE_WIDTH = 300
DEFAULT_IMAGE_HEIGHT = 300
+DEFAULT_MAX_TOKENS = 256 # used when providers need a default
SINGLE_DEPLOYMENT_TRAFFIC_FAILURE_THRESHOLD = 1000 # Minimum number of requests to consider "reasonable traffic". Used for single-deployment cooldown logic.
+
+DEFAULT_REASONING_EFFORT_LOW_THINKING_BUDGET = 1024
+DEFAULT_REASONING_EFFORT_MEDIUM_THINKING_BUDGET = 2048
+DEFAULT_REASONING_EFFORT_HIGH_THINKING_BUDGET = 4096
+
+########## Networking constants ##############################################################
+_DEFAULT_TTL_FOR_HTTPX_CLIENTS = 3600 # 1 hour, re-use the same httpx client for 1 hour
+
+########### v2 Architecture constants for managing writing updates to the database ###########
+REDIS_UPDATE_BUFFER_KEY = "litellm_spend_update_buffer"
+REDIS_DAILY_SPEND_UPDATE_BUFFER_KEY = "litellm_daily_spend_update_buffer"
+REDIS_DAILY_TEAM_SPEND_UPDATE_BUFFER_KEY = "litellm_daily_team_spend_update_buffer"
+REDIS_DAILY_TAG_SPEND_UPDATE_BUFFER_KEY = "litellm_daily_tag_spend_update_buffer"
+MAX_REDIS_BUFFER_DEQUEUE_COUNT = 100
+MAX_SIZE_IN_MEMORY_QUEUE = 10000
+MAX_IN_MEMORY_QUEUE_FLUSH_COUNT = 1000
+###############################################################################################
+MINIMUM_PROMPT_CACHE_TOKEN_COUNT = (
+ 1024 # minimum number of tokens to cache a prompt by Anthropic
+)
+DEFAULT_TRIM_RATIO = 0.75 # default ratio of tokens to trim from the end of a prompt
+HOURS_IN_A_DAY = 24
+DAYS_IN_A_WEEK = 7
+DAYS_IN_A_MONTH = 28
+DAYS_IN_A_YEAR = 365
+REPLICATE_MODEL_NAME_WITH_ID_LENGTH = 64
+#### TOKEN COUNTING ####
+FUNCTION_DEFINITION_TOKEN_COUNT = 9
+SYSTEM_MESSAGE_TOKEN_COUNT = 4
+TOOL_CHOICE_OBJECT_TOKEN_COUNT = 4
+DEFAULT_MOCK_RESPONSE_PROMPT_TOKEN_COUNT = 10
+DEFAULT_MOCK_RESPONSE_COMPLETION_TOKEN_COUNT = 20
+MAX_SHORT_SIDE_FOR_IMAGE_HIGH_RES = 768
+MAX_LONG_SIDE_FOR_IMAGE_HIGH_RES = 2000
+MAX_TILE_WIDTH = 512
+MAX_TILE_HEIGHT = 512
+OPENAI_FILE_SEARCH_COST_PER_1K_CALLS = 2.5 / 1000
+MIN_NON_ZERO_TEMPERATURE = 0.0001
#### RELIABILITY ####
REPEATED_STREAMING_CHUNK_LIMIT = 100 # catch if model starts looping the same chunk while streaming. Uses high default to prevent false positives.
+DEFAULT_MAX_LRU_CACHE_SIZE = 16
+INITIAL_RETRY_DELAY = 0.5
+MAX_RETRY_DELAY = 8.0
+JITTER = 0.75
+DEFAULT_IN_MEMORY_TTL = 5 # default time to live for the in-memory cache
+DEFAULT_POLLING_INTERVAL = 0.03 # default polling interval for the scheduler
+AZURE_OPERATION_POLLING_TIMEOUT = 120
+REDIS_SOCKET_TIMEOUT = 0.1
+REDIS_CONNECTION_POOL_TIMEOUT = 5
+NON_LLM_CONNECTION_TIMEOUT = 15 # timeout for adjacent services (e.g. jwt auth)
+MAX_EXCEPTION_MESSAGE_LENGTH = 2000
+BEDROCK_MAX_POLICY_SIZE = 75
+REPLICATE_POLLING_DELAY_SECONDS = 0.5
+DEFAULT_ANTHROPIC_CHAT_MAX_TOKENS = 4096
+TOGETHER_AI_4_B = 4
+TOGETHER_AI_8_B = 8
+TOGETHER_AI_21_B = 21
+TOGETHER_AI_41_B = 41
+TOGETHER_AI_80_B = 80
+TOGETHER_AI_110_B = 110
+TOGETHER_AI_EMBEDDING_150_M = 150
+TOGETHER_AI_EMBEDDING_350_M = 350
+QDRANT_SCALAR_QUANTILE = 0.99
+QDRANT_VECTOR_SIZE = 1536
+CACHED_STREAMING_CHUNK_DELAY = 0.02
+MAX_SIZE_PER_ITEM_IN_MEMORY_CACHE_IN_KB = 512
+DEFAULT_MAX_TOKENS_FOR_TRITON = 2000
#### Networking settings ####
request_timeout: float = 6000 # time in seconds
STREAM_SSE_DONE_STRING: str = "[DONE]"
+### SPEND TRACKING ###
+DEFAULT_REPLICATE_GPU_PRICE_PER_SECOND = 0.001400 # price per second for a100 80GB
+FIREWORKS_AI_56_B_MOE = 56
+FIREWORKS_AI_176_B_MOE = 176
+FIREWORKS_AI_16_B = 16
+FIREWORKS_AI_80_B = 80
LITELLM_CHAT_PROVIDERS = [
"openai",
@@ -416,12 +493,17 @@ RESPONSE_FORMAT_TOOL_NAME = "json_tool_call" # default tool name used when conv
########################### Logging Callback Constants ###########################
AZURE_STORAGE_MSFT_VERSION = "2019-07-07"
+PROMETHEUS_BUDGET_METRICS_REFRESH_INTERVAL_MINUTES = 5
+MCP_TOOL_NAME_PREFIX = "mcp_tool"
########################### LiteLLM Proxy Specific Constants ###########################
########################################################################################
MAX_SPENDLOG_ROWS_TO_QUERY = (
1_000_000 # if spendLogs has more than 1M rows, do not query the DB
)
+DEFAULT_SOFT_BUDGET = (
+ 50.0 # by default all litellm proxy keys have a soft budget of 50.0
+)
# makes it clear this is a rate limit error for a litellm virtual key
RATE_LIMIT_ERROR_MESSAGE_FOR_VIRTUAL_KEY = "LiteLLM Virtual Key user_api_key_hash"
@@ -443,3 +525,19 @@ HEALTH_CHECK_TIMEOUT_SECONDS = 60 # 60 seconds
UI_SESSION_TOKEN_TEAM_ID = "litellm-dashboard"
LITELLM_PROXY_ADMIN_NAME = "default_user_id"
+
+########################### DB CRON JOB NAMES ###########################
+DB_SPEND_UPDATE_JOB_NAME = "db_spend_update_job"
+PROMETHEUS_EMIT_BUDGET_METRICS_JOB_NAME = "prometheus_emit_budget_metrics_job"
+DEFAULT_CRON_JOB_LOCK_TTL_SECONDS = 60 # 1 minute
+PROXY_BUDGET_RESCHEDULER_MIN_TIME = 597
+PROXY_BUDGET_RESCHEDULER_MAX_TIME = 605
+PROXY_BATCH_WRITE_AT = 10 # in seconds
+DEFAULT_HEALTH_CHECK_INTERVAL = 300 # 5 minutes
+PROMETHEUS_FALLBACK_STATS_SEND_TIME_HOURS = 9
+DEFAULT_MODEL_CREATED_AT_TIME = 1677610602 # returns on `/models` endpoint
+DEFAULT_SLACK_ALERTING_THRESHOLD = 300
+MAX_TEAM_LIST_LIMIT = 20
+DEFAULT_PROMPT_INJECTION_SIMILARITY_THRESHOLD = 0.7
+LENGTH_OF_LITELLM_GENERATED_KEY = 16
+SECRET_MANAGER_REFRESH_INTERVAL = 86400
diff --git a/litellm/cost_calculator.py b/litellm/cost_calculator.py
index 58600ea14f..7f3d4fcc9f 100644
--- a/litellm/cost_calculator.py
+++ b/litellm/cost_calculator.py
@@ -2,14 +2,24 @@
## File for 'response_cost' calculation in Logging
import time
from functools import lru_cache
-from typing import Any, List, Literal, Optional, Tuple, Union
+from typing import Any, List, Literal, Optional, Tuple, Union, cast
from pydantic import BaseModel
import litellm
import litellm._logging
from litellm import verbose_logger
-from litellm.litellm_core_utils.llm_cost_calc.utils import _generic_cost_per_character
+from litellm.constants import (
+ DEFAULT_MAX_LRU_CACHE_SIZE,
+ DEFAULT_REPLICATE_GPU_PRICE_PER_SECOND,
+)
+from litellm.litellm_core_utils.llm_cost_calc.tool_call_cost_tracking import (
+ StandardBuiltInToolCostTracking,
+)
+from litellm.litellm_core_utils.llm_cost_calc.utils import (
+ _generic_cost_per_character,
+ generic_cost_per_token,
+)
from litellm.llms.anthropic.cost_calculation import (
cost_per_token as anthropic_cost_per_token,
)
@@ -47,16 +57,21 @@ from litellm.llms.vertex_ai.image_generation.cost_calculator import (
from litellm.responses.utils import ResponseAPILoggingUtils
from litellm.types.llms.openai import (
HttpxBinaryResponseContent,
+ OpenAIRealtimeStreamList,
+ OpenAIRealtimeStreamResponseBaseObject,
+ OpenAIRealtimeStreamSessionEvents,
ResponseAPIUsage,
ResponsesAPIResponse,
)
from litellm.types.rerank import RerankBilledUnits, RerankResponse
from litellm.types.utils import (
CallTypesLiteral,
+ LiteLLMRealtimeStreamLoggingObject,
LlmProviders,
LlmProvidersSet,
ModelInfo,
PassthroughCallTypes,
+ StandardBuiltInToolsParams,
Usage,
)
from litellm.utils import (
@@ -271,15 +286,13 @@ def cost_per_token( # noqa: PLR0915
custom_llm_provider=custom_llm_provider,
prompt_characters=prompt_characters,
completion_characters=completion_characters,
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
+ usage=usage_block,
)
elif cost_router == "cost_per_token":
return google_cost_per_token(
model=model_without_prefix,
custom_llm_provider=custom_llm_provider,
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
+ usage=usage_block,
)
elif custom_llm_provider == "anthropic":
return anthropic_cost_per_token(model=model, usage=usage_block)
@@ -353,9 +366,7 @@ def cost_per_token( # noqa: PLR0915
def get_replicate_completion_pricing(completion_response: dict, total_time=0.0):
# see https://replicate.com/pricing
# for all litellm currently supported LLMs, almost all requests go to a100_80gb
- a100_80gb_price_per_second_public = (
- 0.001400 # assume all calls sent to A100 80GB for now
- )
+ a100_80gb_price_per_second_public = DEFAULT_REPLICATE_GPU_PRICE_PER_SECOND # assume all calls sent to A100 80GB for now
if total_time == 0.0: # total time is in ms
start_time = completion_response.get("created", time.time())
end_time = getattr(completion_response, "ended", time.time())
@@ -393,6 +404,7 @@ def _select_model_name_for_cost_calc(
base_model: Optional[str] = None,
custom_pricing: Optional[bool] = None,
custom_llm_provider: Optional[str] = None,
+ router_model_id: Optional[str] = None,
) -> Optional[str]:
"""
1. If custom pricing is true, return received model name
@@ -407,12 +419,6 @@ def _select_model_name_for_cost_calc(
model=model, custom_llm_provider=custom_llm_provider
)
- if custom_pricing is True:
- return_model = model
-
- if base_model is not None:
- return_model = base_model
-
completion_response_model: Optional[str] = None
if completion_response is not None:
if isinstance(completion_response, BaseModel):
@@ -420,6 +426,16 @@ def _select_model_name_for_cost_calc(
elif isinstance(completion_response, dict):
completion_response_model = completion_response.get("model", None)
hidden_params: Optional[dict] = getattr(completion_response, "_hidden_params", None)
+
+ if custom_pricing is True:
+ if router_model_id is not None and router_model_id in litellm.model_cost:
+ return_model = router_model_id
+ else:
+ return_model = model
+
+ if base_model is not None:
+ return_model = base_model
+
if completion_response_model is None and hidden_params is not None:
if (
hidden_params.get("model", None) is not None
@@ -448,7 +464,7 @@ def _select_model_name_for_cost_calc(
return return_model
-@lru_cache(maxsize=16)
+@lru_cache(maxsize=DEFAULT_MAX_LRU_CACHE_SIZE)
def _model_contains_known_llm_provider(model: str) -> bool:
"""
Check if the model contains a known llm provider
@@ -460,13 +476,36 @@ def _model_contains_known_llm_provider(model: str) -> bool:
def _get_usage_object(
completion_response: Any,
) -> Optional[Usage]:
- usage_obj: Optional[Usage] = None
- if completion_response is not None and isinstance(
- completion_response, ModelResponse
- ):
- usage_obj = completion_response.get("usage")
+ usage_obj = cast(
+ Union[Usage, ResponseAPIUsage, dict, BaseModel],
+ (
+ completion_response.get("usage")
+ if isinstance(completion_response, dict)
+ else getattr(completion_response, "get", lambda x: None)("usage")
+ ),
+ )
- return usage_obj
+ if usage_obj is None:
+ return None
+ if isinstance(usage_obj, Usage):
+ return usage_obj
+ elif (
+ usage_obj is not None
+ and (isinstance(usage_obj, dict) or isinstance(usage_obj, ResponseAPIUsage))
+ and ResponseAPILoggingUtils._is_response_api_usage(usage_obj)
+ ):
+ return ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage(
+ usage_obj
+ )
+ elif isinstance(usage_obj, dict):
+ return Usage(**usage_obj)
+ elif isinstance(usage_obj, BaseModel):
+ return Usage(**usage_obj.model_dump())
+ else:
+ verbose_logger.debug(
+ f"Unknown usage object type: {type(usage_obj)}, usage_obj: {usage_obj}"
+ )
+ return None
def _is_known_usage_objects(usage_obj):
@@ -524,6 +563,9 @@ def completion_cost( # noqa: PLR0915
optional_params: Optional[dict] = None,
custom_pricing: Optional[bool] = None,
base_model: Optional[str] = None,
+ standard_built_in_tools_params: Optional[StandardBuiltInToolsParams] = None,
+ litellm_model_name: Optional[str] = None,
+ router_model_id: Optional[str] = None,
) -> float:
"""
Calculate the cost of a given completion call fot GPT-3.5-turbo, llama2, any litellm supported llm.
@@ -554,7 +596,6 @@ def completion_cost( # noqa: PLR0915
- For un-mapped Replicate models, the cost is calculated based on the total time used for the request.
"""
try:
-
call_type = _infer_call_type(call_type, completion_response) or "completion"
if (
@@ -577,237 +618,320 @@ def completion_cost( # noqa: PLR0915
completion_response=completion_response
)
rerank_billed_units: Optional[RerankBilledUnits] = None
- model = _select_model_name_for_cost_calc(
+
+ selected_model = _select_model_name_for_cost_calc(
model=model,
completion_response=completion_response,
custom_llm_provider=custom_llm_provider,
custom_pricing=custom_pricing,
base_model=base_model,
+ router_model_id=router_model_id,
)
- verbose_logger.info(f"selected model name for cost calculation: {model}")
-
- if completion_response is not None and (
- isinstance(completion_response, BaseModel)
- or isinstance(completion_response, dict)
- ): # tts returns a custom class
- if isinstance(completion_response, dict):
- usage_obj: Optional[Union[dict, Usage]] = completion_response.get(
- "usage", {}
- )
- else:
- usage_obj = getattr(completion_response, "usage", {})
- if isinstance(usage_obj, BaseModel) and not _is_known_usage_objects(
- usage_obj=usage_obj
- ):
- setattr(
- completion_response,
- "usage",
- litellm.Usage(**usage_obj.model_dump()),
- )
- if usage_obj is None:
- _usage = {}
- elif isinstance(usage_obj, BaseModel):
- _usage = usage_obj.model_dump()
- else:
- _usage = usage_obj
-
- if ResponseAPILoggingUtils._is_response_api_usage(_usage):
- _usage = (
- ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage(
- _usage
- ).model_dump()
- )
-
- # get input/output tokens from completion_response
- prompt_tokens = _usage.get("prompt_tokens", 0)
- completion_tokens = _usage.get("completion_tokens", 0)
- cache_creation_input_tokens = _usage.get("cache_creation_input_tokens", 0)
- cache_read_input_tokens = _usage.get("cache_read_input_tokens", 0)
- if (
- "prompt_tokens_details" in _usage
- and _usage["prompt_tokens_details"] != {}
- and _usage["prompt_tokens_details"]
- ):
- prompt_tokens_details = _usage.get("prompt_tokens_details", {})
- cache_read_input_tokens = prompt_tokens_details.get("cached_tokens", 0)
-
- total_time = getattr(completion_response, "_response_ms", 0)
-
- hidden_params = getattr(completion_response, "_hidden_params", None)
- if hidden_params is not None:
- custom_llm_provider = hidden_params.get(
- "custom_llm_provider", custom_llm_provider or None
- )
- region_name = hidden_params.get("region_name", region_name)
- size = hidden_params.get("optional_params", {}).get(
- "size", "1024-x-1024"
- ) # openai default
- quality = hidden_params.get("optional_params", {}).get(
- "quality", "standard"
- ) # openai default
- n = hidden_params.get("optional_params", {}).get(
- "n", 1
- ) # openai default
- else:
- if model is None:
- raise ValueError(
- f"Model is None and does not exist in passed completion_response. Passed completion_response={completion_response}, model={model}"
- )
- if len(messages) > 0:
- prompt_tokens = token_counter(model=model, messages=messages)
- elif len(prompt) > 0:
- prompt_tokens = token_counter(model=model, text=prompt)
- completion_tokens = token_counter(model=model, text=completion)
- if model is None:
- raise ValueError(
- f"Model is None and does not exist in passed completion_response. Passed completion_response={completion_response}, model={model}"
- )
- if custom_llm_provider is None:
+ potential_model_names = [selected_model]
+ if model is not None:
+ potential_model_names.append(model)
+ for idx, model in enumerate(potential_model_names):
try:
- model, custom_llm_provider, _, _ = litellm.get_llm_provider(
- model=model
- ) # strip the llm provider from the model name -> for image gen cost calculation
+ verbose_logger.info(
+ f"selected model name for cost calculation: {model}"
+ )
+
+ if completion_response is not None and (
+ isinstance(completion_response, BaseModel)
+ or isinstance(completion_response, dict)
+ ): # tts returns a custom class
+ if isinstance(completion_response, dict):
+ usage_obj: Optional[
+ Union[dict, Usage]
+ ] = completion_response.get("usage", {})
+ else:
+ usage_obj = getattr(completion_response, "usage", {})
+ if isinstance(usage_obj, BaseModel) and not _is_known_usage_objects(
+ usage_obj=usage_obj
+ ):
+ setattr(
+ completion_response,
+ "usage",
+ litellm.Usage(**usage_obj.model_dump()),
+ )
+ if usage_obj is None:
+ _usage = {}
+ elif isinstance(usage_obj, BaseModel):
+ _usage = usage_obj.model_dump()
+ else:
+ _usage = usage_obj
+
+ if ResponseAPILoggingUtils._is_response_api_usage(_usage):
+ _usage = ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage(
+ _usage
+ ).model_dump()
+
+ # get input/output tokens from completion_response
+ prompt_tokens = _usage.get("prompt_tokens", 0)
+ completion_tokens = _usage.get("completion_tokens", 0)
+ cache_creation_input_tokens = _usage.get(
+ "cache_creation_input_tokens", 0
+ )
+ cache_read_input_tokens = _usage.get("cache_read_input_tokens", 0)
+ if (
+ "prompt_tokens_details" in _usage
+ and _usage["prompt_tokens_details"] != {}
+ and _usage["prompt_tokens_details"]
+ ):
+ prompt_tokens_details = _usage.get("prompt_tokens_details", {})
+ cache_read_input_tokens = prompt_tokens_details.get(
+ "cached_tokens", 0
+ )
+
+ total_time = getattr(completion_response, "_response_ms", 0)
+
+ hidden_params = getattr(completion_response, "_hidden_params", None)
+ if hidden_params is not None:
+ custom_llm_provider = hidden_params.get(
+ "custom_llm_provider", custom_llm_provider or None
+ )
+ region_name = hidden_params.get("region_name", region_name)
+ size = hidden_params.get("optional_params", {}).get(
+ "size", "1024-x-1024"
+ ) # openai default
+ quality = hidden_params.get("optional_params", {}).get(
+ "quality", "standard"
+ ) # openai default
+ n = hidden_params.get("optional_params", {}).get(
+ "n", 1
+ ) # openai default
+ else:
+ if model is None:
+ raise ValueError(
+ f"Model is None and does not exist in passed completion_response. Passed completion_response={completion_response}, model={model}"
+ )
+ if len(messages) > 0:
+ prompt_tokens = token_counter(model=model, messages=messages)
+ elif len(prompt) > 0:
+ prompt_tokens = token_counter(model=model, text=prompt)
+ completion_tokens = token_counter(model=model, text=completion)
+
+ if model is None:
+ raise ValueError(
+ f"Model is None and does not exist in passed completion_response. Passed completion_response={completion_response}, model={model}"
+ )
+ if custom_llm_provider is None:
+ try:
+ model, custom_llm_provider, _, _ = litellm.get_llm_provider(
+ model=model
+ ) # strip the llm provider from the model name -> for image gen cost calculation
+ except Exception as e:
+ verbose_logger.debug(
+ "litellm.cost_calculator.py::completion_cost() - Error inferring custom_llm_provider - {}".format(
+ str(e)
+ )
+ )
+ if (
+ call_type == CallTypes.image_generation.value
+ or call_type == CallTypes.aimage_generation.value
+ or call_type
+ == PassthroughCallTypes.passthrough_image_generation.value
+ ):
+ ### IMAGE GENERATION COST CALCULATION ###
+ if custom_llm_provider == "vertex_ai":
+ if isinstance(completion_response, ImageResponse):
+ return vertex_ai_image_cost_calculator(
+ model=model,
+ image_response=completion_response,
+ )
+ elif custom_llm_provider == "bedrock":
+ if isinstance(completion_response, ImageResponse):
+ return bedrock_image_cost_calculator(
+ model=model,
+ size=size,
+ image_response=completion_response,
+ optional_params=optional_params,
+ )
+ raise TypeError(
+ "completion_response must be of type ImageResponse for bedrock image cost calculation"
+ )
+ else:
+ return default_image_cost_calculator(
+ model=model,
+ quality=quality,
+ custom_llm_provider=custom_llm_provider,
+ n=n,
+ size=size,
+ optional_params=optional_params,
+ )
+ elif (
+ call_type == CallTypes.speech.value
+ or call_type == CallTypes.aspeech.value
+ ):
+ prompt_characters = litellm.utils._count_characters(text=prompt)
+ elif (
+ call_type == CallTypes.atranscription.value
+ or call_type == CallTypes.transcription.value
+ ):
+ audio_transcription_file_duration = getattr(
+ completion_response, "duration", 0.0
+ )
+ elif (
+ call_type == CallTypes.rerank.value
+ or call_type == CallTypes.arerank.value
+ ):
+ if completion_response is not None and isinstance(
+ completion_response, RerankResponse
+ ):
+ meta_obj = completion_response.meta
+ if meta_obj is not None:
+ billed_units = meta_obj.get("billed_units", {}) or {}
+ else:
+ billed_units = {}
+
+ rerank_billed_units = RerankBilledUnits(
+ search_units=billed_units.get("search_units"),
+ total_tokens=billed_units.get("total_tokens"),
+ )
+
+ search_units = (
+ billed_units.get("search_units") or 1
+ ) # cohere charges per request by default.
+ completion_tokens = search_units
+ elif call_type == CallTypes.arealtime.value and isinstance(
+ completion_response, LiteLLMRealtimeStreamLoggingObject
+ ):
+ if (
+ cost_per_token_usage_object is None
+ or custom_llm_provider is None
+ ):
+ raise ValueError(
+ "usage object and custom_llm_provider must be provided for realtime stream cost calculation. Got cost_per_token_usage_object={}, custom_llm_provider={}".format(
+ cost_per_token_usage_object,
+ custom_llm_provider,
+ )
+ )
+ return handle_realtime_stream_cost_calculation(
+ results=completion_response.results,
+ combined_usage_object=cost_per_token_usage_object,
+ custom_llm_provider=custom_llm_provider,
+ litellm_model_name=model,
+ )
+ # Calculate cost based on prompt_tokens, completion_tokens
+ if (
+ "togethercomputer" in model
+ or "together_ai" in model
+ or custom_llm_provider == "together_ai"
+ ):
+ # together ai prices based on size of llm
+ # get_model_params_and_category takes a model name and returns the category of LLM size it is in model_prices_and_context_window.json
+
+ model = get_model_params_and_category(
+ model, call_type=CallTypes(call_type)
+ )
+
+ # replicate llms are calculate based on time for request running
+ # see https://replicate.com/pricing
+ elif (
+ model in litellm.replicate_models or "replicate" in model
+ ) and model not in litellm.model_cost:
+ # for unmapped replicate model, default to replicate's time tracking logic
+ return get_replicate_completion_pricing(completion_response, total_time) # type: ignore
+
+ if model is None:
+ raise ValueError(
+ f"Model is None and does not exist in passed completion_response. Passed completion_response={completion_response}, model={model}"
+ )
+
+ if (
+ custom_llm_provider is not None
+ and custom_llm_provider == "vertex_ai"
+ ):
+ # Calculate the prompt characters + response characters
+ if len(messages) > 0:
+ prompt_string = litellm.utils.get_formatted_prompt(
+ data={"messages": messages}, call_type="completion"
+ )
+
+ prompt_characters = litellm.utils._count_characters(
+ text=prompt_string
+ )
+ if completion_response is not None and isinstance(
+ completion_response, ModelResponse
+ ):
+ completion_string = litellm.utils.get_response_string(
+ response_obj=completion_response
+ )
+ completion_characters = litellm.utils._count_characters(
+ text=completion_string
+ )
+
+ (
+ prompt_tokens_cost_usd_dollar,
+ completion_tokens_cost_usd_dollar,
+ ) = cost_per_token(
+ model=model,
+ prompt_tokens=prompt_tokens,
+ completion_tokens=completion_tokens,
+ custom_llm_provider=custom_llm_provider,
+ response_time_ms=total_time,
+ region_name=region_name,
+ custom_cost_per_second=custom_cost_per_second,
+ custom_cost_per_token=custom_cost_per_token,
+ prompt_characters=prompt_characters,
+ completion_characters=completion_characters,
+ cache_creation_input_tokens=cache_creation_input_tokens,
+ cache_read_input_tokens=cache_read_input_tokens,
+ usage_object=cost_per_token_usage_object,
+ call_type=cast(CallTypesLiteral, call_type),
+ audio_transcription_file_duration=audio_transcription_file_duration,
+ rerank_billed_units=rerank_billed_units,
+ )
+ _final_cost = (
+ prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar
+ )
+ _final_cost += (
+ StandardBuiltInToolCostTracking.get_cost_for_built_in_tools(
+ model=model,
+ response_object=completion_response,
+ standard_built_in_tools_params=standard_built_in_tools_params,
+ custom_llm_provider=custom_llm_provider,
+ )
+ )
+ return _final_cost
except Exception as e:
verbose_logger.debug(
- "litellm.cost_calculator.py::completion_cost() - Error inferring custom_llm_provider - {}".format(
- str(e)
+ "litellm.cost_calculator.py::completion_cost() - Error calculating cost for model={} - {}".format(
+ model, str(e)
)
)
- if (
- call_type == CallTypes.image_generation.value
- or call_type == CallTypes.aimage_generation.value
- or call_type == PassthroughCallTypes.passthrough_image_generation.value
- ):
- ### IMAGE GENERATION COST CALCULATION ###
- if custom_llm_provider == "vertex_ai":
- if isinstance(completion_response, ImageResponse):
- return vertex_ai_image_cost_calculator(
- model=model,
- image_response=completion_response,
- )
- elif custom_llm_provider == "bedrock":
- if isinstance(completion_response, ImageResponse):
- return bedrock_image_cost_calculator(
- model=model,
- size=size,
- image_response=completion_response,
- optional_params=optional_params,
- )
- raise TypeError(
- "completion_response must be of type ImageResponse for bedrock image cost calculation"
- )
- else:
- return default_image_cost_calculator(
- model=model,
- quality=quality,
- custom_llm_provider=custom_llm_provider,
- n=n,
- size=size,
- optional_params=optional_params,
- )
- elif (
- call_type == CallTypes.speech.value or call_type == CallTypes.aspeech.value
- ):
- prompt_characters = litellm.utils._count_characters(text=prompt)
- elif (
- call_type == CallTypes.atranscription.value
- or call_type == CallTypes.transcription.value
- ):
- audio_transcription_file_duration = getattr(
- completion_response, "duration", 0.0
+ if idx == len(potential_model_names) - 1:
+ raise e
+ raise Exception(
+ "Unable to calculate cost for received potential model names - {}".format(
+ potential_model_names
)
- elif (
- call_type == CallTypes.rerank.value or call_type == CallTypes.arerank.value
- ):
- if completion_response is not None and isinstance(
- completion_response, RerankResponse
- ):
- meta_obj = completion_response.meta
- if meta_obj is not None:
- billed_units = meta_obj.get("billed_units", {}) or {}
- else:
- billed_units = {}
-
- rerank_billed_units = RerankBilledUnits(
- search_units=billed_units.get("search_units"),
- total_tokens=billed_units.get("total_tokens"),
- )
-
- search_units = (
- billed_units.get("search_units") or 1
- ) # cohere charges per request by default.
- completion_tokens = search_units
- # Calculate cost based on prompt_tokens, completion_tokens
- if (
- "togethercomputer" in model
- or "together_ai" in model
- or custom_llm_provider == "together_ai"
- ):
- # together ai prices based on size of llm
- # get_model_params_and_category takes a model name and returns the category of LLM size it is in model_prices_and_context_window.json
-
- model = get_model_params_and_category(model, call_type=CallTypes(call_type))
-
- # replicate llms are calculate based on time for request running
- # see https://replicate.com/pricing
- elif (
- model in litellm.replicate_models or "replicate" in model
- ) and model not in litellm.model_cost:
- # for unmapped replicate model, default to replicate's time tracking logic
- return get_replicate_completion_pricing(completion_response, total_time) # type: ignore
-
- if model is None:
- raise ValueError(
- f"Model is None and does not exist in passed completion_response. Passed completion_response={completion_response}, model={model}"
- )
-
- if custom_llm_provider is not None and custom_llm_provider == "vertex_ai":
- # Calculate the prompt characters + response characters
- if len(messages) > 0:
- prompt_string = litellm.utils.get_formatted_prompt(
- data={"messages": messages}, call_type="completion"
- )
-
- prompt_characters = litellm.utils._count_characters(text=prompt_string)
- if completion_response is not None and isinstance(
- completion_response, ModelResponse
- ):
- completion_string = litellm.utils.get_response_string(
- response_obj=completion_response
- )
- completion_characters = litellm.utils._count_characters(
- text=completion_string
- )
-
- (
- prompt_tokens_cost_usd_dollar,
- completion_tokens_cost_usd_dollar,
- ) = cost_per_token(
- model=model,
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
- custom_llm_provider=custom_llm_provider,
- response_time_ms=total_time,
- region_name=region_name,
- custom_cost_per_second=custom_cost_per_second,
- custom_cost_per_token=custom_cost_per_token,
- prompt_characters=prompt_characters,
- completion_characters=completion_characters,
- cache_creation_input_tokens=cache_creation_input_tokens,
- cache_read_input_tokens=cache_read_input_tokens,
- usage_object=cost_per_token_usage_object,
- call_type=call_type,
- audio_transcription_file_duration=audio_transcription_file_duration,
- rerank_billed_units=rerank_billed_units,
)
- _final_cost = prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar
-
- return _final_cost
except Exception as e:
raise e
+def get_response_cost_from_hidden_params(
+ hidden_params: Union[dict, BaseModel]
+) -> Optional[float]:
+ if isinstance(hidden_params, BaseModel):
+ _hidden_params_dict = hidden_params.model_dump()
+ else:
+ _hidden_params_dict = hidden_params
+
+ additional_headers = _hidden_params_dict.get("additional_headers", {})
+ if (
+ additional_headers
+ and "llm_provider-x-litellm-response-cost" in additional_headers
+ ):
+ response_cost = additional_headers["llm_provider-x-litellm-response-cost"]
+ if response_cost is None:
+ return None
+ return float(additional_headers["llm_provider-x-litellm-response-cost"])
+ return None
+
+
def response_cost_calculator(
response_object: Union[
ModelResponse,
@@ -818,6 +942,7 @@ def response_cost_calculator(
HttpxBinaryResponseContent,
RerankResponse,
ResponsesAPIResponse,
+ LiteLLMRealtimeStreamLoggingObject,
],
model: str,
custom_llm_provider: Optional[str],
@@ -844,7 +969,10 @@ def response_cost_calculator(
base_model: Optional[str] = None,
custom_pricing: Optional[bool] = None,
prompt: str = "",
-) -> Optional[float]:
+ standard_built_in_tools_params: Optional[StandardBuiltInToolsParams] = None,
+ litellm_model_name: Optional[str] = None,
+ router_model_id: Optional[str] = None,
+) -> float:
"""
Returns
- float or None: cost of response
@@ -856,6 +984,14 @@ def response_cost_calculator(
else:
if isinstance(response_object, BaseModel):
response_object._hidden_params["optional_params"] = optional_params
+
+ if hasattr(response_object, "_hidden_params"):
+ provider_response_cost = get_response_cost_from_hidden_params(
+ response_object._hidden_params
+ )
+ if provider_response_cost is not None:
+ return provider_response_cost
+
response_cost = completion_cost(
completion_response=response_object,
model=model,
@@ -865,6 +1001,9 @@ def response_cost_calculator(
custom_pricing=custom_pricing,
base_model=base_model,
prompt=prompt,
+ standard_built_in_tools_params=standard_built_in_tools_params,
+ litellm_model_name=litellm_model_name,
+ router_model_id=router_model_id,
)
return response_cost
except Exception as e:
@@ -1039,3 +1178,173 @@ def batch_cost_calculator(
) # batch cost is usually half of the regular token cost
return total_prompt_cost, total_completion_cost
+
+
+class RealtimeAPITokenUsageProcessor:
+ @staticmethod
+ def collect_usage_from_realtime_stream_results(
+ results: OpenAIRealtimeStreamList,
+ ) -> List[Usage]:
+ """
+ Collect usage from realtime stream results
+ """
+ response_done_events: List[OpenAIRealtimeStreamResponseBaseObject] = cast(
+ List[OpenAIRealtimeStreamResponseBaseObject],
+ [result for result in results if result["type"] == "response.done"],
+ )
+ usage_objects: List[Usage] = []
+ for result in response_done_events:
+ usage_object = (
+ ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage(
+ result["response"].get("usage", {})
+ )
+ )
+ usage_objects.append(usage_object)
+ return usage_objects
+
+ @staticmethod
+ def combine_usage_objects(usage_objects: List[Usage]) -> Usage:
+ """
+ Combine multiple Usage objects into a single Usage object, checking model keys for nested values.
+ """
+ from litellm.types.utils import (
+ CompletionTokensDetails,
+ PromptTokensDetailsWrapper,
+ Usage,
+ )
+
+ combined = Usage()
+
+ # Sum basic token counts
+ for usage in usage_objects:
+ # Handle direct attributes by checking what exists in the model
+ for attr in dir(usage):
+ if not attr.startswith("_") and not callable(getattr(usage, attr)):
+ current_val = getattr(combined, attr, 0)
+ new_val = getattr(usage, attr, 0)
+ if (
+ new_val is not None
+ and isinstance(new_val, (int, float))
+ and isinstance(current_val, (int, float))
+ ):
+ setattr(combined, attr, current_val + new_val)
+ # Handle nested prompt_tokens_details
+ if hasattr(usage, "prompt_tokens_details") and usage.prompt_tokens_details:
+ if (
+ not hasattr(combined, "prompt_tokens_details")
+ or not combined.prompt_tokens_details
+ ):
+ combined.prompt_tokens_details = PromptTokensDetailsWrapper()
+
+ # Check what keys exist in the model's prompt_tokens_details
+ for attr in dir(usage.prompt_tokens_details):
+ if not attr.startswith("_") and not callable(
+ getattr(usage.prompt_tokens_details, attr)
+ ):
+ current_val = getattr(combined.prompt_tokens_details, attr, 0)
+ new_val = getattr(usage.prompt_tokens_details, attr, 0)
+ if new_val is not None:
+ setattr(
+ combined.prompt_tokens_details,
+ attr,
+ current_val + new_val,
+ )
+
+ # Handle nested completion_tokens_details
+ if (
+ hasattr(usage, "completion_tokens_details")
+ and usage.completion_tokens_details
+ ):
+ if (
+ not hasattr(combined, "completion_tokens_details")
+ or not combined.completion_tokens_details
+ ):
+ combined.completion_tokens_details = CompletionTokensDetails()
+
+ # Check what keys exist in the model's completion_tokens_details
+ for attr in dir(usage.completion_tokens_details):
+ if not attr.startswith("_") and not callable(
+ getattr(usage.completion_tokens_details, attr)
+ ):
+ current_val = getattr(
+ combined.completion_tokens_details, attr, 0
+ )
+ new_val = getattr(usage.completion_tokens_details, attr, 0)
+ if new_val is not None:
+ setattr(
+ combined.completion_tokens_details,
+ attr,
+ current_val + new_val,
+ )
+
+ return combined
+
+ @staticmethod
+ def collect_and_combine_usage_from_realtime_stream_results(
+ results: OpenAIRealtimeStreamList,
+ ) -> Usage:
+ """
+ Collect and combine usage from realtime stream results
+ """
+ collected_usage_objects = (
+ RealtimeAPITokenUsageProcessor.collect_usage_from_realtime_stream_results(
+ results
+ )
+ )
+ combined_usage_object = RealtimeAPITokenUsageProcessor.combine_usage_objects(
+ collected_usage_objects
+ )
+ return combined_usage_object
+
+ @staticmethod
+ def create_logging_realtime_object(
+ usage: Usage, results: OpenAIRealtimeStreamList
+ ) -> LiteLLMRealtimeStreamLoggingObject:
+ return LiteLLMRealtimeStreamLoggingObject(
+ usage=usage,
+ results=results,
+ )
+
+
+def handle_realtime_stream_cost_calculation(
+ results: OpenAIRealtimeStreamList,
+ combined_usage_object: Usage,
+ custom_llm_provider: str,
+ litellm_model_name: str,
+) -> float:
+ """
+ Handles the cost calculation for realtime stream responses.
+
+ Pick the 'response.done' events. Calculate total cost across all 'response.done' events.
+
+ Args:
+ results: A list of OpenAIRealtimeStreamBaseObject objects
+ """
+ received_model = None
+ potential_model_names = []
+ for result in results:
+ if result["type"] == "session.created":
+ received_model = cast(OpenAIRealtimeStreamSessionEvents, result)["session"][
+ "model"
+ ]
+ potential_model_names.append(received_model)
+
+ potential_model_names.append(litellm_model_name)
+ input_cost_per_token = 0.0
+ output_cost_per_token = 0.0
+
+ for model_name in potential_model_names:
+ try:
+ _input_cost_per_token, _output_cost_per_token = generic_cost_per_token(
+ model=model_name,
+ usage=combined_usage_object,
+ custom_llm_provider=custom_llm_provider,
+ )
+ except Exception:
+ continue
+ input_cost_per_token += _input_cost_per_token
+ output_cost_per_token += _output_cost_per_token
+ break # exit if we find a valid model
+ total_cost = input_cost_per_token + output_cost_per_token
+
+ return total_cost
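`get_response_cost_from_hidden_params` lets `response_cost_calculator` short-circuit when the upstream provider already reports a cost via the `llm_provider-x-litellm-response-cost` response header (for example, another LiteLLM proxy). A minimal sketch of the expected `_hidden_params` shape; the dollar value is made up.

```python
# Sketch of the hidden-params cost override added above; the header value is
# illustrative, but the dict shape matches what the function reads.
from litellm.cost_calculator import get_response_cost_from_hidden_params

hidden_params = {
    "additional_headers": {
        # cost already computed and forwarded by an upstream LiteLLM proxy
        "llm_provider-x-litellm-response-cost": "0.000231",
    }
}

provider_cost = get_response_cost_from_hidden_params(hidden_params)
if provider_cost is not None:
    # response_cost_calculator returns this value directly instead of re-computing it
    print(f"provider-reported cost: ${provider_cost:.6f}")
```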
diff --git a/litellm/experimental_mcp_client/Readme.md b/litellm/experimental_mcp_client/Readme.md
new file mode 100644
index 0000000000..4fbd624369
--- /dev/null
+++ b/litellm/experimental_mcp_client/Readme.md
@@ -0,0 +1,6 @@
+# LiteLLM MCP Client
+
+The LiteLLM MCP Client lets you load MCP tools and call them with LiteLLM.
+
+
+
diff --git a/litellm/experimental_mcp_client/__init__.py b/litellm/experimental_mcp_client/__init__.py
new file mode 100644
index 0000000000..7110d5375e
--- /dev/null
+++ b/litellm/experimental_mcp_client/__init__.py
@@ -0,0 +1,3 @@
+from .tools import call_openai_tool, load_mcp_tools
+
+__all__ = ["load_mcp_tools", "call_openai_tool"]
diff --git a/litellm/experimental_mcp_client/client.py b/litellm/experimental_mcp_client/client.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/litellm/experimental_mcp_client/tools.py b/litellm/experimental_mcp_client/tools.py
new file mode 100644
index 0000000000..cdc26af4b7
--- /dev/null
+++ b/litellm/experimental_mcp_client/tools.py
@@ -0,0 +1,111 @@
+import json
+from typing import Dict, List, Literal, Union
+
+from mcp import ClientSession
+from mcp.types import CallToolRequestParams as MCPCallToolRequestParams
+from mcp.types import CallToolResult as MCPCallToolResult
+from mcp.types import Tool as MCPTool
+from openai.types.chat import ChatCompletionToolParam
+from openai.types.shared_params.function_definition import FunctionDefinition
+
+from litellm.types.utils import ChatCompletionMessageToolCall
+
+
+########################################################
+# List MCP Tool functions
+########################################################
+def transform_mcp_tool_to_openai_tool(mcp_tool: MCPTool) -> ChatCompletionToolParam:
+ """Convert an MCP tool to an OpenAI tool."""
+ return ChatCompletionToolParam(
+ type="function",
+ function=FunctionDefinition(
+ name=mcp_tool.name,
+ description=mcp_tool.description or "",
+ parameters=mcp_tool.inputSchema,
+ strict=False,
+ ),
+ )
+
+
+async def load_mcp_tools(
+ session: ClientSession, format: Literal["mcp", "openai"] = "mcp"
+) -> Union[List[MCPTool], List[ChatCompletionToolParam]]:
+ """
+ Load all available MCP tools
+
+ Args:
+ session: The MCP session to use
+ format: The format to convert the tools to
+ By default, the tools are returned in MCP format.
+
+ If format is set to "openai", the tools are converted to OpenAI API compatible tools.
+ """
+ tools = await session.list_tools()
+ if format == "openai":
+ return [
+ transform_mcp_tool_to_openai_tool(mcp_tool=tool) for tool in tools.tools
+ ]
+ return tools.tools
+
+
+########################################################
+# Call MCP Tool functions
+########################################################
+
+
+async def call_mcp_tool(
+ session: ClientSession,
+ call_tool_request_params: MCPCallToolRequestParams,
+) -> MCPCallToolResult:
+ """Call an MCP tool."""
+ tool_result = await session.call_tool(
+ name=call_tool_request_params.name,
+ arguments=call_tool_request_params.arguments,
+ )
+ return tool_result
+
+
+def _get_function_arguments(function: FunctionDefinition) -> dict:
+ """Helper to safely get and parse function arguments."""
+ arguments = function.get("arguments", {})
+ if isinstance(arguments, str):
+ try:
+ arguments = json.loads(arguments)
+ except json.JSONDecodeError:
+ arguments = {}
+ return arguments if isinstance(arguments, dict) else {}
+
+
+def transform_openai_tool_call_request_to_mcp_tool_call_request(
+ openai_tool: Union[ChatCompletionMessageToolCall, Dict],
+) -> MCPCallToolRequestParams:
+ """Convert an OpenAI ChatCompletionMessageToolCall to an MCP CallToolRequestParams."""
+ function = openai_tool["function"]
+ return MCPCallToolRequestParams(
+ name=function["name"],
+ arguments=_get_function_arguments(function),
+ )
+
+
+async def call_openai_tool(
+ session: ClientSession,
+ openai_tool: ChatCompletionMessageToolCall,
+) -> MCPCallToolResult:
+ """
+ Call an OpenAI tool using MCP client.
+
+ Args:
+ session: The MCP session to use
+ openai_tool: The OpenAI tool to call. You can get this from the `choices[0].message.tool_calls[0]` of the response from the OpenAI API.
+ Returns:
+ The result of the MCP tool call.
+ """
+ mcp_tool_call_request_params = (
+ transform_openai_tool_call_request_to_mcp_tool_call_request(
+ openai_tool=openai_tool,
+ )
+ )
+ return await call_mcp_tool(
+ session=session,
+ call_tool_request_params=mcp_tool_call_request_params,
+ )
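Putting the new `experimental_mcp_client` helpers together: load the server's tools in OpenAI format, let a completion pick one, then execute the returned tool call over the MCP session. A hedged sketch; the stdio server command and model name are placeholders, and error handling is omitted.

```python
# Sketch of the intended flow; assumes an MCP server reachable over stdio and
# that the model actually returns a tool call.
import asyncio

import litellm
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

from litellm.experimental_mcp_client import call_openai_tool, load_mcp_tools


async def main():
    server_params = StdioServerParameters(command="python", args=["./my_mcp_server.py"])
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()

            # 1. Expose the MCP server's tools as OpenAI-style tool definitions
            tools = await load_mcp_tools(session=session, format="openai")

            # 2. Let the model pick a tool
            response = await litellm.acompletion(
                model="gpt-4o",
                messages=[{"role": "user", "content": "What's the weather in Paris?"}],
                tools=tools,
            )

            # 3. Execute the returned tool call against the MCP server
            tool_call = response.choices[0].message.tool_calls[0]
            result = await call_openai_tool(session=session, openai_tool=tool_call)
            print(result.content)


asyncio.run(main())
```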
diff --git a/litellm/files/main.py b/litellm/files/main.py
index db9a11ced1..ebe79c1079 100644
--- a/litellm/files/main.py
+++ b/litellm/files/main.py
@@ -15,7 +15,9 @@ import httpx
import litellm
from litellm import get_secret_str
+from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.llms.azure.files.handler import AzureOpenAIFilesAPI
+from litellm.llms.custom_httpx.llm_http_handler import BaseLLMHTTPHandler
from litellm.llms.openai.openai import FileDeleted, FileObject, OpenAIFilesAPI
from litellm.llms.vertex_ai.files.handler import VertexAIFilesHandler
from litellm.types.llms.openai import (
@@ -23,9 +25,18 @@ from litellm.types.llms.openai import (
FileContentRequest,
FileTypes,
HttpxBinaryResponseContent,
+ OpenAIFileObject,
)
from litellm.types.router import *
-from litellm.utils import get_litellm_params, supports_httpx_timeout
+from litellm.types.utils import LlmProviders
+from litellm.utils import (
+ ProviderConfigManager,
+ client,
+ get_litellm_params,
+ supports_httpx_timeout,
+)
+
+base_llm_http_handler = BaseLLMHTTPHandler()
####### ENVIRONMENT VARIABLES ###################
openai_files_instance = OpenAIFilesAPI()
@@ -34,6 +45,227 @@ vertex_ai_files_instance = VertexAIFilesHandler()
#################################################
+@client
+async def acreate_file(
+ file: FileTypes,
+ purpose: Literal["assistants", "batch", "fine-tune"],
+ custom_llm_provider: Literal["openai", "azure", "vertex_ai"] = "openai",
+ extra_headers: Optional[Dict[str, str]] = None,
+ extra_body: Optional[Dict[str, str]] = None,
+ **kwargs,
+) -> OpenAIFileObject:
+ """
+ Async: Files are used to upload documents that can be used with features like Assistants, Fine-tuning, and Batch API.
+
+ LiteLLM Equivalent of POST: POST https://api.openai.com/v1/files
+ """
+ try:
+ loop = asyncio.get_event_loop()
+ kwargs["acreate_file"] = True
+
+ call_args = {
+ "file": file,
+ "purpose": purpose,
+ "custom_llm_provider": custom_llm_provider,
+ "extra_headers": extra_headers,
+ "extra_body": extra_body,
+ **kwargs,
+ }
+
+ # Use a partial function to pass your keyword arguments
+ func = partial(create_file, **call_args)
+
+ # Add the context to the function
+ ctx = contextvars.copy_context()
+ func_with_context = partial(ctx.run, func)
+ init_response = await loop.run_in_executor(None, func_with_context)
+ if asyncio.iscoroutine(init_response):
+ response = await init_response
+ else:
+ response = init_response # type: ignore
+
+ return response
+ except Exception as e:
+ raise e
+
+
+@client
+def create_file(
+ file: FileTypes,
+ purpose: Literal["assistants", "batch", "fine-tune"],
+ custom_llm_provider: Optional[Literal["openai", "azure", "vertex_ai"]] = None,
+ extra_headers: Optional[Dict[str, str]] = None,
+ extra_body: Optional[Dict[str, str]] = None,
+ **kwargs,
+) -> Union[OpenAIFileObject, Coroutine[Any, Any, OpenAIFileObject]]:
+ """
+ Files are used to upload documents that can be used with features like Assistants, Fine-tuning, and Batch API.
+
+ LiteLLM Equivalent of POST: POST https://api.openai.com/v1/files
+
+ Specify either provider_list or custom_llm_provider.
+ """
+ try:
+ _is_async = kwargs.pop("acreate_file", False) is True
+ optional_params = GenericLiteLLMParams(**kwargs)
+ litellm_params_dict = get_litellm_params(**kwargs)
+ logging_obj = cast(
+ Optional[LiteLLMLoggingObj], kwargs.get("litellm_logging_obj")
+ )
+ if logging_obj is None:
+ raise ValueError("logging_obj is required")
+ client = kwargs.get("client")
+
+ ### TIMEOUT LOGIC ###
+ timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600
+ # set timeout for 10 minutes by default
+
+ if (
+ timeout is not None
+ and isinstance(timeout, httpx.Timeout)
+ and supports_httpx_timeout(cast(str, custom_llm_provider)) is False
+ ):
+ read_timeout = timeout.read or 600
+ timeout = read_timeout # default 10 min timeout
+ elif timeout is not None and not isinstance(timeout, httpx.Timeout):
+ timeout = float(timeout) # type: ignore
+ elif timeout is None:
+ timeout = 600.0
+
+ _create_file_request = CreateFileRequest(
+ file=file,
+ purpose=purpose,
+ extra_headers=extra_headers,
+ extra_body=extra_body,
+ )
+
+ provider_config = ProviderConfigManager.get_provider_files_config(
+ model="",
+ provider=LlmProviders(custom_llm_provider),
+ )
+ if provider_config is not None:
+ response = base_llm_http_handler.create_file(
+ provider_config=provider_config,
+ litellm_params=litellm_params_dict,
+ create_file_data=_create_file_request,
+ headers=extra_headers or {},
+ api_base=optional_params.api_base,
+ api_key=optional_params.api_key,
+ logging_obj=logging_obj,
+ _is_async=_is_async,
+ client=client
+ if client is not None
+ and isinstance(client, (HTTPHandler, AsyncHTTPHandler))
+ else None,
+ timeout=timeout,
+ )
+ elif custom_llm_provider == "openai":
+ # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
+ api_base = (
+ optional_params.api_base
+ or litellm.api_base
+ or os.getenv("OPENAI_API_BASE")
+ or "https://api.openai.com/v1"
+ )
+ organization = (
+ optional_params.organization
+ or litellm.organization
+ or os.getenv("OPENAI_ORGANIZATION", None)
+ or None # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105
+ )
+ # set API KEY
+ api_key = (
+ optional_params.api_key
+ or litellm.api_key # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there
+ or litellm.openai_key
+ or os.getenv("OPENAI_API_KEY")
+ )
+
+ response = openai_files_instance.create_file(
+ _is_async=_is_async,
+ api_base=api_base,
+ api_key=api_key,
+ timeout=timeout,
+ max_retries=optional_params.max_retries,
+ organization=organization,
+ create_file_data=_create_file_request,
+ )
+ elif custom_llm_provider == "azure":
+ api_base = optional_params.api_base or litellm.api_base or get_secret_str("AZURE_API_BASE") # type: ignore
+ api_version = (
+ optional_params.api_version
+ or litellm.api_version
+ or get_secret_str("AZURE_API_VERSION")
+ ) # type: ignore
+
+ api_key = (
+ optional_params.api_key
+ or litellm.api_key
+ or litellm.azure_key
+ or get_secret_str("AZURE_OPENAI_API_KEY")
+ or get_secret_str("AZURE_API_KEY")
+ ) # type: ignore
+
+ extra_body = optional_params.get("extra_body", {})
+ if extra_body is not None:
+ extra_body.pop("azure_ad_token", None)
+ else:
+ get_secret_str("AZURE_AD_TOKEN") # type: ignore
+
+ response = azure_files_instance.create_file(
+ _is_async=_is_async,
+ api_base=api_base,
+ api_key=api_key,
+ api_version=api_version,
+ timeout=timeout,
+ max_retries=optional_params.max_retries,
+ create_file_data=_create_file_request,
+ litellm_params=litellm_params_dict,
+ )
+ elif custom_llm_provider == "vertex_ai":
+ api_base = optional_params.api_base or ""
+ vertex_ai_project = (
+ optional_params.vertex_project
+ or litellm.vertex_project
+ or get_secret_str("VERTEXAI_PROJECT")
+ )
+ vertex_ai_location = (
+ optional_params.vertex_location
+ or litellm.vertex_location
+ or get_secret_str("VERTEXAI_LOCATION")
+ )
+ vertex_credentials = optional_params.vertex_credentials or get_secret_str(
+ "VERTEXAI_CREDENTIALS"
+ )
+
+ response = vertex_ai_files_instance.create_file(
+ _is_async=_is_async,
+ api_base=api_base,
+ vertex_project=vertex_ai_project,
+ vertex_location=vertex_ai_location,
+ vertex_credentials=vertex_credentials,
+ timeout=timeout,
+ max_retries=optional_params.max_retries,
+ create_file_data=_create_file_request,
+ )
+ else:
+ raise litellm.exceptions.BadRequestError(
+ message="LiteLLM doesn't support {} for 'create_file'. Only ['openai', 'azure', 'vertex_ai'] are supported.".format(
+ custom_llm_provider
+ ),
+ model="n/a",
+ llm_provider=custom_llm_provider,
+ response=httpx.Response(
+ status_code=400,
+ content="Unsupported provider",
+ request=httpx.Request(method="create_file", url="https://github.com/BerriAI/litellm"), # type: ignore
+ ),
+ )
+ return response
+ except Exception as e:
+ raise e
+
+
async def afile_retrieve(
file_id: str,
custom_llm_provider: Literal["openai", "azure"] = "openai",
@@ -241,9 +473,11 @@ def file_delete(
"""
try:
optional_params = GenericLiteLLMParams(**kwargs)
+ litellm_params_dict = get_litellm_params(**kwargs)
### TIMEOUT LOGIC ###
timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600
# set timeout for 10 minutes by default
+ client = kwargs.get("client")
if (
timeout is not None
@@ -317,6 +551,8 @@ def file_delete(
timeout=timeout,
max_retries=optional_params.max_retries,
file_id=file_id,
+ client=client,
+ litellm_params=litellm_params_dict,
)
else:
raise litellm.exceptions.BadRequestError(
@@ -488,195 +724,6 @@ def file_list(
raise e
-async def acreate_file(
- file: FileTypes,
- purpose: Literal["assistants", "batch", "fine-tune"],
- custom_llm_provider: Literal["openai", "azure", "vertex_ai"] = "openai",
- extra_headers: Optional[Dict[str, str]] = None,
- extra_body: Optional[Dict[str, str]] = None,
- **kwargs,
-) -> FileObject:
- """
- Async: Files are used to upload documents that can be used with features like Assistants, Fine-tuning, and Batch API.
-
- LiteLLM Equivalent of POST: POST https://api.openai.com/v1/files
- """
- try:
- loop = asyncio.get_event_loop()
- kwargs["acreate_file"] = True
-
- # Use a partial function to pass your keyword arguments
- func = partial(
- create_file,
- file,
- purpose,
- custom_llm_provider,
- extra_headers,
- extra_body,
- **kwargs,
- )
-
- # Add the context to the function
- ctx = contextvars.copy_context()
- func_with_context = partial(ctx.run, func)
- init_response = await loop.run_in_executor(None, func_with_context)
- if asyncio.iscoroutine(init_response):
- response = await init_response
- else:
- response = init_response # type: ignore
-
- return response
- except Exception as e:
- raise e
-
-
-def create_file(
- file: FileTypes,
- purpose: Literal["assistants", "batch", "fine-tune"],
- custom_llm_provider: Literal["openai", "azure", "vertex_ai"] = "openai",
- extra_headers: Optional[Dict[str, str]] = None,
- extra_body: Optional[Dict[str, str]] = None,
- **kwargs,
-) -> Union[FileObject, Coroutine[Any, Any, FileObject]]:
- """
- Files are used to upload documents that can be used with features like Assistants, Fine-tuning, and Batch API.
-
- LiteLLM Equivalent of POST: POST https://api.openai.com/v1/files
- """
- try:
- _is_async = kwargs.pop("acreate_file", False) is True
- optional_params = GenericLiteLLMParams(**kwargs)
- litellm_params_dict = get_litellm_params(**kwargs)
-
- ### TIMEOUT LOGIC ###
- timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600
- # set timeout for 10 minutes by default
-
- if (
- timeout is not None
- and isinstance(timeout, httpx.Timeout)
- and supports_httpx_timeout(custom_llm_provider) is False
- ):
- read_timeout = timeout.read or 600
- timeout = read_timeout # default 10 min timeout
- elif timeout is not None and not isinstance(timeout, httpx.Timeout):
- timeout = float(timeout) # type: ignore
- elif timeout is None:
- timeout = 600.0
-
- _create_file_request = CreateFileRequest(
- file=file,
- purpose=purpose,
- extra_headers=extra_headers,
- extra_body=extra_body,
- )
- if custom_llm_provider == "openai":
- # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
- api_base = (
- optional_params.api_base
- or litellm.api_base
- or os.getenv("OPENAI_API_BASE")
- or "https://api.openai.com/v1"
- )
- organization = (
- optional_params.organization
- or litellm.organization
- or os.getenv("OPENAI_ORGANIZATION", None)
- or None # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105
- )
- # set API KEY
- api_key = (
- optional_params.api_key
- or litellm.api_key # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there
- or litellm.openai_key
- or os.getenv("OPENAI_API_KEY")
- )
-
- response = openai_files_instance.create_file(
- _is_async=_is_async,
- api_base=api_base,
- api_key=api_key,
- timeout=timeout,
- max_retries=optional_params.max_retries,
- organization=organization,
- create_file_data=_create_file_request,
- )
- elif custom_llm_provider == "azure":
- api_base = optional_params.api_base or litellm.api_base or get_secret_str("AZURE_API_BASE") # type: ignore
- api_version = (
- optional_params.api_version
- or litellm.api_version
- or get_secret_str("AZURE_API_VERSION")
- ) # type: ignore
-
- api_key = (
- optional_params.api_key
- or litellm.api_key
- or litellm.azure_key
- or get_secret_str("AZURE_OPENAI_API_KEY")
- or get_secret_str("AZURE_API_KEY")
- ) # type: ignore
-
- extra_body = optional_params.get("extra_body", {})
- if extra_body is not None:
- extra_body.pop("azure_ad_token", None)
- else:
- get_secret_str("AZURE_AD_TOKEN") # type: ignore
-
- response = azure_files_instance.create_file(
- _is_async=_is_async,
- api_base=api_base,
- api_key=api_key,
- api_version=api_version,
- timeout=timeout,
- max_retries=optional_params.max_retries,
- create_file_data=_create_file_request,
- litellm_params=litellm_params_dict,
- )
- elif custom_llm_provider == "vertex_ai":
- api_base = optional_params.api_base or ""
- vertex_ai_project = (
- optional_params.vertex_project
- or litellm.vertex_project
- or get_secret_str("VERTEXAI_PROJECT")
- )
- vertex_ai_location = (
- optional_params.vertex_location
- or litellm.vertex_location
- or get_secret_str("VERTEXAI_LOCATION")
- )
- vertex_credentials = optional_params.vertex_credentials or get_secret_str(
- "VERTEXAI_CREDENTIALS"
- )
-
- response = vertex_ai_files_instance.create_file(
- _is_async=_is_async,
- api_base=api_base,
- vertex_project=vertex_ai_project,
- vertex_location=vertex_ai_location,
- vertex_credentials=vertex_credentials,
- timeout=timeout,
- max_retries=optional_params.max_retries,
- create_file_data=_create_file_request,
- )
- else:
- raise litellm.exceptions.BadRequestError(
- message="LiteLLM doesn't support {} for 'create_batch'. Only 'openai' is supported.".format(
- custom_llm_provider
- ),
- model="n/a",
- llm_provider=custom_llm_provider,
- response=httpx.Response(
- status_code=400,
- content="Unsupported provider",
- request=httpx.Request(method="create_thread", url="https://github.com/BerriAI/litellm"), # type: ignore
- ),
- )
- return response
- except Exception as e:
- raise e
-
-
async def afile_content(
file_id: str,
custom_llm_provider: Literal["openai", "azure"] = "openai",
@@ -731,8 +778,10 @@ def file_content(
"""
try:
optional_params = GenericLiteLLMParams(**kwargs)
+ litellm_params_dict = get_litellm_params(**kwargs)
### TIMEOUT LOGIC ###
timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600
+ client = kwargs.get("client")
# set timeout for 10 minutes by default
if (
@@ -754,6 +803,7 @@ def file_content(
)
_is_async = kwargs.pop("afile_content", False) is True
+
if custom_llm_provider == "openai":
# for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
api_base = (
@@ -815,6 +865,8 @@ def file_content(
timeout=timeout,
max_retries=optional_params.max_retries,
file_content_request=_file_content_request,
+ client=client,
+ litellm_params=litellm_params_dict,
)
else:
raise litellm.exceptions.BadRequestError(
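The refactored files API routes `create_file` through `ProviderConfigManager` when a provider config exists and exposes `acreate_file` as the `@client`-decorated async entrypoint. A minimal usage sketch, assuming an OpenAI key is set in the environment; the file name is a placeholder.

```python
# Sketch of the new acreate_file entrypoint; "training_data.jsonl" is a
# placeholder and OPENAI_API_KEY is assumed to be configured.
import asyncio

from litellm.files.main import acreate_file


async def upload_training_file():
    with open("training_data.jsonl", "rb") as f:
        file_obj = await acreate_file(
            file=f,
            purpose="fine-tune",
            custom_llm_provider="openai",
        )
    # OpenAIFileObject carries the provider-assigned file id
    print(file_obj.id)


asyncio.run(upload_training_file())
```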
diff --git a/litellm/fine_tuning/main.py b/litellm/fine_tuning/main.py
index b726a394c2..09c070fffb 100644
--- a/litellm/fine_tuning/main.py
+++ b/litellm/fine_tuning/main.py
@@ -138,7 +138,6 @@ def create_fine_tuning_job(
# OpenAI
if custom_llm_provider == "openai":
-
# for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
api_base = (
optional_params.api_base
@@ -360,7 +359,6 @@ def cancel_fine_tuning_job(
# OpenAI
if custom_llm_provider == "openai":
-
# for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
api_base = (
optional_params.api_base
@@ -522,7 +520,6 @@ def list_fine_tuning_jobs(
# OpenAI
if custom_llm_provider == "openai":
-
# for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
api_base = (
optional_params.api_base
diff --git a/litellm/integrations/SlackAlerting/batching_handler.py b/litellm/integrations/SlackAlerting/batching_handler.py
index e35cf61d63..fdce2e0479 100644
--- a/litellm/integrations/SlackAlerting/batching_handler.py
+++ b/litellm/integrations/SlackAlerting/batching_handler.py
@@ -19,7 +19,6 @@ else:
def squash_payloads(queue):
-
squashed = {}
if len(queue) == 0:
return squashed
diff --git a/litellm/integrations/SlackAlerting/slack_alerting.py b/litellm/integrations/SlackAlerting/slack_alerting.py
index a2e6264760..9fde042ae7 100644
--- a/litellm/integrations/SlackAlerting/slack_alerting.py
+++ b/litellm/integrations/SlackAlerting/slack_alerting.py
@@ -16,6 +16,7 @@ import litellm.litellm_core_utils.litellm_logging
import litellm.types
from litellm._logging import verbose_logger, verbose_proxy_logger
from litellm.caching.caching import DualCache
+from litellm.constants import HOURS_IN_A_DAY
from litellm.integrations.custom_batch_logger import CustomBatchLogger
from litellm.litellm_core_utils.duration_parser import duration_in_seconds
from litellm.litellm_core_utils.exception_mapping_utils import (
@@ -195,12 +196,15 @@ class SlackAlerting(CustomBatchLogger):
if self.alerting is None or self.alert_types is None:
return
- time_difference_float, model, api_base, messages = (
- self._response_taking_too_long_callback_helper(
- kwargs=kwargs,
- start_time=start_time,
- end_time=end_time,
- )
+ (
+ time_difference_float,
+ model,
+ api_base,
+ messages,
+ ) = self._response_taking_too_long_callback_helper(
+ kwargs=kwargs,
+ start_time=start_time,
+ end_time=end_time,
)
if litellm.turn_off_message_logging or litellm.redact_messages_in_exceptions:
messages = "Message not logged. litellm.redact_messages_in_exceptions=True"
@@ -646,10 +650,10 @@ class SlackAlerting(CustomBatchLogger):
event_message += (
f"Budget Crossed\n Total Budget:`{user_info.max_budget}`"
)
- elif percent_left <= 0.05:
+ elif percent_left <= SLACK_ALERTING_THRESHOLD_5_PERCENT:
event = "threshold_crossed"
event_message += "5% Threshold Crossed "
- elif percent_left <= 0.15:
+ elif percent_left <= SLACK_ALERTING_THRESHOLD_15_PERCENT:
event = "threshold_crossed"
event_message += "15% Threshold Crossed"
elif user_info.soft_budget is not None:
@@ -819,9 +823,9 @@ class SlackAlerting(CustomBatchLogger):
### UNIQUE CACHE KEY ###
cache_key = provider + region_name
- outage_value: Optional[ProviderRegionOutageModel] = (
- await self.internal_usage_cache.async_get_cache(key=cache_key)
- )
+ outage_value: Optional[
+ ProviderRegionOutageModel
+ ] = await self.internal_usage_cache.async_get_cache(key=cache_key)
if (
getattr(exception, "status_code", None) is None
@@ -1402,9 +1406,9 @@ Model Info:
self.alert_to_webhook_url is not None
and alert_type in self.alert_to_webhook_url
):
- slack_webhook_url: Optional[Union[str, List[str]]] = (
- self.alert_to_webhook_url[alert_type]
- )
+ slack_webhook_url: Optional[
+ Union[str, List[str]]
+ ] = self.alert_to_webhook_url[alert_type]
elif self.default_webhook_url is not None:
slack_webhook_url = self.default_webhook_url
else:
@@ -1715,7 +1719,7 @@ Model Info:
await self.internal_usage_cache.async_set_cache(
key=_event_cache_key,
value="SENT",
- ttl=(30 * 24 * 60 * 60), # 1 month
+ ttl=(30 * HOURS_IN_A_DAY * 60 * 60), # 1 month
)
except Exception as e:
@@ -1768,7 +1772,6 @@ Model Info:
- Team Created, Updated, Deleted
"""
try:
-
message = f"`{event_name}`\n"
key_event_dict = key_event.model_dump()
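The Slack alerting changes swap hard-coded numbers for named constants (`HOURS_IN_A_DAY` and the 5%/15% budget thresholds). A small sketch of the arithmetic they encode; the constant values shown are assumptions, since their definitions sit outside these hunks.

```python
# Assumed values -- the real constants live in litellm.constants and are not
# part of the hunks above.
from typing import Optional

HOURS_IN_A_DAY = 24
SLACK_ALERTING_THRESHOLD_5_PERCENT = 0.05
SLACK_ALERTING_THRESHOLD_15_PERCENT = 0.15

# The cache TTL above works out to one month in seconds:
ttl = 30 * HOURS_IN_A_DAY * 60 * 60
assert ttl == 2_592_000  # 30 days


def crossed_threshold(spend: float, max_budget: float) -> Optional[str]:
    """Mirror of the budget-alert branch: percent_left is the budget fraction remaining."""
    percent_left = (max_budget - spend) / max_budget
    if percent_left <= SLACK_ALERTING_THRESHOLD_5_PERCENT:
        return "5% Threshold Crossed"
    if percent_left <= SLACK_ALERTING_THRESHOLD_15_PERCENT:
        return "15% Threshold Crossed"
    return None
```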
diff --git a/litellm/integrations/_types/open_inference.py b/litellm/integrations/_types/open_inference.py
index b5076c0e42..bcfabe9b7b 100644
--- a/litellm/integrations/_types/open_inference.py
+++ b/litellm/integrations/_types/open_inference.py
@@ -283,4 +283,4 @@ class OpenInferenceSpanKindValues(Enum):
class OpenInferenceMimeTypeValues(Enum):
TEXT = "text/plain"
- JSON = "application/json"
\ No newline at end of file
+ JSON = "application/json"
diff --git a/litellm/integrations/agentops/__init__.py b/litellm/integrations/agentops/__init__.py
new file mode 100644
index 0000000000..6ad02ce0ba
--- /dev/null
+++ b/litellm/integrations/agentops/__init__.py
@@ -0,0 +1,3 @@
+from .agentops import AgentOps
+
+__all__ = ["AgentOps"]
\ No newline at end of file
diff --git a/litellm/integrations/agentops/agentops.py b/litellm/integrations/agentops/agentops.py
new file mode 100644
index 0000000000..11e76841e9
--- /dev/null
+++ b/litellm/integrations/agentops/agentops.py
@@ -0,0 +1,118 @@
+"""
+AgentOps integration for LiteLLM - Provides OpenTelemetry tracing for LLM calls
+"""
+import os
+from dataclasses import dataclass
+from typing import Optional, Dict, Any
+from litellm.integrations.opentelemetry import OpenTelemetry, OpenTelemetryConfig
+from litellm.llms.custom_httpx.http_handler import _get_httpx_client
+
+@dataclass
+class AgentOpsConfig:
+ endpoint: str = "https://otlp.agentops.cloud/v1/traces"
+ api_key: Optional[str] = None
+ service_name: Optional[str] = None
+ deployment_environment: Optional[str] = None
+ auth_endpoint: str = "https://api.agentops.ai/v3/auth/token"
+
+ @classmethod
+ def from_env(cls):
+ return cls(
+ endpoint="https://otlp.agentops.cloud/v1/traces",
+ api_key=os.getenv("AGENTOPS_API_KEY"),
+ service_name=os.getenv("AGENTOPS_SERVICE_NAME", "agentops"),
+ deployment_environment=os.getenv("AGENTOPS_ENVIRONMENT", "production"),
+ auth_endpoint="https://api.agentops.ai/v3/auth/token"
+ )
+
+class AgentOps(OpenTelemetry):
+ """
+ AgentOps integration - built on top of OpenTelemetry
+
+ Example usage:
+ ```python
+ import litellm
+
+ litellm.success_callback = ["agentops"]
+
+ response = litellm.completion(
+ model="gpt-3.5-turbo",
+ messages=[{"role": "user", "content": "Hello, how are you?"}],
+ )
+ ```
+ """
+ def __init__(
+ self,
+ config: Optional[AgentOpsConfig] = None,
+ ):
+ if config is None:
+ config = AgentOpsConfig.from_env()
+
+ # Prefetch JWT token for authentication
+ jwt_token = None
+ project_id = None
+ if config.api_key:
+ try:
+ response = self._fetch_auth_token(config.api_key, config.auth_endpoint)
+ jwt_token = response.get("token")
+ project_id = response.get("project_id")
+ except Exception:
+ pass
+
+ headers = f"Authorization=Bearer {jwt_token}" if jwt_token else None
+
+ otel_config = OpenTelemetryConfig(
+ exporter="otlp_http",
+ endpoint=config.endpoint,
+ headers=headers
+ )
+
+ # Initialize OpenTelemetry with our config
+ super().__init__(
+ config=otel_config,
+ callback_name="agentops"
+ )
+
+ # Set AgentOps-specific resource attributes
+ resource_attrs = {
+ "service.name": config.service_name or "litellm",
+ "deployment.environment": config.deployment_environment or "production",
+ "telemetry.sdk.name": "agentops",
+ }
+
+ if project_id:
+ resource_attrs["project.id"] = project_id
+
+ self.resource_attributes = resource_attrs
+
+ def _fetch_auth_token(self, api_key: str, auth_endpoint: str) -> Dict[str, Any]:
+ """
+ Fetch JWT authentication token from AgentOps API
+
+ Args:
+ api_key: AgentOps API key
+ auth_endpoint: Authentication endpoint
+
+ Returns:
+ Dict containing JWT token and project ID
+ """
+ headers = {
+ "Content-Type": "application/json",
+ "Connection": "keep-alive",
+ }
+
+ client = _get_httpx_client()
+ try:
+ response = client.post(
+ url=auth_endpoint,
+ headers=headers,
+ json={"api_key": api_key},
+ timeout=10
+ )
+
+ if response.status_code != 200:
+ raise Exception(f"Failed to fetch auth token: {response.text}")
+
+ return response.json()
+ finally:
+ client.close()
\ No newline at end of file
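For reference, a minimal usage sketch of the new AgentOps callback. The environment variable names mirror `AgentOpsConfig.from_env()`; the key, service name, and model values below are placeholders:

```python
import os

import litellm

# Placeholders; AGENTOPS_SERVICE_NAME and AGENTOPS_ENVIRONMENT are optional and default to
# "agentops" and "production" respectively (see AgentOpsConfig.from_env()).
os.environ["AGENTOPS_API_KEY"] = "<your-agentops-api-key>"
os.environ["AGENTOPS_SERVICE_NAME"] = "my-llm-service"
os.environ["AGENTOPS_ENVIRONMENT"] = "staging"

litellm.success_callback = ["agentops"]

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
)
```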
diff --git a/litellm/integrations/anthropic_cache_control_hook.py b/litellm/integrations/anthropic_cache_control_hook.py
new file mode 100644
index 0000000000..c138b3cc25
--- /dev/null
+++ b/litellm/integrations/anthropic_cache_control_hook.py
@@ -0,0 +1,150 @@
+"""
+This hook is used to inject cache control directives into the messages of a chat completion.
+
+Users can define
+- `cache_control_injection_points` in the completion params and litellm will inject the cache control directives into the messages at the specified injection points.
+
+"""
+
+import copy
+from typing import Dict, List, Optional, Tuple, Union, cast
+
+from litellm.integrations.custom_logger import CustomLogger
+from litellm.integrations.custom_prompt_management import CustomPromptManagement
+from litellm.types.integrations.anthropic_cache_control_hook import (
+ CacheControlInjectionPoint,
+ CacheControlMessageInjectionPoint,
+)
+from litellm.types.llms.openai import AllMessageValues, ChatCompletionCachedContent
+from litellm.types.utils import StandardCallbackDynamicParams
+
+
+class AnthropicCacheControlHook(CustomPromptManagement):
+ def get_chat_completion_prompt(
+ self,
+ model: str,
+ messages: List[AllMessageValues],
+ non_default_params: dict,
+ prompt_id: Optional[str],
+ prompt_variables: Optional[dict],
+ dynamic_callback_params: StandardCallbackDynamicParams,
+ ) -> Tuple[str, List[AllMessageValues], dict]:
+ """
+ Apply cache control directives based on specified injection points.
+
+ Returns:
+ - model: str - the model to use
+ - messages: List[AllMessageValues] - messages with applied cache controls
+ - non_default_params: dict - params with any global cache controls
+ """
+ # Extract cache control injection points
+ injection_points: List[CacheControlInjectionPoint] = non_default_params.pop(
+ "cache_control_injection_points", []
+ )
+ if not injection_points:
+ return model, messages, non_default_params
+
+ # Create a deep copy of messages to avoid modifying the original list
+ processed_messages = copy.deepcopy(messages)
+
+ # Process message-level cache controls
+ for point in injection_points:
+ if point.get("location") == "message":
+ point = cast(CacheControlMessageInjectionPoint, point)
+ processed_messages = self._process_message_injection(
+ point=point, messages=processed_messages
+ )
+
+ return model, processed_messages, non_default_params
+
+ @staticmethod
+ def _process_message_injection(
+ point: CacheControlMessageInjectionPoint, messages: List[AllMessageValues]
+ ) -> List[AllMessageValues]:
+ """Process message-level cache control injection."""
+ control: ChatCompletionCachedContent = point.get(
+ "control", None
+ ) or ChatCompletionCachedContent(type="ephemeral")
+
+ _targetted_index: Optional[Union[int, str]] = point.get("index", None)
+ targetted_index: Optional[int] = None
+ if isinstance(_targetted_index, str):
+ if _targetted_index.isdigit():
+ targetted_index = int(_targetted_index)
+ else:
+ targetted_index = _targetted_index
+
+ targetted_role = point.get("role", None)
+
+ # Case 1: Target by specific index
+ if targetted_index is not None:
+ if 0 <= targetted_index < len(messages):
+ messages[targetted_index] = (
+ AnthropicCacheControlHook._safe_insert_cache_control_in_message(
+ messages[targetted_index], control
+ )
+ )
+ # Case 2: Target by role
+ elif targetted_role is not None:
+ for msg in messages:
+ if msg.get("role") == targetted_role:
+ msg = (
+ AnthropicCacheControlHook._safe_insert_cache_control_in_message(
+ message=msg, control=control
+ )
+ )
+ return messages
+
+ @staticmethod
+ def _safe_insert_cache_control_in_message(
+ message: AllMessageValues, control: ChatCompletionCachedContent
+ ) -> AllMessageValues:
+ """
+ Safe way to insert cache control in a message
+
+ OpenAI Message content can be either:
+ - string
+ - list of objects
+
+ This method handles inserting cache control in both cases.
+ """
+ message_content = message.get("content", None)
+
+ # 1. if string, insert cache control in the message
+ if isinstance(message_content, str):
+ message["cache_control"] = control # type: ignore
+ # 2. list of objects
+ elif isinstance(message_content, list):
+ for content_item in message_content:
+ if isinstance(content_item, dict):
+ content_item["cache_control"] = control # type: ignore
+ return message
+
+ @property
+ def integration_name(self) -> str:
+ """Return the integration name for this hook."""
+ return "anthropic_cache_control_hook"
+
+ @staticmethod
+ def should_use_anthropic_cache_control_hook(non_default_params: Dict) -> bool:
+ if non_default_params.get("cache_control_injection_points", None):
+ return True
+ return False
+
+ @staticmethod
+ def get_custom_logger_for_anthropic_cache_control_hook(
+ non_default_params: Dict,
+ ) -> Optional[CustomLogger]:
+ from litellm.litellm_core_utils.litellm_logging import (
+ _init_custom_logger_compatible_class,
+ )
+
+ if AnthropicCacheControlHook.should_use_anthropic_cache_control_hook(
+ non_default_params
+ ):
+ return _init_custom_logger_compatible_class(
+ logging_integration="anthropic_cache_control_hook",
+ internal_usage_cache=None,
+ llm_router=None,
+ )
+ return None
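A sketch of the `cache_control_injection_points` parameter this hook consumes. The injection-point shape follows `CacheControlMessageInjectionPoint` (a `location` of `"message"` plus a `role` or `index`, and an optional `control`); the model name is a placeholder:

```python
import litellm

response = litellm.completion(
    model="anthropic/claude-3-5-sonnet",  # placeholder model name
    messages=[
        {"role": "system", "content": "<long, reusable system prompt>"},
        {"role": "user", "content": "Summarize our docs."},
    ],
    cache_control_injection_points=[
        # Target every system message; omitting "control" applies the default
        # ChatCompletionCachedContent(type="ephemeral").
        {"location": "message", "role": "system"},
        # Or target a specific message by index:
        # {"location": "message", "index": 0, "control": {"type": "ephemeral"}},
    ],
)
```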
diff --git a/litellm/integrations/argilla.py b/litellm/integrations/argilla.py
index 055ad90259..a362ce7e4d 100644
--- a/litellm/integrations/argilla.py
+++ b/litellm/integrations/argilla.py
@@ -98,7 +98,6 @@ class ArgillaLogger(CustomBatchLogger):
argilla_dataset_name: Optional[str],
argilla_base_url: Optional[str],
) -> ArgillaCredentialsObject:
-
_credentials_api_key = argilla_api_key or os.getenv("ARGILLA_API_KEY")
if _credentials_api_key is None:
raise Exception("Invalid Argilla API Key given. _credentials_api_key=None.")
diff --git a/litellm/integrations/arize/_utils.py b/litellm/integrations/arize/_utils.py
index 9921d47aff..5a090968b4 100644
--- a/litellm/integrations/arize/_utils.py
+++ b/litellm/integrations/arize/_utils.py
@@ -1,31 +1,37 @@
-import json
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any, Optional, Union
from litellm._logging import verbose_logger
+from litellm.litellm_core_utils.safe_json_dumps import safe_dumps
from litellm.types.utils import StandardLoggingPayload
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span
- Span = _Span
+
+ Span = Union[_Span, Any]
else:
Span = Any
def set_attributes(span: Span, kwargs, response_obj):
- from openinference.semconv.trace import (
+ from litellm.integrations._types.open_inference import (
MessageAttributes,
OpenInferenceSpanKindValues,
SpanAttributes,
)
try:
- litellm_params = kwargs.get("litellm_params", {}) or {}
+ standard_logging_payload: Optional[StandardLoggingPayload] = kwargs.get(
+ "standard_logging_object"
+ )
#############################################
############ LLM CALL METADATA ##############
#############################################
- metadata = litellm_params.get("metadata", {}) or {}
- span.set_attribute(SpanAttributes.METADATA, str(metadata))
+
+ if standard_logging_payload and (
+ metadata := standard_logging_payload["metadata"]
+ ):
+ span.set_attribute(SpanAttributes.METADATA, safe_dumps(metadata))
#############################################
########## LLM Request Attributes ###########
@@ -62,13 +68,12 @@ def set_attributes(span: Span, kwargs, response_obj):
msg.get("content", ""),
)
- standard_logging_payload: Optional[StandardLoggingPayload] = kwargs.get(
- "standard_logging_object"
- )
- if standard_logging_payload and (model_params := standard_logging_payload["model_parameters"]):
+ if standard_logging_payload and (
+ model_params := standard_logging_payload["model_parameters"]
+ ):
# The Generative AI Provider: Azure, OpenAI, etc.
span.set_attribute(
- SpanAttributes.LLM_INVOCATION_PARAMETERS, json.dumps(model_params)
+ SpanAttributes.LLM_INVOCATION_PARAMETERS, safe_dumps(model_params)
)
if model_params.get("user"):
@@ -80,7 +85,7 @@ def set_attributes(span: Span, kwargs, response_obj):
########## LLM Response Attributes ##########
# https://docs.arize.com/arize/large-language-models/tracing/semantic-conventions
#############################################
- if hasattr(response_obj, 'get'):
+ if hasattr(response_obj, "get"):
for choice in response_obj.get("choices", []):
response_message = choice.get("message", {})
span.set_attribute(
diff --git a/litellm/integrations/arize/arize.py b/litellm/integrations/arize/arize.py
index 652957e1ee..03b6966809 100644
--- a/litellm/integrations/arize/arize.py
+++ b/litellm/integrations/arize/arize.py
@@ -3,31 +3,37 @@ arize AI is OTEL compatible
this file has Arize ai specific helper functions
"""
-import os
-from typing import TYPE_CHECKING, Any
+import os
+from datetime import datetime
+from typing import TYPE_CHECKING, Any, Optional, Union
+
from litellm.integrations.arize import _utils
+from litellm.integrations.opentelemetry import OpenTelemetry
from litellm.types.integrations.arize import ArizeConfig
+from litellm.types.services import ServiceLoggerPayload
if TYPE_CHECKING:
- from litellm.types.integrations.arize import Protocol as _Protocol
from opentelemetry.trace import Span as _Span
+ from litellm.types.integrations.arize import Protocol as _Protocol
+
Protocol = _Protocol
- Span = _Span
+ Span = Union[_Span, Any]
else:
Protocol = Any
Span = Any
-
-class ArizeLogger:
+class ArizeLogger(OpenTelemetry):
+ def set_attributes(self, span: Span, kwargs, response_obj: Optional[Any]):
+ ArizeLogger.set_arize_attributes(span, kwargs, response_obj)
+ return
@staticmethod
def set_arize_attributes(span: Span, kwargs, response_obj):
_utils.set_attributes(span, kwargs, response_obj)
return
-
@staticmethod
def get_arize_config() -> ArizeConfig:
@@ -43,11 +49,6 @@ class ArizeLogger:
space_key = os.environ.get("ARIZE_SPACE_KEY")
api_key = os.environ.get("ARIZE_API_KEY")
- if not space_key:
- raise ValueError("ARIZE_SPACE_KEY not found in environment variables")
- if not api_key:
- raise ValueError("ARIZE_API_KEY not found in environment variables")
-
grpc_endpoint = os.environ.get("ARIZE_ENDPOINT")
http_endpoint = os.environ.get("ARIZE_HTTP_ENDPOINT")
@@ -55,13 +56,13 @@ class ArizeLogger:
protocol: Protocol = "otlp_grpc"
if grpc_endpoint:
- protocol="otlp_grpc"
- endpoint=grpc_endpoint
+ protocol = "otlp_grpc"
+ endpoint = grpc_endpoint
elif http_endpoint:
- protocol="otlp_http"
- endpoint=http_endpoint
+ protocol = "otlp_http"
+ endpoint = http_endpoint
else:
- protocol="otlp_grpc"
+ protocol = "otlp_grpc"
endpoint = "https://otlp.arize.com/v1"
return ArizeConfig(
@@ -71,4 +72,33 @@ class ArizeLogger:
endpoint=endpoint,
)
+ async def async_service_success_hook(
+ self,
+ payload: ServiceLoggerPayload,
+ parent_otel_span: Optional[Span] = None,
+ start_time: Optional[Union[datetime, float]] = None,
+ end_time: Optional[Union[datetime, float]] = None,
+ event_metadata: Optional[dict] = None,
+ ):
+ """Arize is used mainly for LLM I/O tracing, sending router+caching metrics adds bloat to arize logs"""
+ pass
+ async def async_service_failure_hook(
+ self,
+ payload: ServiceLoggerPayload,
+ error: Optional[str] = "",
+ parent_otel_span: Optional[Span] = None,
+ start_time: Optional[Union[datetime, float]] = None,
+ end_time: Optional[Union[float, datetime]] = None,
+ event_metadata: Optional[dict] = None,
+ ):
+ """Arize is used mainly for LLM I/O tracing, sending router+caching metrics adds bloat to arize logs"""
+ pass
+
+ def create_litellm_proxy_request_started_span(
+ self,
+ start_time: datetime,
+ headers: dict,
+ ):
+ """Arize is used mainly for LLM I/O tracing, sending Proxy Server Request adds bloat to arize logs"""
+ pass
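A hedged configuration sketch for the reworked `ArizeLogger` (now an `OpenTelemetry` subclass). Per `get_arize_config()`, the space key and API key are read from the environment, and the exporter falls back to the gRPC endpoint `https://otlp.arize.com/v1` when neither `ARIZE_ENDPOINT` nor `ARIZE_HTTP_ENDPOINT` is set; the callback name below is an assumption:

```python
import os

import litellm

os.environ["ARIZE_SPACE_KEY"] = "<arize-space-key>"  # placeholder
os.environ["ARIZE_API_KEY"] = "<arize-api-key>"      # placeholder

litellm.callbacks = ["arize"]  # assumption: "arize" is the registered callback name

response = litellm.completion(
    model="gpt-4o-mini",  # placeholder
    messages=[{"role": "user", "content": "Hi"}],
)
```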
diff --git a/litellm/integrations/arize/arize_phoenix.py b/litellm/integrations/arize/arize_phoenix.py
index d7b7d5812b..2b4909885a 100644
--- a/litellm/integrations/arize/arize_phoenix.py
+++ b/litellm/integrations/arize/arize_phoenix.py
@@ -1,17 +1,20 @@
import os
-from typing import TYPE_CHECKING, Any
-from litellm.integrations.arize import _utils
+from typing import TYPE_CHECKING, Any, Union
+
from litellm._logging import verbose_logger
+from litellm.integrations.arize import _utils
from litellm.types.integrations.arize_phoenix import ArizePhoenixConfig
if TYPE_CHECKING:
- from .opentelemetry import OpenTelemetryConfig as _OpenTelemetryConfig
- from litellm.types.integrations.arize import Protocol as _Protocol
from opentelemetry.trace import Span as _Span
+ from litellm.types.integrations.arize import Protocol as _Protocol
+
+ from .opentelemetry import OpenTelemetryConfig as _OpenTelemetryConfig
+
Protocol = _Protocol
OpenTelemetryConfig = _OpenTelemetryConfig
- Span = _Span
+ Span = Union[_Span, Any]
else:
Protocol = Any
OpenTelemetryConfig = Any
@@ -20,6 +23,7 @@ else:
ARIZE_HOSTED_PHOENIX_ENDPOINT = "https://app.phoenix.arize.com/v1/traces"
+
class ArizePhoenixLogger:
@staticmethod
def set_arize_phoenix_attributes(span: Span, kwargs, response_obj):
@@ -49,7 +53,7 @@ class ArizePhoenixLogger:
protocol = "otlp_grpc"
else:
endpoint = ARIZE_HOSTED_PHOENIX_ENDPOINT
- protocol = "otlp_http"
+ protocol = "otlp_http"
verbose_logger.debug(
f"No PHOENIX_COLLECTOR_ENDPOINT or PHOENIX_COLLECTOR_HTTP_ENDPOINT found, using default endpoint with http: {ARIZE_HOSTED_PHOENIX_ENDPOINT}"
)
@@ -57,17 +61,16 @@ class ArizePhoenixLogger:
otlp_auth_headers = None
# If the endpoint is the Arize hosted Phoenix endpoint, use the api_key as the auth header, as it currently uses
# a slightly different auth header format than self hosted phoenix
- if endpoint == ARIZE_HOSTED_PHOENIX_ENDPOINT:
+ if endpoint == ARIZE_HOSTED_PHOENIX_ENDPOINT:
if api_key is None:
- raise ValueError("PHOENIX_API_KEY must be set when the Arize hosted Phoenix endpoint is used.")
+ raise ValueError(
+ "PHOENIX_API_KEY must be set when the Arize hosted Phoenix endpoint is used."
+ )
otlp_auth_headers = f"api_key={api_key}"
elif api_key is not None:
# api_key/auth is optional for self hosted phoenix
otlp_auth_headers = f"Authorization=Bearer {api_key}"
return ArizePhoenixConfig(
- otlp_auth_headers=otlp_auth_headers,
- protocol=protocol,
- endpoint=endpoint
+ otlp_auth_headers=otlp_auth_headers, protocol=protocol, endpoint=endpoint
)
-
diff --git a/litellm/integrations/athina.py b/litellm/integrations/athina.py
index 705dc11f1d..49b9e9e687 100644
--- a/litellm/integrations/athina.py
+++ b/litellm/integrations/athina.py
@@ -12,7 +12,10 @@ class AthinaLogger:
"athina-api-key": self.athina_api_key,
"Content-Type": "application/json",
}
- self.athina_logging_url = os.getenv("ATHINA_BASE_URL", "https://log.athina.ai") + "/api/v1/log/inference"
+ self.athina_logging_url = (
+ os.getenv("ATHINA_BASE_URL", "https://log.athina.ai")
+ + "/api/v1/log/inference"
+ )
self.additional_keys = [
"environment",
"prompt_slug",
diff --git a/litellm/integrations/azure_storage/azure_storage.py b/litellm/integrations/azure_storage/azure_storage.py
index ddc46b117f..6ffb1e542f 100644
--- a/litellm/integrations/azure_storage/azure_storage.py
+++ b/litellm/integrations/azure_storage/azure_storage.py
@@ -1,14 +1,15 @@
import asyncio
import json
import os
+import time
import uuid
from datetime import datetime, timedelta
from typing import List, Optional
from litellm._logging import verbose_logger
-from litellm.constants import AZURE_STORAGE_MSFT_VERSION
+from litellm.constants import _DEFAULT_TTL_FOR_HTTPX_CLIENTS, AZURE_STORAGE_MSFT_VERSION
from litellm.integrations.custom_batch_logger import CustomBatchLogger
-from litellm.llms.azure.common_utils import get_azure_ad_token_from_entrata_id
+from litellm.llms.azure.common_utils import get_azure_ad_token_from_entra_id
from litellm.llms.custom_httpx.http_handler import (
AsyncHTTPHandler,
get_async_httpx_client,
@@ -48,6 +49,9 @@ class AzureBlobStorageLogger(CustomBatchLogger):
"Missing required environment variable: AZURE_STORAGE_FILE_SYSTEM"
)
self.azure_storage_file_system: str = _azure_storage_file_system
+ self._service_client = None
+ # Time that the azure service client expires, in order to reset the connection pool and keep it fresh
+ self._service_client_timeout: Optional[float] = None
# Internal variables used for Token based authentication
self.azure_auth_token: Optional[str] = (
@@ -153,7 +157,6 @@ class AzureBlobStorageLogger(CustomBatchLogger):
3. Flush the data
"""
try:
-
if self.azure_storage_account_key:
await self.upload_to_azure_data_lake_with_azure_account_key(
payload=payload
@@ -292,7 +295,7 @@ class AzureBlobStorageLogger(CustomBatchLogger):
"Missing required environment variable: AZURE_STORAGE_CLIENT_SECRET"
)
- token_provider = get_azure_ad_token_from_entrata_id(
+ token_provider = get_azure_ad_token_from_entra_id(
tenant_id=tenant_id,
client_id=client_id,
client_secret=client_secret,
@@ -325,6 +328,25 @@ class AzureBlobStorageLogger(CustomBatchLogger):
f"AzureBlobStorageLogger is only available for premium users. {CommonProxyErrors.not_premium_user}"
)
+ async def get_service_client(self):
+ from azure.storage.filedatalake.aio import DataLakeServiceClient
+
+ # expire old clients to recover from connection issues
+ if (
+ self._service_client_timeout
+ and self._service_client
+ and self._service_client_timeout < time.time()
+ ):
+ await self._service_client.close()
+ self._service_client = None
+ if not self._service_client:
+ self._service_client = DataLakeServiceClient(
+ account_url=f"https://{self.azure_storage_account_name}.dfs.core.windows.net",
+ credential=self.azure_storage_account_key,
+ )
+ self._service_client_timeout = time.time() + _DEFAULT_TTL_FOR_HTTPX_CLIENTS
+ return self._service_client
+
async def upload_to_azure_data_lake_with_azure_account_key(
self, payload: StandardLoggingPayload
):
@@ -333,13 +355,10 @@ class AzureBlobStorageLogger(CustomBatchLogger):
This is used when Azure Storage Account Key is set - Azure Storage Account Key does not work directly with Azure Rest API
"""
- from azure.storage.filedatalake.aio import DataLakeServiceClient
# Create an async service client
- service_client = DataLakeServiceClient(
- account_url=f"https://{self.azure_storage_account_name}.dfs.core.windows.net",
- credential=self.azure_storage_account_key,
- )
+
+ service_client = await self.get_service_client()
# Get file system client
file_system_client = service_client.get_file_system_client(
file_system=self.azure_storage_file_system
diff --git a/litellm/integrations/braintrust_logging.py b/litellm/integrations/braintrust_logging.py
index 281fbda01e..0961eab02b 100644
--- a/litellm/integrations/braintrust_logging.py
+++ b/litellm/integrations/braintrust_logging.py
@@ -4,7 +4,7 @@
import copy
import os
from datetime import datetime
-from typing import Optional, Dict
+from typing import Dict, Optional
import httpx
from pydantic import BaseModel
@@ -19,7 +19,9 @@ from litellm.llms.custom_httpx.http_handler import (
)
from litellm.utils import print_verbose
-global_braintrust_http_handler = get_async_httpx_client(llm_provider=httpxSpecialProvider.LoggingCallback)
+global_braintrust_http_handler = get_async_httpx_client(
+ llm_provider=httpxSpecialProvider.LoggingCallback
+)
global_braintrust_sync_http_handler = HTTPHandler()
API_BASE = "https://api.braintrustdata.com/v1"
@@ -35,7 +37,9 @@ def get_utc_datetime():
class BraintrustLogger(CustomLogger):
- def __init__(self, api_key: Optional[str] = None, api_base: Optional[str] = None) -> None:
+ def __init__(
+ self, api_key: Optional[str] = None, api_base: Optional[str] = None
+ ) -> None:
super().__init__()
self.validate_environment(api_key=api_key)
self.api_base = api_base or API_BASE
@@ -45,7 +49,9 @@ class BraintrustLogger(CustomLogger):
"Authorization": "Bearer " + self.api_key,
"Content-Type": "application/json",
}
- self._project_id_cache: Dict[str, str] = {} # Cache mapping project names to IDs
+ self._project_id_cache: Dict[
+ str, str
+ ] = {} # Cache mapping project names to IDs
def validate_environment(self, api_key: Optional[str]):
"""
@@ -71,7 +77,9 @@ class BraintrustLogger(CustomLogger):
try:
response = global_braintrust_sync_http_handler.post(
- f"{self.api_base}/project", headers=self.headers, json={"name": project_name}
+ f"{self.api_base}/project",
+ headers=self.headers,
+ json={"name": project_name},
)
project_dict = response.json()
project_id = project_dict["id"]
@@ -89,7 +97,9 @@ class BraintrustLogger(CustomLogger):
try:
response = await global_braintrust_http_handler.post(
- f"{self.api_base}/project/register", headers=self.headers, json={"name": project_name}
+ f"{self.api_base}/project/register",
+ headers=self.headers,
+ json={"name": project_name},
)
project_dict = response.json()
project_id = project_dict["id"]
@@ -116,15 +126,21 @@ class BraintrustLogger(CustomLogger):
if metadata is None:
metadata = {}
- proxy_headers = litellm_params.get("proxy_server_request", {}).get("headers", {}) or {}
+ proxy_headers = (
+ litellm_params.get("proxy_server_request", {}).get("headers", {}) or {}
+ )
for metadata_param_key in proxy_headers:
if metadata_param_key.startswith("braintrust"):
trace_param_key = metadata_param_key.replace("braintrust", "", 1)
if trace_param_key in metadata:
- verbose_logger.warning(f"Overwriting Braintrust `{trace_param_key}` from request header")
+ verbose_logger.warning(
+ f"Overwriting Braintrust `{trace_param_key}` from request header"
+ )
else:
- verbose_logger.debug(f"Found Braintrust `{trace_param_key}` in request header")
+ verbose_logger.debug(
+ f"Found Braintrust `{trace_param_key}` in request header"
+ )
metadata[trace_param_key] = proxy_headers.get(metadata_param_key)
return metadata
@@ -157,24 +173,35 @@ class BraintrustLogger(CustomLogger):
output = None
choices = []
if response_obj is not None and (
- kwargs.get("call_type", None) == "embedding" or isinstance(response_obj, litellm.EmbeddingResponse)
+ kwargs.get("call_type", None) == "embedding"
+ or isinstance(response_obj, litellm.EmbeddingResponse)
):
output = None
- elif response_obj is not None and isinstance(response_obj, litellm.ModelResponse):
+ elif response_obj is not None and isinstance(
+ response_obj, litellm.ModelResponse
+ ):
output = response_obj["choices"][0]["message"].json()
choices = response_obj["choices"]
- elif response_obj is not None and isinstance(response_obj, litellm.TextCompletionResponse):
+ elif response_obj is not None and isinstance(
+ response_obj, litellm.TextCompletionResponse
+ ):
output = response_obj.choices[0].text
choices = response_obj.choices
- elif response_obj is not None and isinstance(response_obj, litellm.ImageResponse):
+ elif response_obj is not None and isinstance(
+ response_obj, litellm.ImageResponse
+ ):
output = response_obj["data"]
litellm_params = kwargs.get("litellm_params", {})
- metadata = litellm_params.get("metadata", {}) or {} # if litellm_params['metadata'] == None
+ metadata = (
+ litellm_params.get("metadata", {}) or {}
+ ) # if litellm_params['metadata'] == None
metadata = self.add_metadata_from_header(litellm_params, metadata)
clean_metadata = {}
try:
- metadata = copy.deepcopy(metadata) # Avoid modifying the original metadata
+ metadata = copy.deepcopy(
+ metadata
+ ) # Avoid modifying the original metadata
except Exception:
new_metadata = {}
for key, value in metadata.items():
@@ -192,7 +219,9 @@ class BraintrustLogger(CustomLogger):
project_id = metadata.get("project_id")
if project_id is None:
project_name = metadata.get("project_name")
- project_id = self.get_project_id_sync(project_name) if project_name else None
+ project_id = (
+ self.get_project_id_sync(project_name) if project_name else None
+ )
if project_id is None:
if self.default_project_id is None:
@@ -234,7 +263,8 @@ class BraintrustLogger(CustomLogger):
"completion_tokens": usage_obj.completion_tokens,
"total_tokens": usage_obj.total_tokens,
"total_cost": cost,
- "time_to_first_token": end_time.timestamp() - start_time.timestamp(),
+ "time_to_first_token": end_time.timestamp()
+ - start_time.timestamp(),
"start": start_time.timestamp(),
"end": end_time.timestamp(),
}
@@ -255,7 +285,9 @@ class BraintrustLogger(CustomLogger):
request_data["metrics"] = metrics
try:
- print_verbose(f"global_braintrust_sync_http_handler.post: {global_braintrust_sync_http_handler.post}")
+ print_verbose(
+ f"global_braintrust_sync_http_handler.post: {global_braintrust_sync_http_handler.post}"
+ )
global_braintrust_sync_http_handler.post(
url=f"{self.api_base}/project_logs/{project_id}/insert",
json={"events": [request_data]},
@@ -276,20 +308,29 @@ class BraintrustLogger(CustomLogger):
output = None
choices = []
if response_obj is not None and (
- kwargs.get("call_type", None) == "embedding" or isinstance(response_obj, litellm.EmbeddingResponse)
+ kwargs.get("call_type", None) == "embedding"
+ or isinstance(response_obj, litellm.EmbeddingResponse)
):
output = None
- elif response_obj is not None and isinstance(response_obj, litellm.ModelResponse):
+ elif response_obj is not None and isinstance(
+ response_obj, litellm.ModelResponse
+ ):
output = response_obj["choices"][0]["message"].json()
choices = response_obj["choices"]
- elif response_obj is not None and isinstance(response_obj, litellm.TextCompletionResponse):
+ elif response_obj is not None and isinstance(
+ response_obj, litellm.TextCompletionResponse
+ ):
output = response_obj.choices[0].text
choices = response_obj.choices
- elif response_obj is not None and isinstance(response_obj, litellm.ImageResponse):
+ elif response_obj is not None and isinstance(
+ response_obj, litellm.ImageResponse
+ ):
output = response_obj["data"]
litellm_params = kwargs.get("litellm_params", {})
- metadata = litellm_params.get("metadata", {}) or {} # if litellm_params['metadata'] == None
+ metadata = (
+ litellm_params.get("metadata", {}) or {}
+ ) # if litellm_params['metadata'] == None
metadata = self.add_metadata_from_header(litellm_params, metadata)
clean_metadata = {}
new_metadata = {}
@@ -313,7 +354,11 @@ class BraintrustLogger(CustomLogger):
project_id = metadata.get("project_id")
if project_id is None:
project_name = metadata.get("project_name")
- project_id = await self.get_project_id_async(project_name) if project_name else None
+ project_id = (
+ await self.get_project_id_async(project_name)
+ if project_name
+ else None
+ )
if project_id is None:
if self.default_project_id is None:
@@ -362,8 +407,14 @@ class BraintrustLogger(CustomLogger):
api_call_start_time = kwargs.get("api_call_start_time")
completion_start_time = kwargs.get("completion_start_time")
- if api_call_start_time is not None and completion_start_time is not None:
- metrics["time_to_first_token"] = completion_start_time.timestamp() - api_call_start_time.timestamp()
+ if (
+ api_call_start_time is not None
+ and completion_start_time is not None
+ ):
+ metrics["time_to_first_token"] = (
+ completion_start_time.timestamp()
+ - api_call_start_time.timestamp()
+ )
request_data = {
"id": litellm_call_id,
diff --git a/litellm/integrations/custom_batch_logger.py b/litellm/integrations/custom_batch_logger.py
index 3cfdf82cab..f9d4496c21 100644
--- a/litellm/integrations/custom_batch_logger.py
+++ b/litellm/integrations/custom_batch_logger.py
@@ -14,7 +14,6 @@ from litellm.integrations.custom_logger import CustomLogger
class CustomBatchLogger(CustomLogger):
-
def __init__(
self,
flush_lock: Optional[asyncio.Lock] = None,
diff --git a/litellm/integrations/custom_guardrail.py b/litellm/integrations/custom_guardrail.py
index 4421664bfc..41a3800116 100644
--- a/litellm/integrations/custom_guardrail.py
+++ b/litellm/integrations/custom_guardrail.py
@@ -7,7 +7,6 @@ from litellm.types.utils import StandardLoggingGuardrailInformation
class CustomGuardrail(CustomLogger):
-
def __init__(
self,
guardrail_name: Optional[str] = None,
diff --git a/litellm/integrations/custom_logger.py b/litellm/integrations/custom_logger.py
index 6f1ec88d01..18cb8e8d7f 100644
--- a/litellm/integrations/custom_logger.py
+++ b/litellm/integrations/custom_logger.py
@@ -20,8 +20,7 @@ from litellm.types.integrations.argilla import ArgillaItem
from litellm.types.llms.openai import AllMessageValues, ChatCompletionRequest
from litellm.types.utils import (
AdapterCompletionStreamWrapper,
- EmbeddingResponse,
- ImageResponse,
+ LLMResponseTypes,
ModelResponse,
ModelResponseStream,
StandardCallbackDynamicParams,
@@ -31,7 +30,7 @@ from litellm.types.utils import (
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span
- Span = _Span
+ Span = Union[_Span, Any]
else:
Span = Any
@@ -95,7 +94,7 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac
model: str,
messages: List[AllMessageValues],
non_default_params: dict,
- prompt_id: str,
+ prompt_id: Optional[str],
prompt_variables: Optional[dict],
dynamic_callback_params: StandardCallbackDynamicParams,
) -> Tuple[str, List[AllMessageValues], dict]:
@@ -223,7 +222,7 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac
self,
data: dict,
user_api_key_dict: UserAPIKeyAuth,
- response: Union[Any, ModelResponse, EmbeddingResponse, ImageResponse],
+ response: LLMResponseTypes,
) -> Any:
pass
diff --git a/litellm/integrations/custom_prompt_management.py b/litellm/integrations/custom_prompt_management.py
new file mode 100644
index 0000000000..9d05e7b242
--- /dev/null
+++ b/litellm/integrations/custom_prompt_management.py
@@ -0,0 +1,49 @@
+from typing import List, Optional, Tuple
+
+from litellm.integrations.custom_logger import CustomLogger
+from litellm.integrations.prompt_management_base import (
+ PromptManagementBase,
+ PromptManagementClient,
+)
+from litellm.types.llms.openai import AllMessageValues
+from litellm.types.utils import StandardCallbackDynamicParams
+
+
+class CustomPromptManagement(CustomLogger, PromptManagementBase):
+ def get_chat_completion_prompt(
+ self,
+ model: str,
+ messages: List[AllMessageValues],
+ non_default_params: dict,
+ prompt_id: Optional[str],
+ prompt_variables: Optional[dict],
+ dynamic_callback_params: StandardCallbackDynamicParams,
+ ) -> Tuple[str, List[AllMessageValues], dict]:
+ """
+ Returns:
+ - model: str - the model to use (can be pulled from prompt management tool)
+ - messages: List[AllMessageValues] - the messages to use (can be pulled from prompt management tool)
+ - non_default_params: dict - update with any optional params (e.g. temperature, max_tokens, etc.) to use (can be pulled from prompt management tool)
+ """
+ return model, messages, non_default_params
+
+ @property
+ def integration_name(self) -> str:
+ return "custom-prompt-management"
+
+ def should_run_prompt_management(
+ self,
+ prompt_id: str,
+ dynamic_callback_params: StandardCallbackDynamicParams,
+ ) -> bool:
+ return True
+
+ def _compile_prompt_helper(
+ self,
+ prompt_id: str,
+ prompt_variables: Optional[dict],
+ dynamic_callback_params: StandardCallbackDynamicParams,
+ ) -> PromptManagementClient:
+ raise NotImplementedError(
+ "Custom prompt management does not support compile prompt helper"
+ )
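To illustrate the contract of the new `CustomPromptManagement` base class, a hypothetical subclass that rewrites the prompt before the call and returns the `(model, messages, non_default_params)` tuple:

```python
from typing import List, Optional, Tuple

from litellm.integrations.custom_prompt_management import CustomPromptManagement
from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import StandardCallbackDynamicParams


class StaticSystemPrompt(CustomPromptManagement):
    """Hypothetical example: prepend a fixed system message to every request."""

    def get_chat_completion_prompt(
        self,
        model: str,
        messages: List[AllMessageValues],
        non_default_params: dict,
        prompt_id: Optional[str],
        prompt_variables: Optional[dict],
        dynamic_callback_params: StandardCallbackDynamicParams,
    ) -> Tuple[str, List[AllMessageValues], dict]:
        # Prepend a fixed system message; leave model and optional params untouched
        messages = [
            {"role": "system", "content": "Always answer concisely."},
            *messages,
        ]
        return model, messages, non_default_params
```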
diff --git a/litellm/integrations/datadog/datadog.py b/litellm/integrations/datadog/datadog.py
index 4f4b05c84e..fb6fee6dc6 100644
--- a/litellm/integrations/datadog/datadog.py
+++ b/litellm/integrations/datadog/datadog.py
@@ -41,7 +41,7 @@ from litellm.types.utils import StandardLoggingPayload
from ..additional_logging_utils import AdditionalLoggingUtils
# max number of logs DD API can accept
-DD_MAX_BATCH_SIZE = 1000
+
# specify what ServiceTypes are logged as success events to DD. (We don't want to spam DD traces with large number of service types)
DD_LOGGED_SUCCESS_SERVICE_TYPES = [
@@ -233,7 +233,6 @@ class DataDogLogger(
pass
async def _log_async_event(self, kwargs, response_obj, start_time, end_time):
-
dd_payload = self.create_datadog_logging_payload(
kwargs=kwargs,
response_obj=response_obj,
diff --git a/litellm/integrations/gcs_bucket/gcs_bucket.py b/litellm/integrations/gcs_bucket/gcs_bucket.py
index 187ab779c0..972a023666 100644
--- a/litellm/integrations/gcs_bucket/gcs_bucket.py
+++ b/litellm/integrations/gcs_bucket/gcs_bucket.py
@@ -20,10 +20,6 @@ else:
VertexBase = Any
-GCS_DEFAULT_BATCH_SIZE = 2048
-GCS_DEFAULT_FLUSH_INTERVAL_SECONDS = 20
-
-
class GCSBucketLogger(GCSBucketBase, AdditionalLoggingUtils):
def __init__(self, bucket_name: Optional[str] = None) -> None:
from litellm.proxy.proxy_server import premium_user
@@ -125,6 +121,7 @@ class GCSBucketLogger(GCSBucketBase, AdditionalLoggingUtils):
gcs_logging_config: GCSLoggingConfig = await self.get_gcs_logging_config(
kwargs
)
+
headers = await self.construct_request_headers(
vertex_instance=gcs_logging_config["vertex_instance"],
service_account_json=gcs_logging_config["path_service_account"],
diff --git a/litellm/integrations/gcs_bucket/gcs_bucket_base.py b/litellm/integrations/gcs_bucket/gcs_bucket_base.py
index 66995d8482..0ce845ecb2 100644
--- a/litellm/integrations/gcs_bucket/gcs_bucket_base.py
+++ b/litellm/integrations/gcs_bucket/gcs_bucket_base.py
@@ -125,9 +125,9 @@ class GCSBucketBase(CustomBatchLogger):
if kwargs is None:
kwargs = {}
- standard_callback_dynamic_params: Optional[StandardCallbackDynamicParams] = (
- kwargs.get("standard_callback_dynamic_params", None)
- )
+ standard_callback_dynamic_params: Optional[
+ StandardCallbackDynamicParams
+ ] = kwargs.get("standard_callback_dynamic_params", None)
bucket_name: str
path_service_account: Optional[str]
diff --git a/litellm/integrations/gcs_pubsub/pub_sub.py b/litellm/integrations/gcs_pubsub/pub_sub.py
index e94c853f3f..db7f9bb4d0 100644
--- a/litellm/integrations/gcs_pubsub/pub_sub.py
+++ b/litellm/integrations/gcs_pubsub/pub_sub.py
@@ -10,13 +10,16 @@ import asyncio
import json
import os
import traceback
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+from litellm.types.utils import StandardLoggingPayload
if TYPE_CHECKING:
from litellm.proxy._types import SpendLogsPayload
else:
SpendLogsPayload = Any
+import litellm
from litellm._logging import verbose_logger
from litellm.integrations.custom_batch_logger import CustomBatchLogger
from litellm.llms.custom_httpx.http_handler import (
@@ -61,18 +64,19 @@ class GcsPubSubLogger(CustomBatchLogger):
self.flush_lock = asyncio.Lock()
super().__init__(**kwargs, flush_lock=self.flush_lock)
asyncio.create_task(self.periodic_flush())
- self.log_queue: List[SpendLogsPayload] = []
+ self.log_queue: List[Union[SpendLogsPayload, StandardLoggingPayload]] = []
async def construct_request_headers(self) -> Dict[str, str]:
"""Construct authorization headers using Vertex AI auth"""
from litellm import vertex_chat_completion
- _auth_header, vertex_project = (
- await vertex_chat_completion._ensure_access_token_async(
- credentials=self.path_service_account_json,
- project_id=None,
- custom_llm_provider="vertex_ai",
- )
+ (
+ _auth_header,
+ vertex_project,
+ ) = await vertex_chat_completion._ensure_access_token_async(
+ credentials=self.path_service_account_json,
+ project_id=self.project_id,
+ custom_llm_provider="vertex_ai",
)
auth_header, _ = vertex_chat_completion._get_token_and_url(
@@ -115,13 +119,20 @@ class GcsPubSubLogger(CustomBatchLogger):
verbose_logger.debug(
"PubSub: Logging - Enters logging function for model %s", kwargs
)
- spend_logs_payload = get_logging_payload(
- kwargs=kwargs,
- response_obj=response_obj,
- start_time=start_time,
- end_time=end_time,
- )
- self.log_queue.append(spend_logs_payload)
+ standard_logging_payload = kwargs.get("standard_logging_object", None)
+
+ # Backwards compatibility with old logging payload
+ if litellm.gcs_pub_sub_use_v1 is True:
+ spend_logs_payload = get_logging_payload(
+ kwargs=kwargs,
+ response_obj=response_obj,
+ start_time=start_time,
+ end_time=end_time,
+ )
+ self.log_queue.append(spend_logs_payload)
+ else:
+ # New logging payload, StandardLoggingPayload
+ self.log_queue.append(standard_logging_payload)
if len(self.log_queue) >= self.batch_size:
await self.async_send_batch()
@@ -155,7 +166,7 @@ class GcsPubSubLogger(CustomBatchLogger):
self.log_queue.clear()
async def publish_message(
- self, message: SpendLogsPayload
+ self, message: Union[SpendLogsPayload, StandardLoggingPayload]
) -> Optional[Dict[str, Any]]:
"""
Publish message to Google Cloud Pub/Sub using REST API
diff --git a/litellm/integrations/humanloop.py b/litellm/integrations/humanloop.py
index fd3463f9e3..853fbe148c 100644
--- a/litellm/integrations/humanloop.py
+++ b/litellm/integrations/humanloop.py
@@ -152,7 +152,7 @@ class HumanloopLogger(CustomLogger):
model: str,
messages: List[AllMessageValues],
non_default_params: dict,
- prompt_id: str,
+ prompt_id: Optional[str],
prompt_variables: Optional[dict],
dynamic_callback_params: StandardCallbackDynamicParams,
) -> Tuple[
@@ -164,6 +164,9 @@ class HumanloopLogger(CustomLogger):
"humanloop_api_key"
) or get_secret_str("HUMANLOOP_API_KEY")
+ if prompt_id is None:
+ raise ValueError("prompt_id is required for Humanloop integration")
+
if humanloop_api_key is None:
return super().get_chat_completion_prompt(
model=model,
diff --git a/litellm/integrations/langfuse/langfuse.py b/litellm/integrations/langfuse/langfuse.py
index f990a316c4..d0472ee638 100644
--- a/litellm/integrations/langfuse/langfuse.py
+++ b/litellm/integrations/langfuse/langfuse.py
@@ -471,9 +471,9 @@ class LangFuseLogger:
# we clean out all extra litellm metadata params before logging
clean_metadata: Dict[str, Any] = {}
if prompt_management_metadata is not None:
- clean_metadata["prompt_management_metadata"] = (
- prompt_management_metadata
- )
+ clean_metadata[
+ "prompt_management_metadata"
+ ] = prompt_management_metadata
if isinstance(metadata, dict):
for key, value in metadata.items():
# generate langfuse tags - Default Tags sent to Langfuse from LiteLLM Proxy
diff --git a/litellm/integrations/langfuse/langfuse_handler.py b/litellm/integrations/langfuse/langfuse_handler.py
index aebe1461b0..f9d27f6cf0 100644
--- a/litellm/integrations/langfuse/langfuse_handler.py
+++ b/litellm/integrations/langfuse/langfuse_handler.py
@@ -19,7 +19,6 @@ else:
class LangFuseHandler:
-
@staticmethod
def get_langfuse_logger_for_request(
standard_callback_dynamic_params: StandardCallbackDynamicParams,
@@ -87,7 +86,9 @@ class LangFuseHandler:
if globalLangfuseLogger is not None:
return globalLangfuseLogger
- credentials_dict: Dict[str, Any] = (
+ credentials_dict: Dict[
+ str, Any
+ ] = (
{}
) # the global langfuse logger uses Environment Variables, there are no dynamic credentials
globalLangfuseLogger = in_memory_dynamic_logger_cache.get_cache(
diff --git a/litellm/integrations/langfuse/langfuse_prompt_management.py b/litellm/integrations/langfuse/langfuse_prompt_management.py
index 1f4ca84db3..dcd3d9933a 100644
--- a/litellm/integrations/langfuse/langfuse_prompt_management.py
+++ b/litellm/integrations/langfuse/langfuse_prompt_management.py
@@ -169,7 +169,7 @@ class LangfusePromptManagement(LangFuseLogger, PromptManagementBase, CustomLogge
model: str,
messages: List[AllMessageValues],
non_default_params: dict,
- prompt_id: str,
+ prompt_id: Optional[str],
prompt_variables: Optional[dict],
dynamic_callback_params: StandardCallbackDynamicParams,
) -> Tuple[
diff --git a/litellm/integrations/langsmith.py b/litellm/integrations/langsmith.py
index 1ef90c1822..0914150db9 100644
--- a/litellm/integrations/langsmith.py
+++ b/litellm/integrations/langsmith.py
@@ -75,7 +75,6 @@ class LangsmithLogger(CustomBatchLogger):
langsmith_project: Optional[str] = None,
langsmith_base_url: Optional[str] = None,
) -> LangsmithCredentialsObject:
-
_credentials_api_key = langsmith_api_key or os.getenv("LANGSMITH_API_KEY")
if _credentials_api_key is None:
raise Exception(
@@ -443,9 +442,9 @@ class LangsmithLogger(CustomBatchLogger):
Otherwise, use the default credentials.
"""
- standard_callback_dynamic_params: Optional[StandardCallbackDynamicParams] = (
- kwargs.get("standard_callback_dynamic_params", None)
- )
+ standard_callback_dynamic_params: Optional[
+ StandardCallbackDynamicParams
+ ] = kwargs.get("standard_callback_dynamic_params", None)
if standard_callback_dynamic_params is not None:
credentials = self.get_credentials_from_env(
langsmith_api_key=standard_callback_dynamic_params.get(
@@ -481,7 +480,6 @@ class LangsmithLogger(CustomBatchLogger):
asyncio.run(self.async_send_batch())
def get_run_by_id(self, run_id):
-
langsmith_api_key = self.default_credentials["LANGSMITH_API_KEY"]
langsmith_api_base = self.default_credentials["LANGSMITH_BASE_URL"]
diff --git a/litellm/integrations/langtrace.py b/litellm/integrations/langtrace.py
index 51cd272ff1..ac1069f440 100644
--- a/litellm/integrations/langtrace.py
+++ b/litellm/integrations/langtrace.py
@@ -1,12 +1,12 @@
import json
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, Union
from litellm.proxy._types import SpanAttributes
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span
- Span = _Span
+ Span = Union[_Span, Any]
else:
Span = Any
diff --git a/litellm/integrations/lunary.py b/litellm/integrations/lunary.py
index fcd781e44e..b24a24e088 100644
--- a/litellm/integrations/lunary.py
+++ b/litellm/integrations/lunary.py
@@ -20,7 +20,6 @@ def parse_tool_calls(tool_calls):
return None
def clean_tool_call(tool_call):
-
serialized = {
"type": tool_call.type,
"id": tool_call.id,
@@ -36,7 +35,6 @@ def parse_tool_calls(tool_calls):
def parse_messages(input):
-
if input is None:
return None
diff --git a/litellm/integrations/mlflow.py b/litellm/integrations/mlflow.py
index 193d1c4ea2..e7a458accf 100644
--- a/litellm/integrations/mlflow.py
+++ b/litellm/integrations/mlflow.py
@@ -48,14 +48,17 @@ class MlflowLogger(CustomLogger):
def _extract_and_set_chat_attributes(self, span, kwargs, response_obj):
try:
- from mlflow.tracing.utils import set_span_chat_messages, set_span_chat_tools
+ from mlflow.tracing.utils import set_span_chat_messages # type: ignore
+ from mlflow.tracing.utils import set_span_chat_tools # type: ignore
except ImportError:
return
inputs = self._construct_input(kwargs)
input_messages = inputs.get("messages", [])
- output_messages = [c.message.model_dump(exclude_none=True)
- for c in getattr(response_obj, "choices", [])]
+ output_messages = [
+ c.message.model_dump(exclude_none=True)
+ for c in getattr(response_obj, "choices", [])
+ ]
if messages := [*input_messages, *output_messages]:
set_span_chat_messages(span, messages)
if tools := inputs.get("tools"):
diff --git a/litellm/integrations/opentelemetry.py b/litellm/integrations/opentelemetry.py
index 0ec7358037..f4fe40738b 100644
--- a/litellm/integrations/opentelemetry.py
+++ b/litellm/integrations/opentelemetry.py
@@ -1,7 +1,7 @@
import os
from dataclasses import dataclass
from datetime import datetime
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast
import litellm
from litellm._logging import verbose_logger
@@ -10,6 +10,7 @@ from litellm.types.services import ServiceLoggerPayload
from litellm.types.utils import (
ChatCompletionMessageToolCall,
Function,
+ StandardCallbackDynamicParams,
StandardLoggingPayload,
)
@@ -22,10 +23,10 @@ if TYPE_CHECKING:
)
from litellm.proxy.proxy_server import UserAPIKeyAuth as _UserAPIKeyAuth
- Span = _Span
- SpanExporter = _SpanExporter
- UserAPIKeyAuth = _UserAPIKeyAuth
- ManagementEndpointLoggingPayload = _ManagementEndpointLoggingPayload
+ Span = Union[_Span, Any]
+ SpanExporter = Union[_SpanExporter, Any]
+ UserAPIKeyAuth = Union[_UserAPIKeyAuth, Any]
+ ManagementEndpointLoggingPayload = Union[_ManagementEndpointLoggingPayload, Any]
else:
Span = Any
SpanExporter = Any
@@ -45,7 +46,6 @@ LITELLM_REQUEST_SPAN_NAME = "litellm_request"
@dataclass
class OpenTelemetryConfig:
-
exporter: Union[str, SpanExporter] = "console"
endpoint: Optional[str] = None
headers: Optional[str] = None
@@ -153,7 +153,6 @@ class OpenTelemetry(CustomLogger):
end_time: Optional[Union[datetime, float]] = None,
event_metadata: Optional[dict] = None,
):
-
from opentelemetry import trace
from opentelemetry.trace import Status, StatusCode
@@ -214,7 +213,6 @@ class OpenTelemetry(CustomLogger):
end_time: Optional[Union[float, datetime]] = None,
event_metadata: Optional[dict] = None,
):
-
from opentelemetry import trace
from opentelemetry.trace import Status, StatusCode
@@ -311,6 +309,8 @@ class OpenTelemetry(CustomLogger):
)
_parent_context, parent_otel_span = self._get_span_context(kwargs)
+ self._add_dynamic_span_processor_if_needed(kwargs)
+
# Span 1: Request sent to litellm SDK
span = self.tracer.start_span(
name=self._get_span_name(kwargs),
@@ -341,6 +341,45 @@ class OpenTelemetry(CustomLogger):
if parent_otel_span is not None:
parent_otel_span.end(end_time=self._to_ns(datetime.now()))
+ def _add_dynamic_span_processor_if_needed(self, kwargs):
+ """
+ Helper method to add a span processor with dynamic headers if needed.
+
+ This allows for per-request configuration of telemetry exporters by
+ extracting headers from standard_callback_dynamic_params.
+ """
+ from opentelemetry import trace
+
+ standard_callback_dynamic_params: Optional[
+ StandardCallbackDynamicParams
+ ] = kwargs.get("standard_callback_dynamic_params")
+ if not standard_callback_dynamic_params:
+ return
+
+ # Extract headers from dynamic params
+ dynamic_headers = {}
+
+ # Handle Arize headers
+ if standard_callback_dynamic_params.get("arize_space_key"):
+ dynamic_headers["space_key"] = standard_callback_dynamic_params.get(
+ "arize_space_key"
+ )
+ if standard_callback_dynamic_params.get("arize_api_key"):
+ dynamic_headers["api_key"] = standard_callback_dynamic_params.get(
+ "arize_api_key"
+ )
+
+ # Only create a span processor if we have headers to use
+ if len(dynamic_headers) > 0:
+ from opentelemetry.sdk.trace import TracerProvider
+
+ provider = trace.get_tracer_provider()
+ if isinstance(provider, TracerProvider):
+ span_processor = self._get_span_processor(
+ dynamic_headers=dynamic_headers
+ )
+ provider.add_span_processor(span_processor)
+
def _handle_failure(self, kwargs, response_obj, start_time, end_time):
from opentelemetry.trace import Status, StatusCode
@@ -443,14 +482,12 @@ class OpenTelemetry(CustomLogger):
self, span: Span, kwargs, response_obj: Optional[Any]
):
try:
- if self.callback_name == "arize":
- from litellm.integrations.arize.arize import ArizeLogger
- ArizeLogger.set_arize_attributes(span, kwargs, response_obj)
- return
- elif self.callback_name == "arize_phoenix":
+ if self.callback_name == "arize_phoenix":
from litellm.integrations.arize.arize_phoenix import ArizePhoenixLogger
- ArizePhoenixLogger.set_arize_phoenix_attributes(span, kwargs, response_obj)
+ ArizePhoenixLogger.set_arize_phoenix_attributes(
+ span, kwargs, response_obj
+ )
return
elif self.callback_name == "langtrace":
from litellm.integrations.langtrace import LangtraceAttributes
@@ -682,7 +719,6 @@ class OpenTelemetry(CustomLogger):
span.set_attribute(key, primitive_value)
def set_raw_request_attributes(self, span: Span, kwargs, response_obj):
-
kwargs.get("optional_params", {})
litellm_params = kwargs.get("litellm_params", {}) or {}
custom_llm_provider = litellm_params.get("custom_llm_provider", "Unknown")
@@ -779,7 +815,7 @@ class OpenTelemetry(CustomLogger):
carrier = {"traceparent": traceparent}
return TraceContextTextMapPropagator().extract(carrier=carrier), None
- def _get_span_processor(self):
+ def _get_span_processor(self, dynamic_headers: Optional[dict] = None):
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
OTLPSpanExporter as OTLPSpanExporterGRPC,
)
@@ -799,17 +835,18 @@ class OpenTelemetry(CustomLogger):
self.OTEL_ENDPOINT,
self.OTEL_HEADERS,
)
- _split_otel_headers = {}
- if self.OTEL_HEADERS is not None and isinstance(self.OTEL_HEADERS, str):
- _split_otel_headers = self.OTEL_HEADERS.split("=")
- _split_otel_headers = {_split_otel_headers[0]: _split_otel_headers[1]}
+ _split_otel_headers = OpenTelemetry._get_headers_dictionary(
+ headers=dynamic_headers or self.OTEL_HEADERS
+ )
- if isinstance(self.OTEL_EXPORTER, SpanExporter):
+ if hasattr(
+ self.OTEL_EXPORTER, "export"
+ ): # Check if it has the export method that SpanExporter requires
verbose_logger.debug(
"OpenTelemetry: intiializing SpanExporter. Value of OTEL_EXPORTER: %s",
self.OTEL_EXPORTER,
)
- return SimpleSpanProcessor(self.OTEL_EXPORTER)
+ return SimpleSpanProcessor(cast(SpanExporter, self.OTEL_EXPORTER))
if self.OTEL_EXPORTER == "console":
verbose_logger.debug(
@@ -844,12 +881,30 @@ class OpenTelemetry(CustomLogger):
)
return BatchSpanProcessor(ConsoleSpanExporter())
+ @staticmethod
+ def _get_headers_dictionary(headers: Optional[Union[str, dict]]) -> Dict[str, str]:
+ """
+ Convert a string or dictionary of headers into a dictionary of headers.
+ """
+ _split_otel_headers: Dict[str, str] = {}
+ if headers:
+ if isinstance(headers, str):
+ # when passed HEADERS="x-honeycomb-team=B85YgLm96******"
+ # Split only on first '=' occurrence
+ parts = headers.split("=", 1)
+ if len(parts) == 2:
+ _split_otel_headers = {parts[0]: parts[1]}
+ else:
+ _split_otel_headers = {}
+ elif isinstance(headers, dict):
+ _split_otel_headers = headers
+ return _split_otel_headers
+
async def async_management_endpoint_success_hook(
self,
logging_payload: ManagementEndpointLoggingPayload,
parent_otel_span: Optional[Span] = None,
):
-
from opentelemetry import trace
from opentelemetry.trace import Status, StatusCode
@@ -903,7 +958,6 @@ class OpenTelemetry(CustomLogger):
logging_payload: ManagementEndpointLoggingPayload,
parent_otel_span: Optional[Span] = None,
):
-
from opentelemetry import trace
from opentelemetry.trace import Status, StatusCode
@@ -948,3 +1002,18 @@ class OpenTelemetry(CustomLogger):
)
management_endpoint_span.set_status(Status(StatusCode.ERROR))
management_endpoint_span.end(end_time=_end_time_ns)
+
+ def create_litellm_proxy_request_started_span(
+ self,
+ start_time: datetime,
+ headers: dict,
+ ) -> Optional[Span]:
+ """
+ Create a span for the received proxy server request.
+ """
+ return self.tracer.start_span(
+ name="Received Proxy Server Request",
+ start_time=self._to_ns(start_time),
+ context=self.get_traceparent_from_header(headers=headers),
+ kind=self.span_kind.SERVER,
+ )
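A quick illustration of the header normalization added above; `_get_headers_dictionary` splits a `"key=value"` string on the first `=` only and passes dictionaries through unchanged:

```python
from litellm.integrations.opentelemetry import OpenTelemetry

# String form, e.g. OTEL_HEADERS="x-honeycomb-team=B85YgLm96..."
print(OpenTelemetry._get_headers_dictionary("x-honeycomb-team=B85YgLm96abc"))
# {'x-honeycomb-team': 'B85YgLm96abc'}

# Dict form (e.g. the dynamic Arize headers) is returned as-is
print(OpenTelemetry._get_headers_dictionary({"space_key": "sk", "api_key": "ak"}))
# {'space_key': 'sk', 'api_key': 'ak'}
```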
diff --git a/litellm/integrations/opik/opik.py b/litellm/integrations/opik/opik.py
index 1f7f18f336..8cbfb9e653 100644
--- a/litellm/integrations/opik/opik.py
+++ b/litellm/integrations/opik/opik.py
@@ -185,7 +185,6 @@ class OpikLogger(CustomBatchLogger):
def _create_opik_payload( # noqa: PLR0915
self, kwargs, response_obj, start_time, end_time
) -> List[Dict]:
-
# Get metadata
_litellm_params = kwargs.get("litellm_params", {}) or {}
litellm_params_metadata = _litellm_params.get("metadata", {}) or {}
diff --git a/litellm/integrations/prometheus.py b/litellm/integrations/prometheus.py
index d6e47b87ce..f61321e53d 100644
--- a/litellm/integrations/prometheus.py
+++ b/litellm/integrations/prometheus.py
@@ -1,10 +1,19 @@
# used for /metrics endpoint on LiteLLM Proxy
#### What this does ####
# On success, log events to Prometheus
-import asyncio
import sys
from datetime import datetime, timedelta
-from typing import Any, Awaitable, Callable, List, Literal, Optional, Tuple, cast
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ Callable,
+ List,
+ Literal,
+ Optional,
+ Tuple,
+ cast,
+)
import litellm
from litellm._logging import print_verbose, verbose_logger
@@ -14,6 +23,11 @@ from litellm.types.integrations.prometheus import *
from litellm.types.utils import StandardLoggingPayload
from litellm.utils import get_end_user_id_for_cost_tracking
+if TYPE_CHECKING:
+ from apscheduler.schedulers.asyncio import AsyncIOScheduler
+else:
+ AsyncIOScheduler = Any
+
class PrometheusLogger(CustomLogger):
# Class variables or attributes
@@ -359,8 +373,6 @@ class PrometheusLogger(CustomLogger):
label_name="litellm_requests_metric"
),
)
- self._initialize_prometheus_startup_metrics()
-
except Exception as e:
print_verbose(f"Got exception on init prometheus client {str(e)}")
raise e
@@ -818,7 +830,7 @@ class PrometheusLogger(CustomLogger):
requested_model=request_data.get("model", ""),
status_code=str(getattr(original_exception, "status_code", None)),
exception_status=str(getattr(original_exception, "status_code", None)),
- exception_class=str(original_exception.__class__.__name__),
+ exception_class=self._get_exception_class_name(original_exception),
tags=_tags,
)
_labels = prometheus_label_factory(
@@ -917,7 +929,7 @@ class PrometheusLogger(CustomLogger):
api_base=api_base,
api_provider=llm_provider,
exception_status=str(getattr(exception, "status_code", None)),
- exception_class=exception.__class__.__name__,
+ exception_class=self._get_exception_class_name(exception),
requested_model=model_group,
hashed_api_key=standard_logging_payload["metadata"][
"user_api_key_hash"
@@ -1146,6 +1158,22 @@ class PrometheusLogger(CustomLogger):
)
return
+ @staticmethod
+ def _get_exception_class_name(exception: Exception) -> str:
+ exception_class_name = ""
+ if hasattr(exception, "llm_provider"):
+ exception_class_name = getattr(exception, "llm_provider") or ""
+
+ # pretty print the provider name on prometheus
+ # eg. `openai` -> `Openai.`
+ if len(exception_class_name) >= 1:
+ exception_class_name = (
+ exception_class_name[0].upper() + exception_class_name[1:] + "."
+ )
+
+ exception_class_name += exception.__class__.__name__
+ return exception_class_name
+
async def log_success_fallback_event(
self, original_model_group: str, kwargs: dict, original_exception: Exception
):
@@ -1181,7 +1209,7 @@ class PrometheusLogger(CustomLogger):
team=standard_metadata["user_api_key_team_id"],
team_alias=standard_metadata["user_api_key_team_alias"],
exception_status=str(getattr(original_exception, "status_code", None)),
- exception_class=str(original_exception.__class__.__name__),
+ exception_class=self._get_exception_class_name(original_exception),
tags=_tags,
)
_labels = prometheus_label_factory(
@@ -1225,7 +1253,7 @@ class PrometheusLogger(CustomLogger):
team=standard_metadata["user_api_key_team_id"],
team_alias=standard_metadata["user_api_key_team_alias"],
exception_status=str(getattr(original_exception, "status_code", None)),
- exception_class=str(original_exception.__class__.__name__),
+ exception_class=self._get_exception_class_name(original_exception),
tags=_tags,
)
@@ -1321,24 +1349,6 @@ class PrometheusLogger(CustomLogger):
return max_budget - spend
- def _initialize_prometheus_startup_metrics(self):
- """
- Initialize prometheus startup metrics
-
- Helper to create tasks for initializing metrics that are required on startup - eg. remaining budget metrics
- """
- if litellm.prometheus_initialize_budget_metrics is not True:
- verbose_logger.debug("Prometheus: skipping budget metrics initialization")
- return
-
- try:
- if asyncio.get_running_loop():
- asyncio.create_task(self._initialize_remaining_budget_metrics())
- except RuntimeError as e: # no running event loop
- verbose_logger.exception(
- f"No running event loop - skipping budget metrics initialization: {str(e)}"
- )
-
async def _initialize_budget_metrics(
self,
data_fetch_function: Callable[..., Awaitable[Tuple[List[Any], Optional[int]]]],
@@ -1459,12 +1469,41 @@ class PrometheusLogger(CustomLogger):
data_type="keys",
)
- async def _initialize_remaining_budget_metrics(self):
+ async def initialize_remaining_budget_metrics(self):
"""
- Initialize remaining budget metrics for all teams to avoid metric discrepancies.
+ Handler for initializing remaining budget metrics for all teams to avoid metric discrepancies.
Runs when prometheus logger starts up.
+
+ - If redis cache is available, we use the pod lock manager to acquire a lock and initialize the metrics.
+ - Ensures only one pod emits the metrics at a time.
+ - If redis cache is not available, we initialize the metrics directly.
"""
+ from litellm.constants import PROMETHEUS_EMIT_BUDGET_METRICS_JOB_NAME
+ from litellm.proxy.proxy_server import proxy_logging_obj
+
+ pod_lock_manager = proxy_logging_obj.db_spend_update_writer.pod_lock_manager
+
+ # if using redis, ensure only one pod emits the metrics at a time
+ if pod_lock_manager and pod_lock_manager.redis_cache:
+ if await pod_lock_manager.acquire_lock(
+ cronjob_id=PROMETHEUS_EMIT_BUDGET_METRICS_JOB_NAME
+ ):
+ try:
+ await self._initialize_remaining_budget_metrics()
+ finally:
+ await pod_lock_manager.release_lock(
+ cronjob_id=PROMETHEUS_EMIT_BUDGET_METRICS_JOB_NAME
+ )
+ else:
+ # if not using redis, initialize the metrics directly
+ await self._initialize_remaining_budget_metrics()
+
+ async def _initialize_remaining_budget_metrics(self):
+ """
+ Helper to initialize remaining budget metrics for all teams and API keys.
+ """
+ verbose_logger.debug("Emitting key, team budget metrics....")
await self._initialize_team_budget_metrics()
await self._initialize_api_key_budget_metrics()
@@ -1721,6 +1760,66 @@ class PrometheusLogger(CustomLogger):
return (end_time - start_time).total_seconds()
return None
+ @staticmethod
+ def initialize_budget_metrics_cron_job(scheduler: AsyncIOScheduler):
+ """
+ Initialize budget metrics as a cron job. This job runs every `PROMETHEUS_BUDGET_METRICS_REFRESH_INTERVAL_MINUTES` minutes.
+
+ It emits the current remaining budget metrics for all Keys and Teams.
+ """
+ from litellm.constants import PROMETHEUS_BUDGET_METRICS_REFRESH_INTERVAL_MINUTES
+ from litellm.integrations.custom_logger import CustomLogger
+ from litellm.integrations.prometheus import PrometheusLogger
+
+ prometheus_loggers: List[CustomLogger] = (
+ litellm.logging_callback_manager.get_custom_loggers_for_type(
+ callback_type=PrometheusLogger
+ )
+ )
+ # we need to get the initialized prometheus logger instance(s) and call logger.initialize_remaining_budget_metrics() on them
+ verbose_logger.debug("found %s prometheus loggers", len(prometheus_loggers))
+ if len(prometheus_loggers) > 0:
+ prometheus_logger = cast(PrometheusLogger, prometheus_loggers[0])
+ verbose_logger.debug(
+ "Initializing remaining budget metrics as a cron job executing every %s minutes"
+ % PROMETHEUS_BUDGET_METRICS_REFRESH_INTERVAL_MINUTES
+ )
+ scheduler.add_job(
+ prometheus_logger.initialize_remaining_budget_metrics,
+ "interval",
+ minutes=PROMETHEUS_BUDGET_METRICS_REFRESH_INTERVAL_MINUTES,
+ )
+
+ @staticmethod
+ def _mount_metrics_endpoint(premium_user: bool):
+ """
+ Mount the Prometheus metrics endpoint on the proxy app.
+
+ Args:
+ premium_user (bool): Whether the user is a premium user
+ """
+ from prometheus_client import make_asgi_app
+
+ from litellm._logging import verbose_proxy_logger
+ from litellm.proxy._types import CommonProxyErrors
+ from litellm.proxy.proxy_server import app
+
+ if premium_user is not True:
+ verbose_proxy_logger.warning(
+ f"Prometheus metrics are only available for premium users. {CommonProxyErrors.not_premium_user.value}"
+ )
+
+ # Create metrics ASGI app
+ metrics_app = make_asgi_app()
+
+ # Mount the metrics app to the app
+ app.mount("/metrics", metrics_app)
+ verbose_proxy_logger.debug(
+ "Starting Prometheus Metrics on /metrics (no authentication)"
+ )
+
def prometheus_label_factory(
supported_enum_labels: List[str],
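A note on the pod-lock flow added above: when a Redis-backed pod lock manager is available, only the pod that acquires the `PROMETHEUS_EMIT_BUDGET_METRICS_JOB_NAME` lock emits the key/team budget metrics, and the lock is released in a `finally` block so a failed emission cannot wedge the cron job. A minimal standalone sketch of that acquire/emit/release pattern (the lock-manager interface here is a hypothetical stand-in, not the proxy's actual class):

```python
from typing import Awaitable, Callable, Optional, Protocol


class LockManager(Protocol):
    # hypothetical interface mirroring acquire_lock/release_lock used above
    async def acquire_lock(self, cronjob_id: str) -> bool: ...
    async def release_lock(self, cronjob_id: str) -> None: ...


async def emit_once_per_cluster(
    lock_manager: Optional[LockManager],
    cronjob_id: str,
    emit_metrics: Callable[[], Awaitable[None]],
) -> None:
    """Run emit_metrics on exactly one pod when a shared lock backend exists."""
    if lock_manager is None:
        # no shared backend -> this pod emits directly
        await emit_metrics()
        return
    if await lock_manager.acquire_lock(cronjob_id=cronjob_id):
        try:
            await emit_metrics()
        finally:
            # always release, even if emission raises
            await lock_manager.release_lock(cronjob_id=cronjob_id)
```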
diff --git a/litellm/integrations/prometheus_services.py b/litellm/integrations/prometheus_services.py
index 4bf293fb01..a5f2f0b5c7 100644
--- a/litellm/integrations/prometheus_services.py
+++ b/litellm/integrations/prometheus_services.py
@@ -3,11 +3,16 @@
# On success + failure, log events to Prometheus for litellm / adjacent services (litellm, redis, postgres, llm api providers)
-from typing import List, Optional, Union
+from typing import Dict, List, Optional, Union
from litellm._logging import print_verbose, verbose_logger
from litellm.types.integrations.prometheus import LATENCY_BUCKETS
-from litellm.types.services import ServiceLoggerPayload, ServiceTypes
+from litellm.types.services import (
+ DEFAULT_SERVICE_CONFIGS,
+ ServiceLoggerPayload,
+ ServiceMetrics,
+ ServiceTypes,
+)
FAILED_REQUESTS_LABELS = ["error_class", "function_name"]
@@ -23,7 +28,8 @@ class PrometheusServicesLogger:
):
try:
try:
- from prometheus_client import REGISTRY, Counter, Histogram
+ from prometheus_client import REGISTRY, Counter, Gauge, Histogram
+ from prometheus_client.gc_collector import Collector
except ImportError:
raise Exception(
"Missing prometheus_client. Run `pip install prometheus-client`"
@@ -31,36 +37,51 @@ class PrometheusServicesLogger:
self.Histogram = Histogram
self.Counter = Counter
+ self.Gauge = Gauge
self.REGISTRY = REGISTRY
verbose_logger.debug("in init prometheus services metrics")
- self.services = [item.value for item in ServiceTypes]
+ self.payload_to_prometheus_map: Dict[
+ str, List[Union[Histogram, Counter, Gauge, Collector]]
+ ] = {}
- self.payload_to_prometheus_map = (
- {}
- ) # store the prometheus histogram/counter we need to call for each field in payload
+ for service in ServiceTypes:
+ service_metrics: List[Union[Histogram, Counter, Gauge, Collector]] = []
- for service in self.services:
- histogram = self.create_histogram(service, type_of_request="latency")
- counter_failed_request = self.create_counter(
- service,
- type_of_request="failed_requests",
- additional_labels=FAILED_REQUESTS_LABELS,
- )
- counter_total_requests = self.create_counter(
- service, type_of_request="total_requests"
- )
- self.payload_to_prometheus_map[service] = [
- histogram,
- counter_failed_request,
- counter_total_requests,
- ]
+ metrics_to_initialize = self._get_service_metrics_initialize(service)
- self.prometheus_to_amount_map: dict = (
- {}
- ) # the field / value in ServiceLoggerPayload the object needs to be incremented by
+ # Initialize only the configured metrics for each service
+ if ServiceMetrics.HISTOGRAM in metrics_to_initialize:
+ histogram = self.create_histogram(
+ service.value, type_of_request="latency"
+ )
+ if histogram:
+ service_metrics.append(histogram)
+ if ServiceMetrics.COUNTER in metrics_to_initialize:
+ counter_failed_request = self.create_counter(
+ service.value,
+ type_of_request="failed_requests",
+ additional_labels=FAILED_REQUESTS_LABELS,
+ )
+ if counter_failed_request:
+ service_metrics.append(counter_failed_request)
+ counter_total_requests = self.create_counter(
+ service.value, type_of_request="total_requests"
+ )
+ if counter_total_requests:
+ service_metrics.append(counter_total_requests)
+
+ if ServiceMetrics.GAUGE in metrics_to_initialize:
+ gauge = self.create_gauge(service.value, type_of_request="size")
+ if gauge:
+ service_metrics.append(gauge)
+
+ if service_metrics:
+ self.payload_to_prometheus_map[service.value] = service_metrics
+
+ self.prometheus_to_amount_map: dict = {}
### MOCK TESTING ###
self.mock_testing = mock_testing
self.mock_testing_success_calls = 0
@@ -70,6 +91,19 @@ class PrometheusServicesLogger:
print_verbose(f"Got exception on init prometheus client {str(e)}")
raise e
+ def _get_service_metrics_initialize(
+ self, service: ServiceTypes
+ ) -> List[ServiceMetrics]:
+ DEFAULT_METRICS = [ServiceMetrics.COUNTER, ServiceMetrics.HISTOGRAM]
+ if service not in DEFAULT_SERVICE_CONFIGS:
+ return DEFAULT_METRICS
+
+ metrics = DEFAULT_SERVICE_CONFIGS.get(service, {}).get("metrics", [])
+ if not metrics:
+ verbose_logger.debug(f"No metrics found for service {service}")
+ return DEFAULT_METRICS
+ return metrics
+
def is_metric_registered(self, metric_name) -> bool:
for metric in self.REGISTRY.collect():
if metric_name == metric.name:
@@ -94,6 +128,15 @@ class PrometheusServicesLogger:
buckets=LATENCY_BUCKETS,
)
+ def create_gauge(self, service: str, type_of_request: str):
+ metric_name = "litellm_{}_{}".format(service, type_of_request)
+ is_registered = self.is_metric_registered(metric_name)
+ if is_registered:
+ return self._get_metric(metric_name)
+ return self.Gauge(
+ metric_name, "Gauge for {} service".format(service), labelnames=[service]
+ )
+
def create_counter(
self,
service: str,
@@ -120,6 +163,15 @@ class PrometheusServicesLogger:
histogram.labels(labels).observe(amount)
+ def update_gauge(
+ self,
+ gauge,
+ labels: str,
+ amount: float,
+ ):
+ assert isinstance(gauge, self.Gauge)
+ gauge.labels(labels).set(amount)
+
def increment_counter(
self,
counter,
@@ -190,6 +242,13 @@ class PrometheusServicesLogger:
labels=payload.service.value,
amount=1, # LOG TOTAL REQUESTS TO PROMETHEUS
)
+ elif isinstance(obj, self.Gauge):
+ if payload.event_metadata:
+ self.update_gauge(
+ gauge=obj,
+ labels=payload.event_metadata.get("gauge_labels") or "",
+ amount=payload.event_metadata.get("gauge_value") or 0,
+ )
async def async_service_failure_hook(
self,
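For context on the new `ServiceMetrics.GAUGE` path: gauges follow the usual prometheus_client pattern of creating the metric once (or reusing it if already registered) and then calling `.labels(...).set(...)` per event, rather than `.inc()`/`.observe()`. A small illustrative sketch using prometheus_client directly, with a made-up metric for the redis service:

```python
from prometheus_client import Gauge

# one gauge per service, labelled with the service name (mirrors create_gauge above)
redis_size_gauge = Gauge(
    "litellm_redis_size",
    "Gauge for redis service",
    labelnames=["redis"],
)


def record_redis_queue_size(current_size: float) -> None:
    # mirrors update_gauge(): a gauge is set to the latest value, not incremented
    redis_size_gauge.labels("redis").set(current_size)


record_redis_queue_size(42.0)
```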
diff --git a/litellm/integrations/prompt_management_base.py b/litellm/integrations/prompt_management_base.py
index 3fe3b31ed8..270c34be8a 100644
--- a/litellm/integrations/prompt_management_base.py
+++ b/litellm/integrations/prompt_management_base.py
@@ -14,7 +14,6 @@ class PromptManagementClient(TypedDict):
class PromptManagementBase(ABC):
-
@property
@abstractmethod
def integration_name(self) -> str:
@@ -80,14 +79,12 @@ class PromptManagementBase(ABC):
model: str,
messages: List[AllMessageValues],
non_default_params: dict,
- prompt_id: str,
+ prompt_id: Optional[str],
prompt_variables: Optional[dict],
dynamic_callback_params: StandardCallbackDynamicParams,
- ) -> Tuple[
- str,
- List[AllMessageValues],
- dict,
- ]:
+ ) -> Tuple[str, List[AllMessageValues], dict]:
+ if prompt_id is None:
+ raise ValueError("prompt_id is required for Prompt Management Base class")
if not self.should_run_prompt_management(
prompt_id=prompt_id, dynamic_callback_params=dynamic_callback_params
):
diff --git a/litellm/integrations/s3.py b/litellm/integrations/s3.py
index 4a0c27354f..01b9248e03 100644
--- a/litellm/integrations/s3.py
+++ b/litellm/integrations/s3.py
@@ -38,7 +38,7 @@ class S3Logger:
if litellm.s3_callback_params is not None:
# read in .env variables - example os.environ/AWS_BUCKET_NAME
for key, value in litellm.s3_callback_params.items():
- if type(value) is str and value.startswith("os.environ/"):
+ if isinstance(value, str) and value.startswith("os.environ/"):
litellm.s3_callback_params[key] = litellm.get_secret(value)
# now set s3 params from litellm.s3_logger_params
s3_bucket_name = litellm.s3_callback_params.get("s3_bucket_name")
diff --git a/litellm/integrations/weights_biases.py b/litellm/integrations/weights_biases.py
index 5fcbab04b3..63d87c9bd9 100644
--- a/litellm/integrations/weights_biases.py
+++ b/litellm/integrations/weights_biases.py
@@ -21,11 +21,11 @@ try:
# contains a (known) object attribute
object: Literal["chat.completion", "edit", "text_completion"]
- def __getitem__(self, key: K) -> V: ... # noqa
+ def __getitem__(self, key: K) -> V:
+ ... # noqa
- def get( # noqa
- self, key: K, default: Optional[V] = None
- ) -> Optional[V]: ... # pragma: no cover
+ def get(self, key: K, default: Optional[V] = None) -> Optional[V]: # noqa
+ ... # pragma: no cover
class OpenAIRequestResponseResolver:
def __call__(
diff --git a/litellm/litellm_core_utils/core_helpers.py b/litellm/litellm_core_utils/core_helpers.py
index 2036b93692..275c53ad30 100644
--- a/litellm/litellm_core_utils/core_helpers.py
+++ b/litellm/litellm_core_utils/core_helpers.py
@@ -10,7 +10,7 @@ from litellm.types.llms.openai import AllMessageValues
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span
- Span = _Span
+ Span = Union[_Span, Any]
else:
Span = Any
diff --git a/litellm/litellm_core_utils/credential_accessor.py b/litellm/litellm_core_utils/credential_accessor.py
index d87dcc116b..45e1ea2c49 100644
--- a/litellm/litellm_core_utils/credential_accessor.py
+++ b/litellm/litellm_core_utils/credential_accessor.py
@@ -10,6 +10,7 @@ class CredentialAccessor:
@staticmethod
def get_credential_values(credential_name: str) -> dict:
"""Safe accessor for credentials."""
+
if not litellm.credential_list:
return {}
for credential in litellm.credential_list:
diff --git a/litellm/litellm_core_utils/default_encoding.py b/litellm/litellm_core_utils/default_encoding.py
index 05bf78a6a9..93b3132912 100644
--- a/litellm/litellm_core_utils/default_encoding.py
+++ b/litellm/litellm_core_utils/default_encoding.py
@@ -11,7 +11,9 @@ except (ImportError, AttributeError):
# Old way to access resources, which setuptools deprecated some time ago
import pkg_resources # type: ignore
- filename = pkg_resources.resource_filename(__name__, "litellm_core_utils/tokenizers")
+ filename = pkg_resources.resource_filename(
+ __name__, "litellm_core_utils/tokenizers"
+ )
os.environ["TIKTOKEN_CACHE_DIR"] = os.getenv(
"CUSTOM_TIKTOKEN_CACHE_DIR", filename
diff --git a/litellm/litellm_core_utils/get_litellm_params.py b/litellm/litellm_core_utils/get_litellm_params.py
index 4f2f43f0de..f40f1ae4c7 100644
--- a/litellm/litellm_core_utils/get_litellm_params.py
+++ b/litellm/litellm_core_utils/get_litellm_params.py
@@ -110,5 +110,8 @@ def get_litellm_params(
"azure_password": kwargs.get("azure_password"),
"max_retries": max_retries,
"timeout": kwargs.get("timeout"),
+ "bucket_name": kwargs.get("bucket_name"),
+ "vertex_credentials": kwargs.get("vertex_credentials"),
+ "vertex_project": kwargs.get("vertex_project"),
}
return litellm_params
diff --git a/litellm/litellm_core_utils/get_llm_provider_logic.py b/litellm/litellm_core_utils/get_llm_provider_logic.py
index c5df6f83a2..d14d4ad706 100644
--- a/litellm/litellm_core_utils/get_llm_provider_logic.py
+++ b/litellm/litellm_core_utils/get_llm_provider_logic.py
@@ -3,6 +3,7 @@ from typing import Optional, Tuple
import httpx
import litellm
+from litellm.constants import REPLICATE_MODEL_NAME_WITH_ID_LENGTH
from litellm.secret_managers.main import get_secret, get_secret_str
from ..types.router import LiteLLM_Params
@@ -129,17 +130,15 @@ def get_llm_provider( # noqa: PLR0915
model, custom_llm_provider
)
- if custom_llm_provider:
- if (
- model.split("/")[0] == custom_llm_provider
- ): # handle scenario where model="azure/*" and custom_llm_provider="azure"
- model = model.replace("{}/".format(custom_llm_provider), "")
-
- return model, custom_llm_provider, dynamic_api_key, api_base
+ if custom_llm_provider and (
+ model.split("/")[0] != custom_llm_provider
+ ): # prepend the provider if missing - e.g. model="gpt-4o", custom_llm_provider="azure" -> "azure/gpt-4o"
+ model = custom_llm_provider + "/" + model
if api_key and api_key.startswith("os.environ/"):
dynamic_api_key = get_secret_str(api_key)
# check if llm provider part of model name
+
if (
model.split("/", 1)[0] in litellm.provider_list
and model.split("/", 1)[0] not in litellm.model_list_set
@@ -258,10 +257,13 @@ def get_llm_provider( # noqa: PLR0915
elif model in litellm.cohere_chat_models:
custom_llm_provider = "cohere_chat"
## replicate
- elif model in litellm.replicate_models or (":" in model and len(model) > 64):
+ elif model in litellm.replicate_models or (
+ ":" in model and len(model) > REPLICATE_MODEL_NAME_WITH_ID_LENGTH
+ ):
model_parts = model.split(":")
if (
- len(model_parts) > 1 and len(model_parts[1]) == 64
+ len(model_parts) > 1
+ and len(model_parts[1]) == REPLICATE_MODEL_NAME_WITH_ID_LENGTH
): ## checks if model name has a 64 digit code - e.g. "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3"
custom_llm_provider = "replicate"
elif model in litellm.replicate_models:
@@ -584,11 +586,11 @@ def _get_openai_compatible_provider_info( # noqa: PLR0915
dynamic_api_key = api_key or get_secret_str("GALADRIEL_API_KEY")
elif custom_llm_provider == "snowflake":
api_base = (
- api_base
- or get_secret("SNOWFLAKE_API_BASE")
- or f"https://{get_secret('SNOWFLAKE_ACCOUNT_ID')}.snowflakecomputing.com/api/v2/cortex/inference:complete"
- ) # type: ignore
- dynamic_api_key = api_key or get_secret("SNOWFLAKE_JWT")
+ api_base
+ or get_secret_str("SNOWFLAKE_API_BASE")
+ or f"https://{get_secret('SNOWFLAKE_ACCOUNT_ID')}.snowflakecomputing.com/api/v2/cortex/inference:complete"
+ ) # type: ignore
+ dynamic_api_key = api_key or get_secret_str("SNOWFLAKE_JWT")
if api_base is not None and not isinstance(api_base, str):
raise Exception("api base needs to be a string. api_base={}".format(api_base))
diff --git a/litellm/litellm_core_utils/get_model_cost_map.py b/litellm/litellm_core_utils/get_model_cost_map.py
index b8bdaee19c..b6a3a243c4 100644
--- a/litellm/litellm_core_utils/get_model_cost_map.py
+++ b/litellm/litellm_core_utils/get_model_cost_map.py
@@ -13,7 +13,7 @@ import os
import httpx
-def get_model_cost_map(url: str):
+def get_model_cost_map(url: str) -> dict:
if (
os.getenv("LITELLM_LOCAL_MODEL_COST_MAP", False)
or os.getenv("LITELLM_LOCAL_MODEL_COST_MAP", False) == "True"
diff --git a/litellm/litellm_core_utils/get_supported_openai_params.py b/litellm/litellm_core_utils/get_supported_openai_params.py
index 79dbb42ffb..c9d53d9e01 100644
--- a/litellm/litellm_core_utils/get_supported_openai_params.py
+++ b/litellm/litellm_core_utils/get_supported_openai_params.py
@@ -1,8 +1,8 @@
from typing import Literal, Optional
import litellm
-from litellm import LlmProviders
from litellm.exceptions import BadRequestError
+from litellm.types.utils import LlmProviders, LlmProvidersSet
def get_supported_openai_params( # noqa: PLR0915
@@ -30,6 +30,20 @@ def get_supported_openai_params( # noqa: PLR0915
except BadRequestError:
return None
+ if custom_llm_provider in LlmProvidersSet:
+ provider_config = litellm.ProviderConfigManager.get_provider_chat_config(
+ model=model, provider=LlmProviders(custom_llm_provider)
+ )
+ elif custom_llm_provider.split("/")[0] in LlmProvidersSet:
+ provider_config = litellm.ProviderConfigManager.get_provider_chat_config(
+ model=model, provider=LlmProviders(custom_llm_provider.split("/")[0])
+ )
+ else:
+ provider_config = None
+
+ if provider_config and request_type == "chat_completion":
+ return provider_config.get_supported_openai_params(model=model)
+
if custom_llm_provider == "bedrock":
return litellm.AmazonConverseConfig().get_supported_openai_params(model=model)
elif custom_llm_provider == "ollama":
@@ -79,6 +93,22 @@ def get_supported_openai_params( # noqa: PLR0915
elif custom_llm_provider == "maritalk":
return litellm.MaritalkConfig().get_supported_openai_params(model=model)
elif custom_llm_provider == "openai":
+ if request_type == "transcription":
+ transcription_provider_config = (
+ litellm.ProviderConfigManager.get_provider_audio_transcription_config(
+ model=model, provider=LlmProviders.OPENAI
+ )
+ )
+ if isinstance(
+ transcription_provider_config, litellm.OpenAIGPTAudioTranscriptionConfig
+ ):
+ return transcription_provider_config.get_supported_openai_params(
+ model=model
+ )
+ else:
+ raise ValueError(
+ f"Unsupported provider config: {transcription_provider_config} for model: {model}"
+ )
return litellm.OpenAIConfig().get_supported_openai_params(model=model)
elif custom_llm_provider == "azure":
if litellm.AzureOpenAIO1Config().is_o_series_model(model=model):
@@ -104,7 +134,7 @@ def get_supported_openai_params( # noqa: PLR0915
elif custom_llm_provider == "replicate":
return litellm.ReplicateConfig().get_supported_openai_params(model=model)
elif custom_llm_provider == "huggingface":
- return litellm.HuggingfaceConfig().get_supported_openai_params(model=model)
+ return litellm.HuggingFaceChatConfig().get_supported_openai_params(model=model)
elif custom_llm_provider == "jina_ai":
if request_type == "embeddings":
return litellm.JinaAIEmbeddingConfig().get_supported_openai_params()
@@ -191,6 +221,8 @@ def get_supported_openai_params( # noqa: PLR0915
return litellm.PredibaseConfig().get_supported_openai_params(model=model)
elif custom_llm_provider == "voyage":
return litellm.VoyageEmbeddingConfig().get_supported_openai_params(model=model)
+ elif custom_llm_provider == "infinity":
+ return litellm.InfinityEmbeddingConfig().get_supported_openai_params(model=model)
elif custom_llm_provider == "triton":
if request_type == "embeddings":
return litellm.TritonEmbeddingConfig().get_supported_openai_params(
@@ -215,7 +247,8 @@ def get_supported_openai_params( # noqa: PLR0915
provider_config = litellm.ProviderConfigManager.get_provider_chat_config(
model=model, provider=LlmProviders.CUSTOM
)
- return provider_config.get_supported_openai_params(model=model)
+ if provider_config:
+ return provider_config.get_supported_openai_params(model=model)
elif request_type == "embeddings":
return None
elif request_type == "transcription":
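The new early branch in `get_supported_openai_params` resolves most providers through their registered provider config before falling back to the long if/elif chain; if the provider string itself is not in `LlmProvidersSet`, its prefix before `/` is tried. A hedged sketch of that lookup order with a plain dict standing in for the config manager:

```python
from typing import Dict, List, Optional

# hypothetical registry standing in for ProviderConfigManager lookups
SUPPORTED_PARAMS_BY_PROVIDER: Dict[str, List[str]] = {
    "openai": ["temperature", "max_tokens", "tools"],
    "anthropic": ["temperature", "max_tokens"],
}


def resolve_supported_params(custom_llm_provider: str) -> Optional[List[str]]:
    # exact provider match first, then the prefix before "/", else fall through
    if custom_llm_provider in SUPPORTED_PARAMS_BY_PROVIDER:
        return SUPPORTED_PARAMS_BY_PROVIDER[custom_llm_provider]
    prefix = custom_llm_provider.split("/")[0]
    if prefix in SUPPORTED_PARAMS_BY_PROVIDER:
        return SUPPORTED_PARAMS_BY_PROVIDER[prefix]
    return None  # handled by the provider-specific branches


print(resolve_supported_params("openai"))            # exact match
print(resolve_supported_params("anthropic/claude"))  # prefix match
print(resolve_supported_params("unknown"))           # None -> fall through
```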
diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py
index a369b7f3e3..77d4fd7d5d 100644
--- a/litellm/litellm_core_utils/litellm_logging.py
+++ b/litellm/litellm_core_utils/litellm_logging.py
@@ -28,12 +28,26 @@ from litellm._logging import _is_debugging_on, verbose_logger
from litellm.batches.batch_utils import _handle_completed_batch
from litellm.caching.caching import DualCache, InMemoryCache
from litellm.caching.caching_handler import LLMCachingHandler
-from litellm.cost_calculator import _select_model_name_for_cost_calc
+
+from litellm.constants import (
+ DEFAULT_MOCK_RESPONSE_COMPLETION_TOKEN_COUNT,
+ DEFAULT_MOCK_RESPONSE_PROMPT_TOKEN_COUNT,
+)
+from litellm.cost_calculator import (
+ RealtimeAPITokenUsageProcessor,
+ _select_model_name_for_cost_calc,
+)
+from litellm.integrations.agentops import AgentOps
+from litellm.integrations.anthropic_cache_control_hook import AnthropicCacheControlHook
+from litellm.integrations.arize.arize import ArizeLogger
from litellm.integrations.custom_guardrail import CustomGuardrail
from litellm.integrations.custom_logger import CustomLogger
from litellm.integrations.mlflow import MlflowLogger
from litellm.integrations.pagerduty.pagerduty import PagerDutyAlerting
from litellm.litellm_core_utils.get_litellm_params import get_litellm_params
+from litellm.litellm_core_utils.llm_cost_calc.tool_call_cost_tracking import (
+ StandardBuiltInToolCostTracking,
+)
from litellm.litellm_core_utils.model_param_helper import ModelParamHelper
from litellm.litellm_core_utils.redact_messages import (
redact_message_input_output_from_custom_logger,
@@ -45,6 +59,7 @@ from litellm.types.llms.openai import (
Batch,
FineTuningJob,
HttpxBinaryResponseContent,
+ OpenAIFileObject,
ResponseCompletedEvent,
ResponsesAPIResponse,
)
@@ -56,12 +71,15 @@ from litellm.types.utils import (
ImageResponse,
LiteLLMBatch,
LiteLLMLoggingBaseClass,
+ LiteLLMRealtimeStreamLoggingObject,
ModelResponse,
ModelResponseStream,
RawRequestTypedDict,
+ StandardBuiltInToolsParams,
StandardCallbackDynamicParams,
StandardLoggingAdditionalHeaders,
StandardLoggingHiddenParams,
+ StandardLoggingMCPToolCall,
StandardLoggingMetadata,
StandardLoggingModelCostFailureDebugInformation,
StandardLoggingModelInformation,
@@ -76,11 +94,11 @@ from litellm.types.utils import (
from litellm.utils import _get_base_model_from_metadata, executor, print_verbose
from ..integrations.argilla import ArgillaLogger
-from ..integrations.arize.arize import ArizeLogger
from ..integrations.arize.arize_phoenix import ArizePhoenixLogger
from ..integrations.athina import AthinaLogger
from ..integrations.azure_storage.azure_storage import AzureBlobStorageLogger
from ..integrations.braintrust_logging import BraintrustLogger
+from ..integrations.custom_prompt_management import CustomPromptManagement
from ..integrations.datadog.datadog import DataDogLogger
from ..integrations.datadog.datadog_llm_obs import DataDogLLMObsLogger
from ..integrations.dynamodb import DyanmoDBLogger
@@ -110,7 +128,6 @@ from .exception_mapping_utils import _get_response_headers
from .initialize_dynamic_callback_params import (
initialize_standard_callback_dynamic_params as _initialize_standard_callback_dynamic_params,
)
-from .logging_utils import _assemble_complete_response_from_streaming_chunks
from .specialty_caches.dynamic_logging_cache import DynamicLoggingCache
try:
@@ -264,7 +281,9 @@ class Logging(LiteLLMLoggingBaseClass):
self.standard_callback_dynamic_params: StandardCallbackDynamicParams = (
self.initialize_standard_callback_dynamic_params(kwargs)
)
-
+ self.standard_built_in_tools_params: StandardBuiltInToolsParams = (
+ self.initialize_standard_built_in_tools_params(kwargs)
+ )
## TIME TO FIRST TOKEN LOGGING ##
self.completion_start_time: Optional[datetime.datetime] = None
self._llm_caching_handler: Optional[LLMCachingHandler] = None
@@ -283,6 +302,7 @@ class Logging(LiteLLMLoggingBaseClass):
"input": _input,
"litellm_params": litellm_params,
"applied_guardrails": applied_guardrails,
+ "model": model,
}
def process_dynamic_callbacks(self):
@@ -369,6 +389,23 @@ class Logging(LiteLLMLoggingBaseClass):
"""
return _initialize_standard_callback_dynamic_params(kwargs)
+ def initialize_standard_built_in_tools_params(
+ self, kwargs: Optional[Dict] = None
+ ) -> StandardBuiltInToolsParams:
+ """
+ Initialize the standard built-in tools params from the kwargs
+
+ Checks kwargs for `web_search_options` or built-in tools and sets the corresponding attributes in StandardBuiltInToolsParams.
+ """
+ return StandardBuiltInToolsParams(
+ web_search_options=StandardBuiltInToolCostTracking._get_web_search_options(
+ kwargs or {}
+ ),
+ file_search=StandardBuiltInToolCostTracking._get_file_search_tool_call(
+ kwargs or {}
+ ),
+ )
+
def update_environment_variables(
self,
litellm_params: Dict,
@@ -421,43 +458,111 @@ class Logging(LiteLLMLoggingBaseClass):
if "custom_llm_provider" in self.model_call_details:
self.custom_llm_provider = self.model_call_details["custom_llm_provider"]
+ def should_run_prompt_management_hooks(
+ self,
+ non_default_params: Dict,
+ prompt_id: Optional[str] = None,
+ ) -> bool:
+ """
+ Return True if prompt management hooks should be run
+ """
+ if prompt_id:
+ return True
+ if AnthropicCacheControlHook.should_use_anthropic_cache_control_hook(
+ non_default_params
+ ):
+ return True
+ return False
+
def get_chat_completion_prompt(
self,
model: str,
messages: List[AllMessageValues],
- non_default_params: dict,
- prompt_id: str,
+ non_default_params: Dict,
+ prompt_id: Optional[str],
prompt_variables: Optional[dict],
+ prompt_management_logger: Optional[CustomLogger] = None,
) -> Tuple[str, List[AllMessageValues], dict]:
+ custom_logger = (
+ prompt_management_logger
+ or self.get_custom_logger_for_prompt_management(
+ model=model, non_default_params=non_default_params
+ )
+ )
- for (
- custom_logger_compatible_callback
- ) in litellm._known_custom_logger_compatible_callbacks:
- if model.startswith(custom_logger_compatible_callback):
+ if custom_logger:
+ (
+ model,
+ messages,
+ non_default_params,
+ ) = custom_logger.get_chat_completion_prompt(
+ model=model,
+ messages=messages,
+ non_default_params=non_default_params or {},
+ prompt_id=prompt_id,
+ prompt_variables=prompt_variables,
+ dynamic_callback_params=self.standard_callback_dynamic_params,
+ )
+ self.messages = messages
+ return model, messages, non_default_params
+
+ def get_custom_logger_for_prompt_management(
+ self, model: str, non_default_params: Dict
+ ) -> Optional[CustomLogger]:
+ """
+ Get a custom logger for prompt management based on model name or available callbacks.
+
+ Args:
+ model: The model name to check for prompt management integration
+ non_default_params: Request params, checked for prompt-management hooks (e.g. cache control injection points)
+
+ Returns:
+ A CustomLogger instance if one is found, None otherwise
+ """
+ # First check if model starts with a known custom logger compatible callback
+ for callback_name in litellm._known_custom_logger_compatible_callbacks:
+ if model.startswith(callback_name):
custom_logger = _init_custom_logger_compatible_class(
- logging_integration=custom_logger_compatible_callback,
+ logging_integration=callback_name,
internal_usage_cache=None,
llm_router=None,
)
+ if custom_logger is not None:
+ self.model_call_details["prompt_integration"] = model.split("/")[0]
+ return custom_logger
- if custom_logger is None:
- continue
- old_name = model
+ # Then check for any registered CustomPromptManagement loggers
+ prompt_management_loggers = (
+ litellm.logging_callback_manager.get_custom_loggers_for_type(
+ callback_type=CustomPromptManagement
+ )
+ )
- model, messages, non_default_params = (
- custom_logger.get_chat_completion_prompt(
- model=model,
- messages=messages,
- non_default_params=non_default_params,
- prompt_id=prompt_id,
- prompt_variables=prompt_variables,
- dynamic_callback_params=self.standard_callback_dynamic_params,
- )
- )
- self.model_call_details["prompt_integration"] = old_name.split("/")[0]
- self.messages = messages
+ if prompt_management_loggers:
+ logger = prompt_management_loggers[0]
+ self.model_call_details["prompt_integration"] = logger.__class__.__name__
+ return logger
- return model, messages, non_default_params
+ if anthropic_cache_control_logger := AnthropicCacheControlHook.get_custom_logger_for_anthropic_cache_control_hook(
+ non_default_params
+ ):
+ self.model_call_details["prompt_integration"] = (
+ anthropic_cache_control_logger.__class__.__name__
+ )
+ return anthropic_cache_control_logger
+
+ return None
+
+ def get_custom_logger_for_anthropic_cache_control_hook(
+ self, non_default_params: Dict
+ ) -> Optional[CustomLogger]:
+ if non_default_params.get("cache_control_injection_points", None):
+ custom_logger = _init_custom_logger_compatible_class(
+ logging_integration="anthropic_cache_control_hook",
+ internal_usage_cache=None,
+ llm_router=None,
+ )
+ return custom_logger
+ return None
def _get_raw_request_body(self, data: Optional[Union[dict, str]]) -> dict:
if data is None:
@@ -471,6 +576,16 @@ class Logging(LiteLLMLoggingBaseClass):
}
return data
+ def _get_masked_api_base(self, api_base: str) -> str:
+ if "key=" in api_base:
+ # Find the position of "key=" in the string
+ key_index = api_base.find("key=") + 4
+ # Mask the last 5 characters after "key="
+ masked_api_base = api_base[:key_index] + "*" * 5 + api_base[-4:]
+ else:
+ masked_api_base = api_base
+ return str(masked_api_base)
+
def _pre_call(self, input, api_key, model=None, additional_args={}):
"""
Common helper function across the sync + async pre-call function
@@ -484,9 +599,11 @@ class Logging(LiteLLMLoggingBaseClass):
model
): # if model name was changes pre-call, overwrite the initial model call name with the new one
self.model_call_details["model"] = model
+ self.model_call_details["litellm_params"]["api_base"] = (
+ self._get_masked_api_base(additional_args.get("api_base", ""))
+ )
def pre_call(self, input, api_key, model=None, additional_args={}): # noqa: PLR0915
-
# Log the exact input to the LLM API
litellm.error_logs["PRE_CALL"] = locals()
try:
@@ -508,19 +625,16 @@ class Logging(LiteLLMLoggingBaseClass):
self.log_raw_request_response is True
or log_raw_request_response is True
):
-
_litellm_params = self.model_call_details.get("litellm_params", {})
_metadata = _litellm_params.get("metadata", {}) or {}
try:
# [Non-blocking Extra Debug Information in metadata]
if turn_off_message_logging is True:
-
_metadata["raw_request"] = (
"redacted by litellm. \
'litellm.turn_off_message_logging=True'"
)
else:
-
curl_command = self._get_request_curl_command(
api_base=additional_args.get("api_base", ""),
headers=additional_args.get("headers", {}),
@@ -551,7 +665,6 @@ class Logging(LiteLLMLoggingBaseClass):
error=str(e),
)
)
- traceback.print_exc()
_metadata["raw_request"] = (
"Unable to Log \
raw request: {}".format(
@@ -667,15 +780,6 @@ class Logging(LiteLLMLoggingBaseClass):
headers = {}
data = additional_args.get("complete_input_dict", {})
api_base = str(additional_args.get("api_base", ""))
- if "key=" in api_base:
- # Find the position of "key=" in the string
- key_index = api_base.find("key=") + 4
- # Mask the last 5 characters after "key="
- masked_api_base = api_base[:key_index] + "*" * 5 + api_base[-4:]
- else:
- masked_api_base = api_base
- self.model_call_details["litellm_params"]["api_base"] = masked_api_base
-
curl_command = self._get_request_curl_command(
api_base=api_base,
headers=headers,
@@ -690,11 +794,12 @@ class Logging(LiteLLMLoggingBaseClass):
def _get_request_curl_command(
self, api_base: str, headers: Optional[dict], additional_args: dict, data: dict
) -> str:
+ masked_api_base = self._get_masked_api_base(api_base)
if headers is None:
headers = {}
curl_command = "\n\nPOST Request Sent from LiteLLM:\n"
curl_command += "curl -X POST \\\n"
- curl_command += f"{api_base} \\\n"
+ curl_command += f"{masked_api_base} \\\n"
masked_headers = self._get_masked_headers(headers)
formatted_headers = " ".join(
[f"-H '{k}: {v}'" for k, v in masked_headers.items()]
@@ -840,8 +945,12 @@ class Logging(LiteLLMLoggingBaseClass):
FineTuningJob,
ResponsesAPIResponse,
ResponseCompletedEvent,
+ OpenAIFileObject,
+ LiteLLMRealtimeStreamLoggingObject,
],
cache_hit: Optional[bool] = None,
+ litellm_model_name: Optional[str] = None,
+ router_model_id: Optional[str] = None,
) -> Optional[float]:
"""
Calculate response cost using result + logging object variables.
@@ -867,7 +976,7 @@ class Logging(LiteLLMLoggingBaseClass):
try:
response_cost_calculator_kwargs = {
"response_object": result,
- "model": self.model,
+ "model": litellm_model_name or self.model,
"cache_hit": cache_hit,
"custom_llm_provider": self.model_call_details.get(
"custom_llm_provider", None
@@ -879,6 +988,8 @@ class Logging(LiteLLMLoggingBaseClass):
"optional_params": self.optional_params,
"custom_pricing": custom_pricing,
"prompt": prompt,
+ "standard_built_in_tools_params": self.standard_built_in_tools_params,
+ "router_model_id": router_model_id,
}
except Exception as e: # error creating kwargs for cost calculation
debug_info = StandardLoggingModelCostFailureDebugInformation(
@@ -942,7 +1053,6 @@ class Logging(LiteLLMLoggingBaseClass):
def should_run_callback(
self, callback: litellm.CALLBACK_TYPES, litellm_params: dict, event_hook: str
) -> bool:
-
if litellm.global_disable_no_log_param:
return True
@@ -959,6 +1069,10 @@ class Logging(LiteLLMLoggingBaseClass):
return False
return True
+ def _update_completion_start_time(self, completion_start_time: datetime.datetime):
+ self.completion_start_time = completion_start_time
+ self.model_call_details["completion_start_time"] = self.completion_start_time
+
def _success_handler_helper_fn(
self,
result=None,
@@ -985,26 +1099,51 @@ class Logging(LiteLLMLoggingBaseClass):
result = self._handle_anthropic_messages_response_logging(result=result)
## if model in model cost map - log the response cost
## else set cost to None
+
+ logging_result = result
+
+ if self.call_type == CallTypes.arealtime.value and isinstance(result, list):
+ combined_usage_object = RealtimeAPITokenUsageProcessor.collect_and_combine_usage_from_realtime_stream_results(
+ results=result
+ )
+ logging_result = (
+ RealtimeAPITokenUsageProcessor.create_logging_realtime_object(
+ usage=combined_usage_object,
+ results=result,
+ )
+ )
+
+ # self.model_call_details[
+ # "response_cost"
+ # ] = handle_realtime_stream_cost_calculation(
+ # results=result,
+ # combined_usage_object=combined_usage_object,
+ # custom_llm_provider=self.custom_llm_provider,
+ # litellm_model_name=self.model,
+ # )
+ # self.model_call_details["combined_usage_object"] = combined_usage_object
if (
standard_logging_object is None
and result is not None
and self.stream is not True
):
if (
- isinstance(result, ModelResponse)
- or isinstance(result, ModelResponseStream)
- or isinstance(result, EmbeddingResponse)
- or isinstance(result, ImageResponse)
- or isinstance(result, TranscriptionResponse)
- or isinstance(result, TextCompletionResponse)
- or isinstance(result, HttpxBinaryResponseContent) # tts
- or isinstance(result, RerankResponse)
- or isinstance(result, FineTuningJob)
- or isinstance(result, LiteLLMBatch)
- or isinstance(result, ResponsesAPIResponse)
+ isinstance(logging_result, ModelResponse)
+ or isinstance(logging_result, ModelResponseStream)
+ or isinstance(logging_result, EmbeddingResponse)
+ or isinstance(logging_result, ImageResponse)
+ or isinstance(logging_result, TranscriptionResponse)
+ or isinstance(logging_result, TextCompletionResponse)
+ or isinstance(logging_result, HttpxBinaryResponseContent) # tts
+ or isinstance(logging_result, RerankResponse)
+ or isinstance(logging_result, FineTuningJob)
+ or isinstance(logging_result, LiteLLMBatch)
+ or isinstance(logging_result, ResponsesAPIResponse)
+ or isinstance(logging_result, OpenAIFileObject)
+ or isinstance(logging_result, LiteLLMRealtimeStreamLoggingObject)
):
## HIDDEN PARAMS ##
- hidden_params = getattr(result, "_hidden_params", {})
+ hidden_params = getattr(logging_result, "_hidden_params", {})
if hidden_params:
# add to metadata for logging
if self.model_call_details.get("litellm_params") is not None:
@@ -1022,7 +1161,7 @@ class Logging(LiteLLMLoggingBaseClass):
self.model_call_details["litellm_params"]["metadata"][ # type: ignore
"hidden_params"
] = getattr(
- result, "_hidden_params", {}
+ logging_result, "_hidden_params", {}
)
## RESPONSE COST - Only calculate if not in hidden_params ##
if "response_cost" in hidden_params:
@@ -1031,21 +1170,22 @@ class Logging(LiteLLMLoggingBaseClass):
]
else:
self.model_call_details["response_cost"] = (
- self._response_cost_calculator(result=result)
+ self._response_cost_calculator(result=logging_result)
)
## STANDARDIZED LOGGING PAYLOAD
self.model_call_details["standard_logging_object"] = (
get_standard_logging_object_payload(
kwargs=self.model_call_details,
- init_response_obj=result,
+ init_response_obj=logging_result,
start_time=start_time,
end_time=end_time,
logging_obj=self,
status="success",
+ standard_built_in_tools_params=self.standard_built_in_tools_params,
)
)
- elif isinstance(result, dict): # pass-through endpoints
+ elif isinstance(result, dict) or isinstance(result, list):
## STANDARDIZED LOGGING PAYLOAD
self.model_call_details["standard_logging_object"] = (
get_standard_logging_object_payload(
@@ -1055,6 +1195,7 @@ class Logging(LiteLLMLoggingBaseClass):
end_time=end_time,
logging_obj=self,
status="success",
+ standard_built_in_tools_params=self.standard_built_in_tools_params,
)
)
elif standard_logging_object is not None:
@@ -1078,6 +1219,7 @@ class Logging(LiteLLMLoggingBaseClass):
prompt="",
completion=getattr(result, "content", ""),
total_time=float_diff,
+ standard_built_in_tools_params=self.standard_built_in_tools_params,
)
return start_time, end_time, result
@@ -1098,7 +1240,6 @@ class Logging(LiteLLMLoggingBaseClass):
standard_logging_object=kwargs.get("standard_logging_object", None),
)
try:
-
## BUILD COMPLETE STREAMED RESPONSE
complete_streaming_response: Optional[
Union[ModelResponse, TextCompletionResponse, ResponsesAPIResponse]
@@ -1131,6 +1272,7 @@ class Logging(LiteLLMLoggingBaseClass):
end_time=end_time,
logging_obj=self,
status="success",
+ standard_built_in_tools_params=self.standard_built_in_tools_params,
)
)
callbacks = self.get_combined_callback_list(
@@ -1150,7 +1292,6 @@ class Logging(LiteLLMLoggingBaseClass):
## LOGGING HOOK ##
for callback in callbacks:
if isinstance(callback, CustomLogger):
-
self.model_call_details, result = callback.logging_hook(
kwargs=self.model_call_details,
result=result,
@@ -1602,7 +1743,6 @@ class Logging(LiteLLMLoggingBaseClass):
if self.call_type == CallTypes.aretrieve_batch.value and isinstance(
result, LiteLLMBatch
):
-
response_cost, batch_usage, batch_models = await _handle_completed_batch(
batch=result, custom_llm_provider=self.custom_llm_provider
)
@@ -1671,6 +1811,7 @@ class Logging(LiteLLMLoggingBaseClass):
end_time=end_time,
logging_obj=self,
status="success",
+ standard_built_in_tools_params=self.standard_built_in_tools_params,
)
)
callbacks = self.get_combined_callback_list(
@@ -1887,6 +2028,7 @@ class Logging(LiteLLMLoggingBaseClass):
status="failure",
error_str=str(exception),
original_exception=exception,
+ standard_built_in_tools_params=self.standard_built_in_tools_params,
)
)
return start_time, end_time
@@ -2025,7 +2167,6 @@ class Logging(LiteLLMLoggingBaseClass):
)
is not True
): # custom logger class
-
callback.log_failure_event(
start_time=start_time,
end_time=end_time,
@@ -2351,18 +2492,6 @@ class Logging(LiteLLMLoggingBaseClass):
return result
elif isinstance(result, ResponseCompletedEvent):
return result.response
- elif isinstance(result, ModelResponseStream):
- complete_streaming_response: Optional[
- Union[ModelResponse, TextCompletionResponse]
- ] = _assemble_complete_response_from_streaming_chunks(
- result=result,
- start_time=start_time,
- end_time=end_time,
- request_kwargs=self.model_call_details,
- streaming_chunks=streaming_chunks,
- is_async=is_async,
- )
- return complete_streaming_response
return None
def _handle_anthropic_messages_response_logging(self, result: Any) -> ModelResponse:
@@ -2558,7 +2687,15 @@ def _init_custom_logger_compatible_class( # noqa: PLR0915
"""
try:
custom_logger_init_args = custom_logger_init_args or {}
- if logging_integration == "lago":
+ if logging_integration == "agentops": # Add AgentOps initialization
+ for callback in _in_memory_loggers:
+ if isinstance(callback, AgentOps):
+ return callback # type: ignore
+
+ agentops_logger = AgentOps()
+ _in_memory_loggers.append(agentops_logger)
+ return agentops_logger # type: ignore
+ elif logging_integration == "lago":
for callback in _in_memory_loggers:
if isinstance(callback, LagoLogger):
return callback # type: ignore
@@ -2671,13 +2808,13 @@ def _init_custom_logger_compatible_class( # noqa: PLR0915
)
for callback in _in_memory_loggers:
if (
- isinstance(callback, OpenTelemetry)
+ isinstance(callback, ArizeLogger)
and callback.callback_name == "arize"
):
return callback # type: ignore
- _otel_logger = OpenTelemetry(config=otel_config, callback_name="arize")
- _in_memory_loggers.append(_otel_logger)
- return _otel_logger # type: ignore
+ _arize_otel_logger = ArizeLogger(config=otel_config, callback_name="arize")
+ _in_memory_loggers.append(_arize_otel_logger)
+ return _arize_otel_logger # type: ignore
elif logging_integration == "arize_phoenix":
from litellm.integrations.opentelemetry import (
OpenTelemetry,
@@ -2821,6 +2958,13 @@ def _init_custom_logger_compatible_class( # noqa: PLR0915
pagerduty_logger = PagerDutyAlerting(**custom_logger_init_args)
_in_memory_loggers.append(pagerduty_logger)
return pagerduty_logger # type: ignore
+ elif logging_integration == "anthropic_cache_control_hook":
+ for callback in _in_memory_loggers:
+ if isinstance(callback, AnthropicCacheControlHook):
+ return callback
+ anthropic_cache_control_hook = AnthropicCacheControlHook()
+ _in_memory_loggers.append(anthropic_cache_control_hook)
+ return anthropic_cache_control_hook # type: ignore
elif logging_integration == "gcs_pubsub":
for callback in _in_memory_loggers:
if isinstance(callback, GcsPubSubLogger):
@@ -2910,15 +3054,13 @@ def get_custom_logger_compatible_class( # noqa: PLR0915
if isinstance(callback, OpenTelemetry):
return callback
elif logging_integration == "arize":
- from litellm.integrations.opentelemetry import OpenTelemetry
-
if "ARIZE_SPACE_KEY" not in os.environ:
raise ValueError("ARIZE_SPACE_KEY not found in environment variables")
if "ARIZE_API_KEY" not in os.environ:
raise ValueError("ARIZE_API_KEY not found in environment variables")
for callback in _in_memory_loggers:
if (
- isinstance(callback, OpenTelemetry)
+ isinstance(callback, ArizeLogger)
and callback.callback_name == "arize"
):
return callback
@@ -2961,6 +3103,10 @@ def get_custom_logger_compatible_class( # noqa: PLR0915
for callback in _in_memory_loggers:
if isinstance(callback, PagerDutyAlerting):
return callback
+ elif logging_integration == "anthropic_cache_control_hook":
+ for callback in _in_memory_loggers:
+ if isinstance(callback, AnthropicCacheControlHook):
+ return callback
elif logging_integration == "gcs_pubsub":
for callback in _in_memory_loggers:
if isinstance(callback, GcsPubSubLogger):
@@ -3069,6 +3215,8 @@ class StandardLoggingPayloadSetup:
litellm_params: Optional[dict] = None,
prompt_integration: Optional[str] = None,
applied_guardrails: Optional[List[str]] = None,
+ mcp_tool_call_metadata: Optional[StandardLoggingMCPToolCall] = None,
+ usage_object: Optional[dict] = None,
) -> StandardLoggingMetadata:
"""
Clean and filter the metadata dictionary to include only the specified keys in StandardLoggingMetadata.
@@ -3115,6 +3263,8 @@ class StandardLoggingPayloadSetup:
user_api_key_end_user_id=None,
prompt_management_metadata=prompt_management_metadata,
applied_guardrails=applied_guardrails,
+ mcp_tool_call_metadata=mcp_tool_call_metadata,
+ usage_object=usage_object,
)
if isinstance(metadata, dict):
# Filter the metadata dictionary to include only the specified keys
@@ -3140,8 +3290,12 @@ class StandardLoggingPayloadSetup:
return clean_metadata
@staticmethod
- def get_usage_from_response_obj(response_obj: Optional[dict]) -> Usage:
+ def get_usage_from_response_obj(
+ response_obj: Optional[dict], combined_usage_object: Optional[Usage] = None
+ ) -> Usage:
## BASE CASE ##
+ if combined_usage_object is not None:
+ return combined_usage_object
if response_obj is None:
return Usage(
prompt_tokens=0,
@@ -3178,7 +3332,6 @@ class StandardLoggingPayloadSetup:
custom_llm_provider: Optional[str],
init_response_obj: Union[Any, BaseModel, dict],
) -> StandardLoggingModelInformation:
-
model_cost_name = _select_model_name_for_cost_calc(
model=None,
completion_response=init_response_obj, # type: ignore
@@ -3241,7 +3394,6 @@ class StandardLoggingPayloadSetup:
def get_additional_headers(
additiona_headers: Optional[dict],
) -> Optional[StandardLoggingAdditionalHeaders]:
-
if additiona_headers is None:
return None
@@ -3271,6 +3423,8 @@ class StandardLoggingPayloadSetup:
additional_headers=None,
litellm_overhead_time_ms=None,
batch_models=None,
+ litellm_model_name=None,
+ usage_object=None,
)
if hidden_params is not None:
for key in StandardLoggingHiddenParams.__annotations__.keys():
@@ -3356,6 +3510,7 @@ def get_standard_logging_object_payload(
status: StandardLoggingPayloadStatus,
error_str: Optional[str] = None,
original_exception: Optional[Exception] = None,
+ standard_built_in_tools_params: Optional[StandardBuiltInToolsParams] = None,
) -> Optional[StandardLoggingPayload]:
try:
kwargs = kwargs or {}
@@ -3385,6 +3540,8 @@ def get_standard_logging_object_payload(
response_cost=None,
litellm_overhead_time_ms=None,
batch_models=None,
+ litellm_model_name=None,
+ usage_object=None,
)
)
@@ -3401,8 +3558,12 @@ def get_standard_logging_object_payload(
call_type = kwargs.get("call_type")
cache_hit = kwargs.get("cache_hit", False)
usage = StandardLoggingPayloadSetup.get_usage_from_response_obj(
- response_obj=response_obj
+ response_obj=response_obj,
+ combined_usage_object=cast(
+ Optional[Usage], kwargs.get("combined_usage_object")
+ ),
)
+
id = response_obj.get("id", kwargs.get("litellm_call_id"))
_model_id = metadata.get("model_info", {}).get("id", "")
@@ -3415,12 +3576,14 @@ def get_standard_logging_object_payload(
)
# cleanup timestamps
- start_time_float, end_time_float, completion_start_time_float = (
- StandardLoggingPayloadSetup.cleanup_timestamps(
- start_time=start_time,
- end_time=end_time,
- completion_start_time=completion_start_time,
- )
+ (
+ start_time_float,
+ end_time_float,
+ completion_start_time_float,
+ ) = StandardLoggingPayloadSetup.cleanup_timestamps(
+ start_time=start_time,
+ end_time=end_time,
+ completion_start_time=completion_start_time,
)
response_time = StandardLoggingPayloadSetup.get_response_time(
start_time_float=start_time_float,
@@ -3438,6 +3601,8 @@ def get_standard_logging_object_payload(
litellm_params=litellm_params,
prompt_integration=kwargs.get("prompt_integration", None),
applied_guardrails=kwargs.get("applied_guardrails", None),
+ mcp_tool_call_metadata=kwargs.get("mcp_tool_call_metadata", None),
+ usage_object=usage.model_dump(),
)
_request_body = proxy_server_request.get("body", {})
@@ -3447,7 +3612,6 @@ def get_standard_logging_object_payload(
saved_cache_cost: float = 0.0
if cache_hit is True:
-
id = f"{id}_cache_hit{time.time()}" # do not duplicate the request id
saved_cache_cost = (
logging_obj._response_cost_calculator(
@@ -3530,6 +3694,7 @@ def get_standard_logging_object_payload(
guardrail_information=metadata.get(
"standard_logging_guardrail_information", None
),
+ standard_built_in_tools_params=standard_built_in_tools_params,
)
emit_standard_logging_payload(payload)
@@ -3547,7 +3712,7 @@ def emit_standard_logging_payload(payload: StandardLoggingPayload):
def get_standard_logging_metadata(
- metadata: Optional[Dict[str, Any]]
+ metadata: Optional[Dict[str, Any]],
) -> StandardLoggingMetadata:
"""
Clean and filter the metadata dictionary to include only the specified keys in StandardLoggingMetadata.
@@ -3577,6 +3742,8 @@ def get_standard_logging_metadata(
user_api_key_end_user_id=None,
prompt_management_metadata=None,
applied_guardrails=None,
+ mcp_tool_call_metadata=None,
+ usage_object=None,
)
if isinstance(metadata, dict):
# Filter the metadata dictionary to include only the specified keys
@@ -3670,6 +3837,8 @@ def create_dummy_standard_logging_payload() -> StandardLoggingPayload:
additional_headers=None,
litellm_overhead_time_ms=None,
batch_models=None,
+ litellm_model_name=None,
+ usage_object=None,
)
# Convert numeric values to appropriate types
@@ -3693,9 +3862,12 @@ def create_dummy_standard_logging_payload() -> StandardLoggingPayload:
response_cost=response_cost,
response_cost_failure_debug_info=None,
status=str("success"),
- total_tokens=int(30),
- prompt_tokens=int(20),
- completion_tokens=int(10),
+ total_tokens=int(
+ DEFAULT_MOCK_RESPONSE_PROMPT_TOKEN_COUNT
+ + DEFAULT_MOCK_RESPONSE_COMPLETION_TOKEN_COUNT
+ ),
+ prompt_tokens=int(DEFAULT_MOCK_RESPONSE_PROMPT_TOKEN_COUNT),
+ completion_tokens=int(DEFAULT_MOCK_RESPONSE_COMPLETION_TOKEN_COUNT),
startTime=start_time,
endTime=end_time,
completionStartTime=completion_start_time,
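One small but user-visible piece of this file's refactor: api_base masking now lives in `_get_masked_api_base` and is applied both to the logged `litellm_params["api_base"]` and to the generated curl command, so key material embedded in query strings does not leak into logs. The same masking logic, as a standalone sketch:

```python
def mask_api_base(api_base: str) -> str:
    """Mask key material embedded in the URL query string, keeping the tail for debugging."""
    if "key=" in api_base:
        key_index = api_base.find("key=") + 4
        # replace the secret after "key=" with asterisks, keep the last 4 characters
        return api_base[:key_index] + "*" * 5 + api_base[-4:]
    return api_base


print(mask_api_base("https://generativelanguage.googleapis.com/v1beta?key=abcd1234wxyz"))
# -> https://generativelanguage.googleapis.com/v1beta?key=*****wxyz
```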
diff --git a/litellm/litellm_core_utils/llm_cost_calc/tool_call_cost_tracking.py b/litellm/litellm_core_utils/llm_cost_calc/tool_call_cost_tracking.py
new file mode 100644
index 0000000000..34c370ffca
--- /dev/null
+++ b/litellm/litellm_core_utils/llm_cost_calc/tool_call_cost_tracking.py
@@ -0,0 +1,200 @@
+"""
+Helper utilities for tracking the cost of built-in tools.
+"""
+
+from typing import Any, Dict, List, Optional
+
+import litellm
+from litellm.constants import OPENAI_FILE_SEARCH_COST_PER_1K_CALLS
+from litellm.types.llms.openai import FileSearchTool, WebSearchOptions
+from litellm.types.utils import (
+ ModelInfo,
+ ModelResponse,
+ SearchContextCostPerQuery,
+ StandardBuiltInToolsParams,
+)
+
+
+class StandardBuiltInToolCostTracking:
+ """
+ Helper class for tracking the cost of built-in tools
+
+ Example: Web Search
+ """
+
+ @staticmethod
+ def get_cost_for_built_in_tools(
+ model: str,
+ response_object: Any,
+ custom_llm_provider: Optional[str] = None,
+ standard_built_in_tools_params: Optional[StandardBuiltInToolsParams] = None,
+ ) -> float:
+ """
+ Get the cost of using built-in tools.
+
+ Supported tools:
+ - Web Search
+ - File Search
+
+ """
+ if standard_built_in_tools_params is not None:
+ if (
+ standard_built_in_tools_params.get("web_search_options", None)
+ is not None
+ ):
+ model_info = StandardBuiltInToolCostTracking._safe_get_model_info(
+ model=model, custom_llm_provider=custom_llm_provider
+ )
+
+ return StandardBuiltInToolCostTracking.get_cost_for_web_search(
+ web_search_options=standard_built_in_tools_params.get(
+ "web_search_options", None
+ ),
+ model_info=model_info,
+ )
+
+ if standard_built_in_tools_params.get("file_search", None) is not None:
+ return StandardBuiltInToolCostTracking.get_cost_for_file_search(
+ file_search=standard_built_in_tools_params.get("file_search", None),
+ )
+
+ if isinstance(response_object, ModelResponse):
+ if StandardBuiltInToolCostTracking.chat_completion_response_includes_annotations(
+ response_object
+ ):
+ model_info = StandardBuiltInToolCostTracking._safe_get_model_info(
+ model=model, custom_llm_provider=custom_llm_provider
+ )
+ return StandardBuiltInToolCostTracking.get_default_cost_for_web_search(
+ model_info
+ )
+ return 0.0
+
+ @staticmethod
+ def _safe_get_model_info(
+ model: str, custom_llm_provider: Optional[str] = None
+ ) -> Optional[ModelInfo]:
+ try:
+ return litellm.get_model_info(
+ model=model, custom_llm_provider=custom_llm_provider
+ )
+ except Exception:
+ return None
+
+ @staticmethod
+ def get_cost_for_web_search(
+ web_search_options: Optional[WebSearchOptions] = None,
+ model_info: Optional[ModelInfo] = None,
+ ) -> float:
+ """
+ If request includes `web_search_options`, calculate the cost of the web search.
+ """
+ if web_search_options is None:
+ return 0.0
+ if model_info is None:
+ return 0.0
+
+ search_context_pricing: SearchContextCostPerQuery = (
+ model_info.get("search_context_cost_per_query", {}) or {}
+ )
+ if web_search_options.get("search_context_size", None) == "low":
+ return search_context_pricing.get("search_context_size_low", 0.0)
+ elif web_search_options.get("search_context_size", None) == "medium":
+ return search_context_pricing.get("search_context_size_medium", 0.0)
+ elif web_search_options.get("search_context_size", None) == "high":
+ return search_context_pricing.get("search_context_size_high", 0.0)
+ return StandardBuiltInToolCostTracking.get_default_cost_for_web_search(
+ model_info
+ )
+
+ @staticmethod
+ def get_default_cost_for_web_search(
+ model_info: Optional[ModelInfo] = None,
+ ) -> float:
+ """
+ If no web search options are provided, use the `search_context_size_medium` pricing.
+
+ https://platform.openai.com/docs/pricing#web-search
+ """
+ if model_info is None:
+ return 0.0
+ search_context_pricing: SearchContextCostPerQuery = (
+ model_info.get("search_context_cost_per_query", {}) or {}
+ )
+ return search_context_pricing.get("search_context_size_medium", 0.0)
+
+ @staticmethod
+ def get_cost_for_file_search(
+ file_search: Optional[FileSearchTool] = None,
+ ) -> float:
+ """ "
+ Charged at $2.50/1k calls
+
+ Doc: https://platform.openai.com/docs/pricing#built-in-tools
+ """
+ if file_search is None:
+ return 0.0
+ return OPENAI_FILE_SEARCH_COST_PER_1K_CALLS
+
+ @staticmethod
+ def chat_completion_response_includes_annotations(
+ response_object: ModelResponse,
+ ) -> bool:
+ for _choice in response_object.choices:
+ message = getattr(_choice, "message", None)
+ if (
+ message is not None
+ and hasattr(message, "annotations")
+ and message.annotations is not None
+ and len(message.annotations) > 0
+ ):
+ return True
+ return False
+
+ @staticmethod
+ def _get_web_search_options(kwargs: Dict) -> Optional[WebSearchOptions]:
+ if "web_search_options" in kwargs:
+ return WebSearchOptions(**kwargs.get("web_search_options", {}))
+
+ tools = StandardBuiltInToolCostTracking._get_tools_from_kwargs(
+ kwargs, "web_search_preview"
+ )
+ if tools:
+ # Look for web search tool in the tools array
+ for tool in tools:
+ if isinstance(tool, dict):
+ if StandardBuiltInToolCostTracking._is_web_search_tool_call(tool):
+ return WebSearchOptions(**tool)
+ return None
+
+ @staticmethod
+ def _get_tools_from_kwargs(kwargs: Dict, tool_type: str) -> Optional[List[Dict]]:
+ if "tools" in kwargs:
+ tools = kwargs.get("tools", [])
+ return tools
+ return None
+
+ @staticmethod
+ def _get_file_search_tool_call(kwargs: Dict) -> Optional[FileSearchTool]:
+ tools = StandardBuiltInToolCostTracking._get_tools_from_kwargs(
+ kwargs, "file_search"
+ )
+ if tools:
+ for tool in tools:
+ if isinstance(tool, dict):
+ if StandardBuiltInToolCostTracking._is_file_search_tool_call(tool):
+ return FileSearchTool(**tool)
+ return None
+
+ @staticmethod
+ def _is_web_search_tool_call(tool: Dict) -> bool:
+ if tool.get("type", None) == "web_search_preview":
+ return True
+ if "search_context_size" in tool:
+ return True
+ return False
+
+ @staticmethod
+ def _is_file_search_tool_call(tool: Dict) -> bool:
+ if tool.get("type", None) == "file_search":
+ return True
+ return False
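To show how the new helper is meant to be consumed: the logging object builds `StandardBuiltInToolsParams` from the request kwargs, and cost calculation then adds a flat per-query surcharge based on `search_context_size` (or the flat file-search rate) on top of token cost. A hedged sketch with made-up pricing values rather than litellm's real model-cost map:

```python
from typing import Optional

# assumed per-query pricing, for illustration only (real values come from the model cost map)
SEARCH_CONTEXT_PRICING = {
    "search_context_size_low": 0.025,
    "search_context_size_medium": 0.0275,
    "search_context_size_high": 0.03,
}


def web_search_surcharge(search_context_size: Optional[str]) -> float:
    """Per-query surcharge keyed off search_context_size; medium is the default tier."""
    if search_context_size == "low":
        return SEARCH_CONTEXT_PRICING["search_context_size_low"]
    if search_context_size == "high":
        return SEARCH_CONTEXT_PRICING["search_context_size_high"]
    return SEARCH_CONTEXT_PRICING["search_context_size_medium"]


token_cost = 0.0012  # assumed prompt + completion cost for the request
total_cost = token_cost + web_search_surcharge("high")
print(round(total_cost, 4))  # 0.0312
```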
diff --git a/litellm/litellm_core_utils/llm_cost_calc/utils.py b/litellm/litellm_core_utils/llm_cost_calc/utils.py
index 7af3a26d2e..48809fe856 100644
--- a/litellm/litellm_core_utils/llm_cost_calc/utils.py
+++ b/litellm/litellm_core_utils/llm_cost_calc/utils.py
@@ -1,7 +1,7 @@
# What is this?
## Helper utilities for cost_per_token()
-from typing import Optional, Tuple
+from typing import Optional, Tuple, cast
import litellm
from litellm import verbose_logger
@@ -90,35 +90,70 @@ def _generic_cost_per_character(
return prompt_cost, completion_cost
-def _get_prompt_token_base_cost(model_info: ModelInfo, usage: Usage) -> float:
+def _get_token_base_cost(model_info: ModelInfo, usage: Usage) -> Tuple[float, float]:
"""
Return prompt cost for a given model and usage.
- If input_tokens > 128k and `input_cost_per_token_above_128k_tokens` is set, then we use the `input_cost_per_token_above_128k_tokens` field.
+ If input_tokens > threshold and `input_cost_per_token_above_[x]k_tokens` or `input_cost_per_token_above_[x]_tokens` is set,
+ then we use the corresponding threshold cost.
"""
- input_cost_per_token_above_128k_tokens = model_info.get(
- "input_cost_per_token_above_128k_tokens"
- )
- if _is_above_128k(usage.prompt_tokens) and input_cost_per_token_above_128k_tokens:
- return input_cost_per_token_above_128k_tokens
- return model_info["input_cost_per_token"]
+ prompt_base_cost = model_info["input_cost_per_token"]
+ completion_base_cost = model_info["output_cost_per_token"]
+
+ ## CHECK IF ABOVE THRESHOLD
+ threshold: Optional[float] = None
+ for key, value in sorted(model_info.items(), reverse=True):
+ if key.startswith("input_cost_per_token_above_") and value is not None:
+ try:
+ # Handle both formats: _above_128k_tokens and _above_128_tokens
+ threshold_str = key.split("_above_")[1].split("_tokens")[0]
+ threshold = float(threshold_str.replace("k", "")) * (
+ 1000 if "k" in threshold_str else 1
+ )
+ if usage.prompt_tokens > threshold:
+ prompt_base_cost = cast(
+ float,
+ model_info.get(key, prompt_base_cost),
+ )
+ completion_base_cost = cast(
+ float,
+ model_info.get(
+ f"output_cost_per_token_above_{threshold_str}_tokens",
+ completion_base_cost,
+ ),
+ )
+ break
+ except (IndexError, ValueError):
+ continue
+ except Exception:
+ continue
+
+ return prompt_base_cost, completion_base_cost
-def _get_completion_token_base_cost(model_info: ModelInfo, usage: Usage) -> float:
+def calculate_cost_component(
+ model_info: ModelInfo, cost_key: str, usage_value: Optional[float]
+) -> float:
"""
- Return prompt cost for a given model and usage.
+ Generic cost calculator for any usage component
- If input_tokens > 128k and `input_cost_per_token_above_128k_tokens` is set, then we use the `input_cost_per_token_above_128k_tokens` field.
+ Args:
+ model_info: Dictionary containing cost information
+ cost_key: The key for the cost multiplier in model_info (e.g., 'input_cost_per_audio_token')
+ usage_value: The actual usage value (e.g., number of tokens, characters, seconds)
+
+ Returns:
+ float: The calculated cost
"""
- output_cost_per_token_above_128k_tokens = model_info.get(
- "output_cost_per_token_above_128k_tokens"
- )
+ cost_per_unit = model_info.get(cost_key)
if (
- _is_above_128k(usage.completion_tokens)
- and output_cost_per_token_above_128k_tokens
+ cost_per_unit is not None
+ and isinstance(cost_per_unit, float)
+ and usage_value is not None
+ and usage_value > 0
):
- return output_cost_per_token_above_128k_tokens
- return model_info["output_cost_per_token"]
+ return float(usage_value) * cost_per_unit
+ return 0.0
def generic_cost_per_token(
@@ -136,6 +171,7 @@ def generic_cost_per_token(
Returns:
Tuple[float, float] - prompt_cost_in_usd, completion_cost_in_usd
"""
+
## GET MODEL INFO
model_info = get_model_info(model=model, custom_llm_provider=custom_llm_provider)
@@ -143,38 +179,150 @@ def generic_cost_per_token(
### Cost of processing (non-cache hit + cache hit) + Cost of cache-writing (cache writing)
prompt_cost = 0.0
### PROCESSING COST
- non_cache_hit_tokens = usage.prompt_tokens
+ text_tokens = usage.prompt_tokens
cache_hit_tokens = 0
- if usage.prompt_tokens_details and usage.prompt_tokens_details.cached_tokens:
- cache_hit_tokens = usage.prompt_tokens_details.cached_tokens
- non_cache_hit_tokens = non_cache_hit_tokens - cache_hit_tokens
-
- prompt_base_cost = _get_prompt_token_base_cost(model_info=model_info, usage=usage)
-
- prompt_cost = float(non_cache_hit_tokens) * prompt_base_cost
-
- _cache_read_input_token_cost = model_info.get("cache_read_input_token_cost")
- if (
- _cache_read_input_token_cost is not None
- and usage.prompt_tokens_details
- and usage.prompt_tokens_details.cached_tokens
- ):
- prompt_cost += (
- float(usage.prompt_tokens_details.cached_tokens)
- * _cache_read_input_token_cost
+ audio_tokens = 0
+ character_count = 0
+ image_count = 0
+ video_length_seconds = 0
+ if usage.prompt_tokens_details:
+ cache_hit_tokens = (
+ cast(
+ Optional[int], getattr(usage.prompt_tokens_details, "cached_tokens", 0)
+ )
+ or 0
+ )
+ text_tokens = (
+ cast(
+ Optional[int], getattr(usage.prompt_tokens_details, "text_tokens", None)
+ )
+ or 0 # defaults to 0 here; the edge case below derives text tokens from prompt_tokens
+ )
+ audio_tokens = (
+ cast(Optional[int], getattr(usage.prompt_tokens_details, "audio_tokens", 0))
+ or 0
+ )
+ character_count = (
+ cast(
+ Optional[int],
+ getattr(usage.prompt_tokens_details, "character_count", 0),
+ )
+ or 0
+ )
+ image_count = (
+ cast(Optional[int], getattr(usage.prompt_tokens_details, "image_count", 0))
+ or 0
+ )
+ video_length_seconds = (
+ cast(
+ Optional[int],
+ getattr(usage.prompt_tokens_details, "video_length_seconds", 0),
+ )
+ or 0
)
- ### CACHE WRITING COST
- _cache_creation_input_token_cost = model_info.get("cache_creation_input_token_cost")
- if _cache_creation_input_token_cost is not None:
- prompt_cost += (
- float(usage._cache_creation_input_tokens) * _cache_creation_input_token_cost
- )
+ ## EDGE CASE - text tokens not set inside PromptTokensDetails
+ if text_tokens == 0:
+ text_tokens = usage.prompt_tokens - cache_hit_tokens - audio_tokens
- ## CALCULATE OUTPUT COST
- completion_base_cost = _get_completion_token_base_cost(
+ prompt_base_cost, completion_base_cost = _get_token_base_cost(
model_info=model_info, usage=usage
)
- completion_cost = usage["completion_tokens"] * completion_base_cost
+
+ prompt_cost = float(text_tokens) * prompt_base_cost
+
+ ### CACHE READ COST
+ prompt_cost += calculate_cost_component(
+ model_info, "cache_read_input_token_cost", cache_hit_tokens
+ )
+
+ ### AUDIO COST
+ prompt_cost += calculate_cost_component(
+ model_info, "input_cost_per_audio_token", audio_tokens
+ )
+
+ ### CACHE WRITING COST
+ prompt_cost += calculate_cost_component(
+ model_info,
+ "cache_creation_input_token_cost",
+ usage._cache_creation_input_tokens,
+ )
+
+ ### CHARACTER COST
+
+ prompt_cost += calculate_cost_component(
+ model_info, "input_cost_per_character", character_count
+ )
+
+ ### IMAGE COUNT COST
+ prompt_cost += calculate_cost_component(
+ model_info, "input_cost_per_image", image_count
+ )
+
+ ### VIDEO LENGTH COST
+ prompt_cost += calculate_cost_component(
+ model_info, "input_cost_per_video_per_second", video_length_seconds
+ )
+
+ ## CALCULATE OUTPUT COST
+ text_tokens = 0
+ audio_tokens = 0
+ reasoning_tokens = 0
+ is_text_tokens_total = False
+ if usage.completion_tokens_details is not None:
+ audio_tokens = (
+ cast(
+ Optional[int],
+ getattr(usage.completion_tokens_details, "audio_tokens", 0),
+ )
+ or 0
+ )
+ text_tokens = (
+ cast(
+ Optional[int],
+ getattr(usage.completion_tokens_details, "text_tokens", None),
+ )
+ or 0 # defaults to 0 here; the fallback below uses completion_tokens when unset
+ )
+ reasoning_tokens = (
+ cast(
+ Optional[int],
+ getattr(usage.completion_tokens_details, "reasoning_tokens", 0),
+ )
+ or 0
+ )
+
+ if text_tokens == 0:
+ text_tokens = usage.completion_tokens
+ if text_tokens == usage.completion_tokens:
+ is_text_tokens_total = True
+ ## TEXT COST
+ completion_cost = float(text_tokens) * completion_base_cost
+
+ _output_cost_per_audio_token: Optional[float] = model_info.get(
+ "output_cost_per_audio_token"
+ )
+
+ _output_cost_per_reasoning_token: Optional[float] = model_info.get(
+ "output_cost_per_reasoning_token"
+ )
+
+ ## AUDIO COST
+ if not is_text_tokens_total and audio_tokens is not None and audio_tokens > 0:
+ _output_cost_per_audio_token = (
+ _output_cost_per_audio_token
+ if _output_cost_per_audio_token is not None
+ else completion_base_cost
+ )
+ completion_cost += float(audio_tokens) * _output_cost_per_audio_token
+
+ ## REASONING COST
+ if not is_text_tokens_total and reasoning_tokens and reasoning_tokens > 0:
+ _output_cost_per_reasoning_token = (
+ _output_cost_per_reasoning_token
+ if _output_cost_per_reasoning_token is not None
+ else completion_base_cost
+ )
+ completion_cost += float(reasoning_tokens) * _output_cost_per_reasoning_token
return prompt_cost, completion_cost
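
A standalone sketch of the threshold parsing that `_get_token_base_cost` now performs, run against a toy `model_info` (all prices made up for illustration):

from typing import Tuple

def pick_base_costs(model_info: dict, prompt_tokens: int) -> Tuple[float, float]:
    prompt_cost = model_info["input_cost_per_token"]
    completion_cost = model_info["output_cost_per_token"]
    for key, value in sorted(model_info.items(), reverse=True):
        if not key.startswith("input_cost_per_token_above_") or value is None:
            continue
        # "input_cost_per_token_above_128k_tokens" -> "128k" -> 128000.0
        threshold_str = key.split("_above_")[1].split("_tokens")[0]
        threshold = float(threshold_str.replace("k", "")) * (1000 if "k" in threshold_str else 1)
        if prompt_tokens > threshold:
            prompt_cost = value
            completion_cost = model_info.get(
                f"output_cost_per_token_above_{threshold_str}_tokens", completion_cost
            )
            break
    return prompt_cost, completion_cost

toy_model_info = {
    "input_cost_per_token": 1e-6,
    "output_cost_per_token": 2e-6,
    "input_cost_per_token_above_128k_tokens": 2e-6,
    "output_cost_per_token_above_128k_tokens": 4e-6,
}
print(pick_base_costs(toy_model_info, prompt_tokens=200_000))  # (2e-06, 4e-06)
print(pick_base_costs(toy_model_info, prompt_tokens=1_000))    # (1e-06, 2e-06)
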
diff --git a/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py b/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py
index ebb1032a19..5a8319a747 100644
--- a/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py
+++ b/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py
@@ -9,10 +9,12 @@ from typing import Dict, Iterable, List, Literal, Optional, Tuple, Union
import litellm
from litellm._logging import verbose_logger
from litellm.constants import RESPONSE_FORMAT_TOOL_NAME
+from litellm.types.llms.databricks import DatabricksTool
from litellm.types.llms.openai import ChatCompletionThinkingBlock
from litellm.types.utils import (
ChatCompletionDeltaToolCall,
ChatCompletionMessageToolCall,
+ ChatCompletionRedactedThinkingBlock,
Choices,
Delta,
EmbeddingResponse,
@@ -35,6 +37,25 @@ from litellm.types.utils import (
from .get_headers import get_response_headers
+def convert_tool_call_to_json_mode(
+ tool_calls: List[ChatCompletionMessageToolCall],
+ convert_tool_call_to_json_mode: bool,
+) -> Tuple[Optional[Message], Optional[str]]:
+ if _should_convert_tool_call_to_json_mode(
+ tool_calls=tool_calls,
+ convert_tool_call_to_json_mode=convert_tool_call_to_json_mode,
+ ):
+ # to support 'json_schema' logic on older models
+ json_mode_content_str: Optional[str] = tool_calls[0]["function"].get(
+ "arguments"
+ )
+ if json_mode_content_str is not None:
+ message = litellm.Message(content=json_mode_content_str)
+ finish_reason = "stop"
+ return message, finish_reason
+ return None, None
+
+
async def convert_to_streaming_response_async(response_object: Optional[dict] = None):
"""
Asynchronously converts a response object to a streaming response.
@@ -258,14 +279,12 @@ def _extract_reasoning_content(message: dict) -> Tuple[Optional[str], Optional[s
class LiteLLMResponseObjectHandler:
-
@staticmethod
def convert_to_image_response(
response_object: dict,
model_response_object: Optional[ImageResponse] = None,
hidden_params: Optional[dict] = None,
) -> ImageResponse:
-
response_object.update({"hidden_params": hidden_params})
if model_response_object is None:
@@ -337,21 +356,14 @@ class LiteLLMResponseObjectHandler:
Only supported for HF TGI models
"""
transformed_logprobs: Optional[TextCompletionLogprobs] = None
- if custom_llm_provider == "huggingface":
- # only supported for TGI models
- try:
- raw_response = response._hidden_params.get("original_response", None)
- transformed_logprobs = litellm.huggingface._transform_logprobs(
- hf_response=raw_response
- )
- except Exception as e:
- verbose_logger.exception(f"LiteLLM non blocking exception: {e}")
return transformed_logprobs
def _should_convert_tool_call_to_json_mode(
- tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None,
+ tool_calls: Optional[
+ Union[List[ChatCompletionMessageToolCall], List[DatabricksTool]]
+ ] = None,
convert_tool_call_to_json_mode: Optional[bool] = None,
) -> bool:
"""
@@ -475,15 +487,22 @@ def convert_to_model_response_object( # noqa: PLR0915
)
# Handle thinking models that display `thinking_blocks` within `content`
- thinking_blocks: Optional[List[ChatCompletionThinkingBlock]] = None
+ thinking_blocks: Optional[
+ List[
+ Union[
+ ChatCompletionThinkingBlock,
+ ChatCompletionRedactedThinkingBlock,
+ ]
+ ]
+ ] = None
if "thinking_blocks" in choice["message"]:
thinking_blocks = choice["message"]["thinking_blocks"]
provider_specific_fields["thinking_blocks"] = thinking_blocks
if reasoning_content:
- provider_specific_fields["reasoning_content"] = (
- reasoning_content
- )
+ provider_specific_fields[
+ "reasoning_content"
+ ] = reasoning_content
message = Message(
content=content,
@@ -494,6 +513,7 @@ def convert_to_model_response_object( # noqa: PLR0915
provider_specific_fields=provider_specific_fields,
reasoning_content=reasoning_content,
thinking_blocks=thinking_blocks,
+ annotations=choice["message"].get("annotations", None),
)
finish_reason = choice.get("finish_reason", None)
if finish_reason is None:
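
A simplified sketch of the json-mode conversion idea behind the new `convert_tool_call_to_json_mode` helper: the first tool call's arguments become the message content and the finish reason is forced to "stop". Plain dicts here; the real gate is `_should_convert_tool_call_to_json_mode` and the real helper returns litellm `Message` objects.

from typing import Optional, Tuple

def tool_call_to_json_mode(tool_calls: list, convert: bool) -> Tuple[Optional[str], Optional[str]]:
    if convert and len(tool_calls) == 1:
        arguments = tool_calls[0].get("function", {}).get("arguments")
        if arguments is not None:
            return arguments, "stop"  # (content, finish_reason)
    return None, None

tool_calls = [{"function": {"name": "json_tool_call", "arguments": '{"answer": 42}'}}]
print(tool_call_to_json_mode(tool_calls, convert=True))   # ('{"answer": 42}', 'stop')
print(tool_call_to_json_mode(tool_calls, convert=False))  # (None, None)
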
diff --git a/litellm/litellm_core_utils/llm_response_utils/response_metadata.py b/litellm/litellm_core_utils/llm_response_utils/response_metadata.py
index 03595e27a4..614b5573cc 100644
--- a/litellm/litellm_core_utils/llm_response_utils/response_metadata.py
+++ b/litellm/litellm_core_utils/llm_response_utils/response_metadata.py
@@ -36,14 +36,20 @@ class ResponseMetadata:
self, logging_obj: LiteLLMLoggingObject, model: Optional[str], kwargs: dict
) -> None:
"""Set hidden parameters on the response"""
+
+ ## ADD OTHER HIDDEN PARAMS
+ model_id = kwargs.get("model_info", {}).get("id", None)
new_params = {
"litellm_call_id": getattr(logging_obj, "litellm_call_id", None),
- "model_id": kwargs.get("model_info", {}).get("id", None),
"api_base": get_api_base(model=model or "", optional_params=kwargs),
- "response_cost": logging_obj._response_cost_calculator(result=self.result),
+ "model_id": model_id,
+ "response_cost": logging_obj._response_cost_calculator(
+ result=self.result, litellm_model_name=model, router_model_id=model_id
+ ),
"additional_headers": process_response_headers(
self._get_value_from_hidden_params("additional_headers") or {}
),
+ "litellm_model_name": model,
}
self._update_hidden_params(new_params)
diff --git a/litellm/litellm_core_utils/logging_callback_manager.py b/litellm/litellm_core_utils/logging_callback_manager.py
index a20e826c43..c57a2401b7 100644
--- a/litellm/litellm_core_utils/logging_callback_manager.py
+++ b/litellm/litellm_core_utils/logging_callback_manager.py
@@ -1,4 +1,4 @@
-from typing import Callable, List, Set, Union
+from typing import Callable, List, Set, Type, Union
import litellm
from litellm._logging import verbose_logger
@@ -86,21 +86,20 @@ class LoggingCallbackManager:
callback=callback, parent_list=litellm._async_failure_callback
)
- def remove_callback_from_list_by_object(
- self, callback_list, obj
- ):
+ def remove_callback_from_list_by_object(self, callback_list, obj):
"""
Remove callbacks that are methods of a particular object (e.g., router cleanup)
"""
- if not isinstance(callback_list, list): # Not list -> do nothing
+ if not isinstance(callback_list, list): # Not list -> do nothing
return
-
- remove_list=[c for c in callback_list if hasattr(c, '__self__') and c.__self__ == obj]
+
+ remove_list = [
+ c for c in callback_list if hasattr(c, "__self__") and c.__self__ == obj
+ ]
for c in remove_list:
callback_list.remove(c)
-
def _add_string_callback_to_list(
self, callback: str, parent_list: List[Union[CustomLogger, Callable, str]]
):
@@ -254,3 +253,11 @@ class LoggingCallbackManager:
):
matched_callbacks.add(callback)
return matched_callbacks
+
+ def get_custom_loggers_for_type(
+ self, callback_type: Type[CustomLogger]
+ ) -> List[CustomLogger]:
+ """
+ Get all custom loggers that are instances of the given class type
+ """
+ return [c for c in self._get_all_callbacks() if isinstance(c, callback_type)]
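
An illustrative sketch (toy classes, not litellm types) of the two callback-management patterns above: dropping callbacks that are methods bound to a given object, and filtering loggers by class.

class BaseLogger: ...
class PrometheusLogger(BaseLogger): ...

class Router:
    def on_success(self, *args): ...

router = Router()
callbacks = [router.on_success, PrometheusLogger(), "langfuse"]

# drop callbacks that are bound methods of `router` (router cleanup)
callbacks = [c for c in callbacks if not (hasattr(c, "__self__") and c.__self__ == router)]

# keep only callbacks that are instances of a given logger class
prometheus_loggers = [c for c in callbacks if isinstance(c, PrometheusLogger)]
print(len(callbacks), len(prometheus_loggers))  # 2 1
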
diff --git a/litellm/litellm_core_utils/logging_utils.py b/litellm/litellm_core_utils/logging_utils.py
index 6782435af6..c7512ea146 100644
--- a/litellm/litellm_core_utils/logging_utils.py
+++ b/litellm/litellm_core_utils/logging_utils.py
@@ -77,6 +77,10 @@ def _assemble_complete_response_from_streaming_chunks(
complete_streaming_response: Optional[
Union[ModelResponse, TextCompletionResponse]
] = None
+
+ if isinstance(result, ModelResponse):
+ return result
+
if result.choices[0].finish_reason is not None: # if it's the last chunk
streaming_chunks.append(result)
try:
diff --git a/litellm/litellm_core_utils/model_param_helper.py b/litellm/litellm_core_utils/model_param_helper.py
index 09a2c15a77..91f2f1341c 100644
--- a/litellm/litellm_core_utils/model_param_helper.py
+++ b/litellm/litellm_core_utils/model_param_helper.py
@@ -1,6 +1,5 @@
from typing import Set
-from openai.types.audio.transcription_create_params import TranscriptionCreateParams
from openai.types.chat.completion_create_params import (
CompletionCreateParamsNonStreaming,
CompletionCreateParamsStreaming,
@@ -13,11 +12,11 @@ from openai.types.completion_create_params import (
)
from openai.types.embedding_create_params import EmbeddingCreateParams
+from litellm._logging import verbose_logger
from litellm.types.rerank import RerankRequest
class ModelParamHelper:
-
@staticmethod
def get_standard_logging_model_parameters(
model_parameters: dict,
@@ -76,6 +75,10 @@ class ModelParamHelper:
combined_kwargs = combined_kwargs.difference(exclude_kwargs)
return combined_kwargs
+ @staticmethod
+ def get_litellm_provider_specific_params_for_chat_params() -> Set[str]:
+ return set(["thinking"])
+
@staticmethod
def _get_litellm_supported_chat_completion_kwargs() -> Set[str]:
"""
@@ -83,9 +86,18 @@ class ModelParamHelper:
This follows the OpenAI API Spec
"""
- all_chat_completion_kwargs = set(
- CompletionCreateParamsNonStreaming.__annotations__.keys()
- ).union(set(CompletionCreateParamsStreaming.__annotations__.keys()))
+ non_streaming_params: Set[str] = set(
+ getattr(CompletionCreateParamsNonStreaming, "__annotations__", {}).keys()
+ )
+ streaming_params: Set[str] = set(
+ getattr(CompletionCreateParamsStreaming, "__annotations__", {}).keys()
+ )
+ litellm_provider_specific_params: Set[str] = (
+ ModelParamHelper.get_litellm_provider_specific_params_for_chat_params()
+ )
+ all_chat_completion_kwargs: Set[str] = non_streaming_params.union(
+ streaming_params
+ ).union(litellm_provider_specific_params)
return all_chat_completion_kwargs
@staticmethod
@@ -96,8 +108,16 @@ class ModelParamHelper:
This follows the OpenAI API Spec
"""
all_text_completion_kwargs = set(
- TextCompletionCreateParamsNonStreaming.__annotations__.keys()
- ).union(set(TextCompletionCreateParamsStreaming.__annotations__.keys()))
+ getattr(
+ TextCompletionCreateParamsNonStreaming, "__annotations__", {}
+ ).keys()
+ ).union(
+ set(
+ getattr(
+ TextCompletionCreateParamsStreaming, "__annotations__", {}
+ ).keys()
+ )
+ )
return all_text_completion_kwargs
@staticmethod
@@ -114,7 +134,7 @@ class ModelParamHelper:
This follows the OpenAI API Spec
"""
- return set(EmbeddingCreateParams.__annotations__.keys())
+ return set(getattr(EmbeddingCreateParams, "__annotations__", {}).keys())
@staticmethod
def _get_litellm_supported_transcription_kwargs() -> Set[str]:
@@ -123,7 +143,28 @@ class ModelParamHelper:
This follows the OpenAI API Spec
"""
- return set(TranscriptionCreateParams.__annotations__.keys())
+ try:
+ from openai.types.audio.transcription_create_params import (
+ TranscriptionCreateParamsNonStreaming,
+ TranscriptionCreateParamsStreaming,
+ )
+
+ non_streaming_kwargs = set(
+ getattr(
+ TranscriptionCreateParamsNonStreaming, "__annotations__", {}
+ ).keys()
+ )
+ streaming_kwargs = set(
+ getattr(
+ TranscriptionCreateParamsStreaming, "__annotations__", {}
+ ).keys()
+ )
+
+ all_transcription_kwargs = non_streaming_kwargs.union(streaming_kwargs)
+ return all_transcription_kwargs
+ except Exception as e:
+ verbose_logger.debug("Error getting transcription kwargs %s", str(e))
+ return set()
@staticmethod
def _get_exclude_kwargs() -> Set[str]:
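
A sketch of the defensive pattern used throughout this helper: collect supported kwargs from TypedDict `__annotations__`, tolerate classes whose annotations are unavailable, and append provider-specific params such as `thinking`. The toy TypedDicts below stand in for the OpenAI param classes.

from typing import Set, TypedDict

class CreateParamsNonStreaming(TypedDict, total=False):
    model: str
    messages: list
    temperature: float

class CreateParamsStreaming(TypedDict, total=False):
    model: str
    messages: list
    stream: bool

def supported_kwargs(*param_classes, extra=()) -> Set[str]:
    keys: Set[str] = set(extra)
    for cls in param_classes:
        # getattr guards against SDK versions where annotations are missing
        keys |= set(getattr(cls, "__annotations__", {}).keys())
    return keys

print(supported_kwargs(CreateParamsNonStreaming, CreateParamsStreaming, extra={"thinking"}))
# {'model', 'messages', 'temperature', 'stream', 'thinking'} (order may vary)
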
diff --git a/litellm/litellm_core_utils/prompt_templates/common_utils.py b/litellm/litellm_core_utils/prompt_templates/common_utils.py
index 6ce8faa5c6..40cd4e286b 100644
--- a/litellm/litellm_core_utils/prompt_templates/common_utils.py
+++ b/litellm/litellm_core_utils/prompt_templates/common_utils.py
@@ -2,14 +2,26 @@
Common utility functions used for translating messages across providers
"""
-from typing import Dict, List, Literal, Optional, Union, cast
+import io
+import mimetypes
+import re
+from os import PathLike
+from typing import Dict, List, Literal, Mapping, Optional, Union, cast
from litellm.types.llms.openai import (
AllMessageValues,
ChatCompletionAssistantMessage,
+ ChatCompletionFileObject,
ChatCompletionUserMessage,
)
-from litellm.types.utils import Choices, ModelResponse, StreamingChoices
+from litellm.types.utils import (
+ Choices,
+ ExtractedFileData,
+ FileTypes,
+ ModelResponse,
+ SpecialEnums,
+ StreamingChoices,
+)
DEFAULT_USER_CONTINUE_MESSAGE = ChatCompletionUserMessage(
content="Please continue.", role="user"
@@ -34,7 +46,7 @@ def handle_messages_with_content_list_to_str_conversion(
def strip_name_from_messages(
- messages: List[AllMessageValues],
+ messages: List[AllMessageValues], allowed_name_roles: List[str] = ["user"]
) -> List[AllMessageValues]:
"""
Removes 'name' from messages
@@ -43,7 +55,7 @@ def strip_name_from_messages(
for message in messages:
msg_role = message.get("role")
msg_copy = message.copy()
- if msg_role == "user":
+ if msg_role not in allowed_name_roles:
msg_copy.pop("name", None) # type: ignore
new_messages.append(msg_copy)
return new_messages
@@ -77,6 +89,16 @@ def convert_content_list_to_str(message: AllMessageValues) -> str:
return texts
+def get_str_from_messages(messages: List[AllMessageValues]) -> str:
+ """
+ Converts a list of messages to a string
+ """
+ text = ""
+ for message in messages:
+ text += convert_content_list_to_str(message=message)
+ return text
+
+
def is_non_content_values_set(message: AllMessageValues) -> bool:
ignore_keys = ["content", "role", "name"]
return any(
@@ -247,7 +269,6 @@ def _insert_assistant_continue_message(
and message.get("role") == "user" # Current is user
and messages[i + 1].get("role") == "user"
): # Next is user
-
# Insert assistant message
continue_message = (
assistant_continue_message or DEFAULT_ASSISTANT_CONTINUE_MESSAGE
@@ -283,3 +304,226 @@ def get_completion_messages(
messages, assistant_continue_message, ensure_alternating_roles
)
return messages
+
+
+def get_format_from_file_id(file_id: Optional[str]) -> Optional[str]:
+ """
+ Gets format from file id
+
+ unified_file_id = litellm_proxy:{};unified_id,{}
+ If not a unified file id, returns 'file' as default format
+ """
+ from litellm.proxy.hooks.managed_files import _PROXY_LiteLLMManagedFiles
+
+ if not file_id:
+ return None
+ try:
+ transformed_file_id = (
+ _PROXY_LiteLLMManagedFiles._convert_b64_uid_to_unified_uid(file_id)
+ )
+ if transformed_file_id.startswith(
+ SpecialEnums.LITELM_MANAGED_FILE_ID_PREFIX.value
+ ):
+ match = re.match(
+ f"{SpecialEnums.LITELM_MANAGED_FILE_ID_PREFIX.value}:(.*?);unified_id",
+ transformed_file_id,
+ )
+ if match:
+ return match.group(1)
+
+ return None
+ except Exception:
+ return None
+
+
+def update_messages_with_model_file_ids(
+ messages: List[AllMessageValues],
+ model_id: str,
+ model_file_id_mapping: Dict[str, Dict[str, str]],
+) -> List[AllMessageValues]:
+ """
+ Updates messages with model file ids.
+
+ model_file_id_mapping: Dict[str, Dict[str, str]] = {
+ "litellm_proxy/file_id": {
+ "model_id": "provider_file_id"
+ }
+ }
+ """
+
+ for message in messages:
+ if message.get("role") == "user":
+ content = message.get("content")
+ if content:
+ if isinstance(content, str):
+ continue
+ for c in content:
+ if c["type"] == "file":
+ file_object = cast(ChatCompletionFileObject, c)
+ file_object_file_field = file_object["file"]
+ file_id = file_object_file_field.get("file_id")
+ format = file_object_file_field.get(
+ "format", get_format_from_file_id(file_id)
+ )
+
+ if file_id:
+ provider_file_id = (
+ model_file_id_mapping.get(file_id, {}).get(model_id)
+ or file_id
+ )
+ file_object_file_field["file_id"] = provider_file_id
+ if format:
+ file_object_file_field["format"] = format
+ return messages
+
+
+def extract_file_data(file_data: FileTypes) -> ExtractedFileData:
+ """
+ Extracts and processes file data from various input formats.
+
+ Args:
+ file_data: Can be a tuple of (filename, content, [content_type], [headers]) or direct file content
+
+ Returns:
+ ExtractedFileData containing:
+ - filename: Name of the file if provided
+ - content: The file content in bytes
+ - content_type: MIME type of the file
+ - headers: Any additional headers
+ """
+ # Parse the file_data based on its type
+ filename = None
+ file_content = None
+ content_type = None
+ file_headers: Mapping[str, str] = {}
+
+ if isinstance(file_data, tuple):
+ if len(file_data) == 2:
+ filename, file_content = file_data
+ elif len(file_data) == 3:
+ filename, file_content, content_type = file_data
+ elif len(file_data) == 4:
+ filename, file_content, content_type, file_headers = file_data
+ else:
+ file_content = file_data
+ # Convert content to bytes
+ if isinstance(file_content, (str, PathLike)):
+ # If it's a path, open and read the file
+ with open(file_content, "rb") as f:
+ content = f.read()
+ elif isinstance(file_content, io.IOBase):
+ # If it's a file-like object
+ content = file_content.read()
+
+ if isinstance(content, str):
+ content = content.encode("utf-8")
+ # Reset file pointer to beginning
+ file_content.seek(0)
+ elif isinstance(file_content, bytes):
+ content = file_content
+ else:
+ raise ValueError(f"Unsupported file content type: {type(file_content)}")
+
+ # Use provided content type or guess based on filename
+ if not content_type:
+ content_type = (
+ mimetypes.guess_type(filename)[0]
+ if filename
+ else "application/octet-stream"
+ )
+
+ return ExtractedFileData(
+ filename=filename,
+ content=content,
+ content_type=content_type,
+ headers=file_headers,
+ )
+
+
+def unpack_defs(schema, defs):
+ properties = schema.get("properties", None)
+ if properties is None:
+ return
+
+ for name, value in properties.items():
+ ref_key = value.get("$ref", None)
+ if ref_key is not None:
+ ref = defs[ref_key.split("defs/")[-1]]
+ unpack_defs(ref, defs)
+ properties[name] = ref
+ continue
+
+ anyof = value.get("anyOf", None)
+ if anyof is not None:
+ for i, atype in enumerate(anyof):
+ ref_key = atype.get("$ref", None)
+ if ref_key is not None:
+ ref = defs[ref_key.split("defs/")[-1]]
+ unpack_defs(ref, defs)
+ anyof[i] = ref
+ continue
+
+ items = value.get("items", None)
+ if items is not None:
+ ref_key = items.get("$ref", None)
+ if ref_key is not None:
+ ref = defs[ref_key.split("defs/")[-1]]
+ unpack_defs(ref, defs)
+ value["items"] = ref
+ continue
+
+
+def _get_image_mime_type_from_url(url: str) -> Optional[str]:
+ """
+ Get mime type for common image URLs
+ See gemini mime types: https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/image-understanding#image-requirements
+
+ Supported by Gemini:
+ application/pdf
+ audio/mpeg
+ audio/mp3
+ audio/wav
+ image/png
+ image/jpeg
+ image/webp
+ text/plain
+ video/mov
+ video/mpeg
+ video/mp4
+ video/mpg
+ video/avi
+ video/wmv
+ video/mpegps
+ video/flv
+ """
+ url = url.lower()
+
+ # Map file extensions to mime types
+ mime_types = {
+ # Images
+ (".jpg", ".jpeg"): "image/jpeg",
+ (".png",): "image/png",
+ (".webp",): "image/webp",
+ # Videos
+ (".mp4",): "video/mp4",
+ (".mov",): "video/mov",
+ (".mpeg", ".mpg"): "video/mpeg",
+ (".avi",): "video/avi",
+ (".wmv",): "video/wmv",
+ (".mpegps",): "video/mpegps",
+ (".flv",): "video/flv",
+ # Audio
+ (".mp3",): "audio/mp3",
+ (".wav",): "audio/wav",
+ (".mpeg",): "audio/mpeg",
+ # Documents
+ (".pdf",): "application/pdf",
+ (".txt",): "text/plain",
+ }
+
+ # Check each extension group against the URL
+ for extensions, mime_type in mime_types.items():
+ if any(url.endswith(ext) for ext in extensions):
+ return mime_type
+
+ return None
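
Usage sketch for the `$defs` flattening helper. The copy below is trimmed (only "$ref" and "items" are handled) so the example runs standalone.

def _unpack_defs(schema: dict, defs: dict) -> None:
    properties = schema.get("properties")
    if properties is None:
        return
    for name, value in properties.items():
        ref_key = value.get("$ref")
        if ref_key is not None:
            ref = defs[ref_key.split("defs/")[-1]]
            _unpack_defs(ref, defs)
            properties[name] = ref
            continue
        items = value.get("items")
        if items is not None and items.get("$ref") is not None:
            ref = defs[items["$ref"].split("defs/")[-1]]
            _unpack_defs(ref, defs)
            value["items"] = ref

schema = {
    "type": "object",
    "properties": {
        "address": {"$ref": "#/$defs/Address"},
        "tags": {"type": "array", "items": {"$ref": "#/$defs/Tag"}},
    },
}
defs = {
    "Address": {"type": "object", "properties": {"city": {"type": "string"}}},
    "Tag": {"type": "string"},
}
_unpack_defs(schema, defs)
print(schema["properties"]["address"])          # the inlined Address object
print(schema["properties"]["tags"]["items"])    # {'type': 'string'}
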
diff --git a/litellm/litellm_core_utils/prompt_templates/factory.py b/litellm/litellm_core_utils/prompt_templates/factory.py
index ff366b2396..5b11b224bb 100644
--- a/litellm/litellm_core_utils/prompt_templates/factory.py
+++ b/litellm/litellm_core_utils/prompt_templates/factory.py
@@ -1,7 +1,6 @@
import copy
import json
import re
-import traceback
import uuid
import xml.etree.ElementTree as ET
from enum import Enum
@@ -22,6 +21,7 @@ from litellm.types.llms.openai import (
AllMessageValues,
ChatCompletionAssistantMessage,
ChatCompletionAssistantToolCall,
+ ChatCompletionFileObject,
ChatCompletionFunctionMessage,
ChatCompletionImageObject,
ChatCompletionTextObject,
@@ -181,7 +181,7 @@ def _handle_ollama_system_message(
def ollama_pt(
- model, messages
+ model: str, messages: list
) -> Union[
str, OllamaVisionModelObject
]: # https://github.com/ollama/ollama/blob/af4cf55884ac54b9e637cd71dadfe9b7a5685877/docs/modelfile.md#template
@@ -250,7 +250,7 @@ def ollama_pt(
f"Tool Calls: {json.dumps(ollama_tool_calls, indent=2)}"
)
- msg_i += 1
+ msg_i += 1
if assistant_content_str:
prompt += f"### Assistant:\n{assistant_content_str}\n\n"
@@ -747,7 +747,6 @@ def convert_to_anthropic_image_obj(
data=base64_data,
)
except Exception as e:
- traceback.print_exc()
if "Error: Unable to fetch image from URL" in str(e):
raise e
raise Exception(
@@ -1042,10 +1041,10 @@ def convert_to_gemini_tool_call_invoke(
if tool_calls is not None:
for tool in tool_calls:
if "function" in tool:
- gemini_function_call: Optional[VertexFunctionCall] = (
- _gemini_tool_call_invoke_helper(
- function_call_params=tool["function"]
- )
+ gemini_function_call: Optional[
+ VertexFunctionCall
+ ] = _gemini_tool_call_invoke_helper(
+ function_call_params=tool["function"]
)
if gemini_function_call is not None:
_parts_list.append(
@@ -1299,20 +1298,37 @@ def convert_to_anthropic_tool_invoke(
]
}
"""
- anthropic_tool_invoke = [
- AnthropicMessagesToolUseParam(
+ anthropic_tool_invoke = []
+
+ for tool in tool_calls:
+ if get_attribute_or_key(tool, "type") != "function":
+ continue
+
+ _anthropic_tool_use_param = AnthropicMessagesToolUseParam(
type="tool_use",
- id=get_attribute_or_key(tool, "id"),
- name=get_attribute_or_key(get_attribute_or_key(tool, "function"), "name"),
+ id=cast(str, get_attribute_or_key(tool, "id")),
+ name=cast(
+ str,
+ get_attribute_or_key(get_attribute_or_key(tool, "function"), "name"),
+ ),
input=json.loads(
get_attribute_or_key(
get_attribute_or_key(tool, "function"), "arguments"
)
),
)
- for tool in tool_calls
- if get_attribute_or_key(tool, "type") == "function"
- ]
+
+ _content_element = add_cache_control_to_content(
+ anthropic_content_element=_anthropic_tool_use_param,
+ orignal_content_element=dict(tool),
+ )
+
+ if "cache_control" in _content_element:
+ _anthropic_tool_use_param["cache_control"] = _content_element[
+ "cache_control"
+ ]
+
+ anthropic_tool_invoke.append(_anthropic_tool_use_param)
return anthropic_tool_invoke
@@ -1323,6 +1339,7 @@ def add_cache_control_to_content(
AnthropicMessagesImageParam,
AnthropicMessagesTextParam,
AnthropicMessagesDocumentParam,
+ AnthropicMessagesToolUseParam,
ChatCompletionThinkingBlock,
],
orignal_content_element: Union[dict, AllMessageValues],
@@ -1432,9 +1449,9 @@ def anthropic_messages_pt( # noqa: PLR0915
)
if "cache_control" in _content_element:
- _anthropic_content_element["cache_control"] = (
- _content_element["cache_control"]
- )
+ _anthropic_content_element[
+ "cache_control"
+ ] = _content_element["cache_control"]
user_content.append(_anthropic_content_element)
elif m.get("type", "") == "text":
m = cast(ChatCompletionTextObject, m)
@@ -1455,6 +1472,25 @@ def anthropic_messages_pt( # noqa: PLR0915
user_content.append(_content_element)
elif m.get("type", "") == "document":
user_content.append(cast(AnthropicMessagesDocumentParam, m))
+ elif m.get("type", "") == "file":
+ file_message = cast(ChatCompletionFileObject, m)
+ file_data = file_message["file"].get("file_data")
+ if file_data:
+ image_chunk = convert_to_anthropic_image_obj(
+ openai_image_url=file_data,
+ format=file_message["file"].get("format"),
+ )
+ anthropic_document_param = (
+ AnthropicMessagesDocumentParam(
+ type="document",
+ source=AnthropicContentParamSource(
+ type="base64",
+ media_type=image_chunk["media_type"],
+ data=image_chunk["data"],
+ ),
+ )
+ )
+ user_content.append(anthropic_document_param)
elif isinstance(user_message_types_block["content"], str):
_anthropic_content_text_element: AnthropicMessagesTextParam = {
"type": "text",
@@ -1466,9 +1502,9 @@ def anthropic_messages_pt( # noqa: PLR0915
)
if "cache_control" in _content_element:
- _anthropic_content_text_element["cache_control"] = (
- _content_element["cache_control"]
- )
+ _anthropic_content_text_element[
+ "cache_control"
+ ] = _content_element["cache_control"]
user_content.append(_anthropic_content_text_element)
@@ -1533,7 +1569,6 @@ def anthropic_messages_pt( # noqa: PLR0915
"content"
] # don't pass empty text blocks. anthropic api raises errors.
):
-
_anthropic_text_content_element = AnthropicMessagesTextParam(
type="text",
text=assistant_content_block["content"],
@@ -1569,7 +1604,6 @@ def anthropic_messages_pt( # noqa: PLR0915
msg_i += 1
if assistant_content:
-
new_messages.append({"role": "assistant", "content": assistant_content})
if msg_i == init_msg_i: # prevent infinite loops
@@ -2224,6 +2258,14 @@ def _parse_content_type(content_type: str) -> str:
return m.get_content_type()
+def _parse_mime_type(base64_data: str) -> Optional[str]:
+ mime_type_match = re.match(r"data:(.*?);base64", base64_data)
+ if mime_type_match:
+ return mime_type_match.group(1)
+ else:
+ return None
+
+
class BedrockImageProcessor:
"""Handles both sync and async image processing for Bedrock conversations."""
@@ -2245,7 +2287,6 @@ class BedrockImageProcessor:
@staticmethod
async def get_image_details_async(image_url) -> Tuple[str, str]:
try:
-
client = get_async_httpx_client(
llm_provider=httpxSpecialProvider.PromptFactory,
params={"concurrent_limit": 1},
@@ -2612,7 +2653,6 @@ def get_user_message_block_or_continue_message(
for item in modified_content_block:
# Check if the list is empty
if item["type"] == "text":
-
if not item["text"].strip():
# Replace empty text with continue message
_user_continue_message = ChatCompletionUserMessage(
@@ -2889,6 +2929,11 @@ class BedrockConverseMessagesProcessor:
image_url=image_url, format=format
)
_parts.append(_part) # type: ignore
+ elif element["type"] == "file":
+ _part = await BedrockConverseMessagesProcessor._async_process_file_message(
+ message=cast(ChatCompletionFileObject, element)
+ )
+ _parts.append(_part)
_cache_point_block = (
litellm.AmazonConverseConfig()._get_cache_point_block(
message_block=cast(
@@ -3058,6 +3103,45 @@ class BedrockConverseMessagesProcessor:
reasoning_content_blocks.append(bedrock_content_block)
return reasoning_content_blocks
+ @staticmethod
+ def _process_file_message(message: ChatCompletionFileObject) -> BedrockContentBlock:
+ file_message = message["file"]
+ file_data = file_message.get("file_data")
+ file_id = file_message.get("file_id")
+
+ if file_data is None and file_id is None:
+ raise litellm.BadRequestError(
+ message="file_data and file_id cannot both be None. Got={}".format(
+ message
+ ),
+ model="",
+ llm_provider="bedrock",
+ )
+ format = file_message.get("format")
+ return BedrockImageProcessor.process_image_sync(
+ image_url=cast(str, file_id or file_data), format=format
+ )
+
+ @staticmethod
+ async def _async_process_file_message(
+ message: ChatCompletionFileObject,
+ ) -> BedrockContentBlock:
+ file_message = message["file"]
+ file_data = file_message.get("file_data")
+ file_id = file_message.get("file_id")
+ format = file_message.get("format")
+ if file_data is None and file_id is None:
+ raise litellm.BadRequestError(
+ message="file_data and file_id cannot both be None. Got={}".format(
+ message
+ ),
+ model="",
+ llm_provider="bedrock",
+ )
+ return await BedrockImageProcessor.process_image_async(
+ image_url=cast(str, file_id or file_data), format=format
+ )
+
def _bedrock_converse_messages_pt( # noqa: PLR0915
messages: List,
@@ -3130,6 +3214,13 @@ def _bedrock_converse_messages_pt( # noqa: PLR0915
format=format,
)
_parts.append(_part) # type: ignore
+ elif element["type"] == "file":
+ _part = (
+ BedrockConverseMessagesProcessor._process_file_message(
+ message=cast(ChatCompletionFileObject, element)
+ )
+ )
+ _parts.append(_part)
_cache_point_block = (
litellm.AmazonConverseConfig()._get_cache_point_block(
message_block=cast(
@@ -3207,7 +3298,6 @@ def _bedrock_converse_messages_pt( # noqa: PLR0915
assistant_content: List[BedrockContentBlock] = []
## MERGE CONSECUTIVE ASSISTANT CONTENT ##
while msg_i < len(messages) and messages[msg_i]["role"] == "assistant":
-
assistant_message_block = get_assistant_message_block_or_continue_message(
message=messages[msg_i],
assistant_continue_message=assistant_continue_message,
@@ -3358,6 +3448,8 @@ def _bedrock_tools_pt(tools: List) -> List[BedrockToolBlock]:
}
]
"""
+ from litellm.litellm_core_utils.prompt_templates.common_utils import unpack_defs
+
tool_block_list: List[BedrockToolBlock] = []
for tool in tools:
parameters = tool.get("function", {}).get(
@@ -3371,6 +3463,13 @@ def _bedrock_tools_pt(tools: List) -> List[BedrockToolBlock]:
description = tool.get("function", {}).get(
"description", name
) # converse api requires a description
+
+ defs = parameters.pop("$defs", {})
+ defs_copy = copy.deepcopy(defs)
+ # flatten the defs
+ for _, value in defs_copy.items():
+ unpack_defs(value, defs_copy)
+ unpack_defs(parameters, defs_copy)
tool_input_schema = BedrockToolInputSchemaBlock(json=parameters)
tool_spec = BedrockToolSpecBlock(
inputSchema=tool_input_schema, name=name, description=description
@@ -3410,7 +3509,6 @@ def response_schema_prompt(model: str, response_schema: dict) -> str:
{"role": "user", "content": "{}".format(response_schema)}
]
if f"{model}/response_schema_prompt" in litellm.custom_prompt_dict:
-
custom_prompt_details = litellm.custom_prompt_dict[
f"{model}/response_schema_prompt"
] # allow user to define custom response schema prompt by model
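
A rough standalone sketch of the new "file" content handling on the Anthropic path: a base64 data URL in `file_data` is re-emitted as a document block. Helper names below are illustrative only; the real code routes through `convert_to_anthropic_image_obj`.

import re
from typing import Optional

def parse_data_url(data_url: str):
    match = re.match(r"data:(.*?);base64,(.*)", data_url)
    if not match:
        raise ValueError("expected a base64 data URL")
    return match.group(1), match.group(2)  # (media_type, base64 payload)

def file_part_to_anthropic_document(part: dict) -> Optional[dict]:
    file_data = part.get("file", {}).get("file_data")
    if not file_data:
        return None
    media_type, data = parse_data_url(file_data)
    return {
        "type": "document",
        "source": {"type": "base64", "media_type": media_type, "data": data},
    }

part = {"type": "file", "file": {"file_data": "data:application/pdf;base64,JVBERi0xLjQ="}}
print(file_part_to_anthropic_document(part))
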
diff --git a/litellm/litellm_core_utils/realtime_streaming.py b/litellm/litellm_core_utils/realtime_streaming.py
index aebd049692..5dcabe2dd3 100644
--- a/litellm/litellm_core_utils/realtime_streaming.py
+++ b/litellm/litellm_core_utils/realtime_streaming.py
@@ -30,6 +30,11 @@ import json
from typing import Any, Dict, List, Optional, Union
import litellm
+from litellm._logging import verbose_logger
+from litellm.types.llms.openai import (
+ OpenAIRealtimeStreamResponseBaseObject,
+ OpenAIRealtimeStreamSessionEvents,
+)
from .litellm_logging import Logging as LiteLLMLogging
@@ -53,7 +58,12 @@ class RealTimeStreaming:
self.websocket = websocket
self.backend_ws = backend_ws
self.logging_obj = logging_obj
- self.messages: List = []
+ self.messages: List[
+ Union[
+ OpenAIRealtimeStreamResponseBaseObject,
+ OpenAIRealtimeStreamSessionEvents,
+ ]
+ ] = []
self.input_message: Dict = {}
_logged_real_time_event_types = litellm.logged_real_time_event_types
@@ -62,10 +72,14 @@ class RealTimeStreaming:
_logged_real_time_event_types = DefaultLoggedRealTimeEventTypes
self.logged_real_time_event_types = _logged_real_time_event_types
- def _should_store_message(self, message: Union[str, bytes]) -> bool:
- if isinstance(message, bytes):
- message = message.decode("utf-8")
- message_obj = json.loads(message)
+ def _should_store_message(
+ self,
+ message_obj: Union[
+ dict,
+ OpenAIRealtimeStreamSessionEvents,
+ OpenAIRealtimeStreamResponseBaseObject,
+ ],
+ ) -> bool:
_msg_type = message_obj["type"]
if self.logged_real_time_event_types == "*":
return True
@@ -75,8 +89,22 @@ class RealTimeStreaming:
def store_message(self, message: Union[str, bytes]):
"""Store message in list"""
- if self._should_store_message(message):
- self.messages.append(message)
+ if isinstance(message, bytes):
+ message = message.decode("utf-8")
+ message_obj = json.loads(message)
+ try:
+ if (
+ message_obj.get("type") == "session.created"
+ or message_obj.get("type") == "session.updated"
+ ):
+ message_obj = OpenAIRealtimeStreamSessionEvents(**message_obj) # type: ignore
+ else:
+ message_obj = OpenAIRealtimeStreamResponseBaseObject(**message_obj) # type: ignore
+ except Exception as e:
+ verbose_logger.debug(f"Error parsing message for logging: {e}")
+ raise e
+ if self._should_store_message(message_obj):
+ self.messages.append(message_obj)
def store_input(self, message: dict):
"""Store input message"""
@@ -122,7 +150,6 @@ class RealTimeStreaming:
pass
async def bidirectional_forward(self):
-
forward_task = asyncio.create_task(self.backend_to_client_send_messages())
try:
await self.client_ack_messages()
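
A sketch of the message-typing step added above: `session.created` / `session.updated` events are parsed into one shape, everything else into another, before the logging filter runs. Plain dataclasses stand in for the OpenAIRealtimeStream* types.

import json
from dataclasses import dataclass

@dataclass
class SessionEvent:
    type: str
    session: dict

@dataclass
class ResponseEvent:
    type: str
    payload: dict

def parse_realtime_message(raw):
    obj = json.loads(raw.decode("utf-8") if isinstance(raw, bytes) else raw)
    if obj.get("type") in ("session.created", "session.updated"):
        return SessionEvent(type=obj["type"], session=obj.get("session", {}))
    return ResponseEvent(type=obj.get("type", ""), payload=obj)

print(parse_realtime_message(b'{"type": "session.created", "session": {"id": "sess_1"}}'))
print(parse_realtime_message(b'{"type": "response.done"}'))
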
diff --git a/litellm/litellm_core_utils/redact_messages.py b/litellm/litellm_core_utils/redact_messages.py
index 50e0e0b575..a62031a9c9 100644
--- a/litellm/litellm_core_utils/redact_messages.py
+++ b/litellm/litellm_core_utils/redact_messages.py
@@ -135,9 +135,9 @@ def _get_turn_off_message_logging_from_dynamic_params(
handles boolean and string values of `turn_off_message_logging`
"""
- standard_callback_dynamic_params: Optional[StandardCallbackDynamicParams] = (
- model_call_details.get("standard_callback_dynamic_params", None)
- )
+ standard_callback_dynamic_params: Optional[
+ StandardCallbackDynamicParams
+ ] = model_call_details.get("standard_callback_dynamic_params", None)
if standard_callback_dynamic_params:
_turn_off_message_logging = standard_callback_dynamic_params.get(
"turn_off_message_logging"
diff --git a/litellm/litellm_core_utils/safe_json_dumps.py b/litellm/litellm_core_utils/safe_json_dumps.py
index 990c0ed561..7ad0038ecb 100644
--- a/litellm/litellm_core_utils/safe_json_dumps.py
+++ b/litellm/litellm_core_utils/safe_json_dumps.py
@@ -1,8 +1,9 @@
import json
from typing import Any, Union
+from litellm.constants import DEFAULT_MAX_RECURSE_DEPTH
-def safe_dumps(data: Any, max_depth: int = 10) -> str:
+def safe_dumps(data: Any, max_depth: int = DEFAULT_MAX_RECURSE_DEPTH) -> str:
"""
Recursively serialize data while detecting circular references.
If a circular reference is detected then a marker string is returned.
diff --git a/litellm/litellm_core_utils/sensitive_data_masker.py b/litellm/litellm_core_utils/sensitive_data_masker.py
index a1df115ff0..23b9ec32fc 100644
--- a/litellm/litellm_core_utils/sensitive_data_masker.py
+++ b/litellm/litellm_core_utils/sensitive_data_masker.py
@@ -1,5 +1,7 @@
from typing import Any, Dict, Optional, Set
+from litellm.constants import DEFAULT_MAX_RECURSE_DEPTH
+
class SensitiveDataMasker:
def __init__(
@@ -39,7 +41,10 @@ class SensitiveDataMasker:
return result
def mask_dict(
- self, data: Dict[str, Any], depth: int = 0, max_depth: int = 10
+ self,
+ data: Dict[str, Any],
+ depth: int = 0,
+ max_depth: int = DEFAULT_MAX_RECURSE_DEPTH,
) -> Dict[str, Any]:
if depth >= max_depth:
return data
diff --git a/litellm/litellm_core_utils/streaming_chunk_builder_utils.py b/litellm/litellm_core_utils/streaming_chunk_builder_utils.py
index e78b10c289..198b71cad3 100644
--- a/litellm/litellm_core_utils/streaming_chunk_builder_utils.py
+++ b/litellm/litellm_core_utils/streaming_chunk_builder_utils.py
@@ -1,6 +1,6 @@
import base64
import time
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union, cast
from litellm.types.llms.openai import (
ChatCompletionAssistantContentValue,
@@ -9,10 +9,13 @@ from litellm.types.llms.openai import (
from litellm.types.utils import (
ChatCompletionAudioResponse,
ChatCompletionMessageToolCall,
+ Choices,
CompletionTokensDetails,
+ CompletionTokensDetailsWrapper,
Function,
FunctionCall,
ModelResponse,
+ ModelResponseStream,
PromptTokensDetails,
Usage,
)
@@ -103,75 +106,63 @@ class ChunkProcessor:
def get_combined_tool_content(
self, tool_call_chunks: List[Dict[str, Any]]
) -> List[ChatCompletionMessageToolCall]:
-
- argument_list: List[str] = []
- delta = tool_call_chunks[0]["choices"][0]["delta"]
- id = None
- name = None
- type = None
tool_calls_list: List[ChatCompletionMessageToolCall] = []
- prev_index = None
- prev_name = None
- prev_id = None
- curr_id = None
- curr_index = 0
+ tool_call_map: Dict[
+ int, Dict[str, Any]
+ ] = {} # Map to store tool calls by index
+
for chunk in tool_call_chunks:
choices = chunk["choices"]
for choice in choices:
delta = choice.get("delta", {})
- tool_calls = delta.get("tool_calls", "")
- # Check if a tool call is present
- if tool_calls and tool_calls[0].function is not None:
- if tool_calls[0].id:
- id = tool_calls[0].id
- curr_id = id
- if prev_id is None:
- prev_id = curr_id
- if tool_calls[0].index:
- curr_index = tool_calls[0].index
- if tool_calls[0].function.arguments:
- # Now, tool_calls is expected to be a dictionary
- arguments = tool_calls[0].function.arguments
- argument_list.append(arguments)
- if tool_calls[0].function.name:
- name = tool_calls[0].function.name
- if tool_calls[0].type:
- type = tool_calls[0].type
- if prev_index is None:
- prev_index = curr_index
- if prev_name is None:
- prev_name = name
- if curr_index != prev_index: # new tool call
- combined_arguments = "".join(argument_list)
+ tool_calls = delta.get("tool_calls", [])
+
+ for tool_call in tool_calls:
+ if not tool_call or not hasattr(tool_call, "function"):
+ continue
+
+ index = getattr(tool_call, "index", 0)
+ if index not in tool_call_map:
+ tool_call_map[index] = {
+ "id": None,
+ "name": None,
+ "type": None,
+ "arguments": [],
+ }
+
+ if hasattr(tool_call, "id") and tool_call.id:
+ tool_call_map[index]["id"] = tool_call.id
+ if hasattr(tool_call, "type") and tool_call.type:
+ tool_call_map[index]["type"] = tool_call.type
+ if hasattr(tool_call, "function"):
+ if (
+ hasattr(tool_call.function, "name")
+ and tool_call.function.name
+ ):
+ tool_call_map[index]["name"] = tool_call.function.name
+ if (
+ hasattr(tool_call.function, "arguments")
+ and tool_call.function.arguments
+ ):
+ tool_call_map[index]["arguments"].append(
+ tool_call.function.arguments
+ )
+
+ # Convert the map to a list of tool calls
+ for index in sorted(tool_call_map.keys()):
+ tool_call_data = tool_call_map[index]
+ if tool_call_data["id"] and tool_call_data["name"]:
+ combined_arguments = "".join(tool_call_data["arguments"]) or "{}"
tool_calls_list.append(
ChatCompletionMessageToolCall(
- id=prev_id,
+ id=tool_call_data["id"],
function=Function(
arguments=combined_arguments,
- name=prev_name,
+ name=tool_call_data["name"],
),
- type=type,
+ type=tool_call_data["type"] or "function",
)
)
- argument_list = [] # reset
- prev_index = curr_index
- prev_id = curr_id
- prev_name = name
-
- combined_arguments = (
- "".join(argument_list) or "{}"
- ) # base case, return empty dict
-
- tool_calls_list.append(
- ChatCompletionMessageToolCall(
- id=id,
- type="function",
- function=Function(
- arguments=combined_arguments,
- name=name,
- ),
- )
- )
return tool_calls_list
@@ -203,14 +194,14 @@ class ChunkProcessor:
)
def get_combined_content(
- self, chunks: List[Dict[str, Any]]
+ self, chunks: List[Dict[str, Any]], delta_key: str = "content"
) -> ChatCompletionAssistantContentValue:
content_list: List[str] = []
for chunk in chunks:
choices = chunk["choices"]
for choice in choices:
delta = choice.get("delta", {})
- content = delta.get("content", "")
+ content = delta.get(delta_key, "")
if content is None:
continue # openai v1.0.0 sets content = None for chunks
content_list.append(content)
@@ -221,6 +212,11 @@ class ChunkProcessor:
# Update the "content" field within the response dictionary
return combined_content
+ def get_combined_reasoning_content(
+ self, chunks: List[Dict[str, Any]]
+ ) -> ChatCompletionAssistantContentValue:
+ return self.get_combined_content(chunks, delta_key="reasoning_content")
+
def get_combined_audio_content(
self, chunks: List[Dict[str, Any]]
) -> ChatCompletionAudioResponse:
@@ -296,12 +292,27 @@ class ChunkProcessor:
"prompt_tokens_details": prompt_tokens_details,
}
+ def count_reasoning_tokens(self, response: ModelResponse) -> int:
+ reasoning_tokens = 0
+ for choice in response.choices:
+ if (
+ hasattr(cast(Choices, choice).message, "reasoning_content")
+ and cast(Choices, choice).message.reasoning_content is not None
+ ):
+ reasoning_tokens += token_counter(
+ text=cast(Choices, choice).message.reasoning_content,
+ count_response_tokens=True,
+ )
+
+ return reasoning_tokens
+
def calculate_usage(
self,
chunks: List[Union[Dict[str, Any], ModelResponse]],
model: str,
completion_output: str,
messages: Optional[List] = None,
+ reasoning_tokens: Optional[int] = None,
) -> Usage:
"""
Calculate usage for the given chunks.
@@ -319,8 +330,12 @@ class ChunkProcessor:
usage_chunk: Optional[Usage] = None
if "usage" in chunk:
usage_chunk = chunk["usage"]
- elif isinstance(chunk, ModelResponse) and hasattr(chunk, "_hidden_params"):
+ elif (
+ isinstance(chunk, ModelResponse)
+ or isinstance(chunk, ModelResponseStream)
+ ) and hasattr(chunk, "_hidden_params"):
usage_chunk = chunk._hidden_params.get("usage", None)
+
if usage_chunk is not None:
usage_chunk_dict = self._usage_chunk_calculation_helper(usage_chunk)
if (
@@ -378,6 +393,19 @@ class ChunkProcessor:
) # for anthropic
if completion_tokens_details is not None:
returned_usage.completion_tokens_details = completion_tokens_details
+
+ if reasoning_tokens is not None:
+ if returned_usage.completion_tokens_details is None:
+ returned_usage.completion_tokens_details = (
+ CompletionTokensDetailsWrapper(reasoning_tokens=reasoning_tokens)
+ )
+ elif (
+ returned_usage.completion_tokens_details is not None
+ and returned_usage.completion_tokens_details.reasoning_tokens is None
+ ):
+ returned_usage.completion_tokens_details.reasoning_tokens = (
+ reasoning_tokens
+ )
if prompt_tokens_details is not None:
returned_usage.prompt_tokens_details = prompt_tokens_details
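
A worked example of the index-keyed tool-call merge, using SimpleNamespace stand-ins for the streamed delta objects; argument fragments split across chunks are concatenated per index.

from types import SimpleNamespace as NS

chunks = [
    {"choices": [{"delta": {"tool_calls": [
        NS(index=0, id="call_a", type="function", function=NS(name="get_weather", arguments='{"ci'))
    ]}}]},
    {"choices": [{"delta": {"tool_calls": [
        NS(index=0, id=None, type=None, function=NS(name=None, arguments='ty": "SF"}'))
    ]}}]},
    {"choices": [{"delta": {"tool_calls": [
        NS(index=1, id="call_b", type="function", function=NS(name="get_time", arguments="{}"))
    ]}}]},
]

merged: dict = {}
for chunk in chunks:
    for choice in chunk["choices"]:
        for tc in choice["delta"].get("tool_calls", []):
            slot = merged.setdefault(tc.index, {"id": None, "name": None, "arguments": []})
            if tc.id:
                slot["id"] = tc.id
            if tc.function.name:
                slot["name"] = tc.function.name
            if tc.function.arguments:
                slot["arguments"].append(tc.function.arguments)

for index in sorted(merged):
    call = merged[index]
    print(call["id"], call["name"], "".join(call["arguments"]) or "{}")
# call_a get_weather {"city": "SF"}
# call_b get_time {}
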
diff --git a/litellm/litellm_core_utils/streaming_handler.py b/litellm/litellm_core_utils/streaming_handler.py
index 5d5a8bf256..ec20a1ad4c 100644
--- a/litellm/litellm_core_utils/streaming_handler.py
+++ b/litellm/litellm_core_utils/streaming_handler.py
@@ -1,5 +1,6 @@
import asyncio
import collections.abc
+import datetime
import json
import threading
import time
@@ -84,9 +85,9 @@ class CustomStreamWrapper:
self.system_fingerprint: Optional[str] = None
self.received_finish_reason: Optional[str] = None
- self.intermittent_finish_reason: Optional[str] = (
- None # finish reasons that show up mid-stream
- )
+ self.intermittent_finish_reason: Optional[
+ str
+ ] = None # finish reasons that show up mid-stream
self.special_tokens = [
"<|assistant|>",
"<|system|>",
@@ -213,10 +214,7 @@ class CustomStreamWrapper:
Output parse / special tokens for sagemaker + hf streaming.
"""
hold = False
- if (
- self.custom_llm_provider != "huggingface"
- and self.custom_llm_provider != "sagemaker"
- ):
+ if self.custom_llm_provider != "sagemaker":
return hold, chunk
if finish_reason:
@@ -289,49 +287,6 @@ class CustomStreamWrapper:
except Exception as e:
raise e
- def handle_huggingface_chunk(self, chunk):
- try:
- if not isinstance(chunk, str):
- chunk = chunk.decode(
- "utf-8"
- ) # DO NOT REMOVE this: This is required for HF inference API + Streaming
- text = ""
- is_finished = False
- finish_reason = ""
- print_verbose(f"chunk: {chunk}")
- if chunk.startswith("data:"):
- data_json = json.loads(chunk[5:])
- print_verbose(f"data json: {data_json}")
- if "token" in data_json and "text" in data_json["token"]:
- text = data_json["token"]["text"]
- if data_json.get("details", False) and data_json["details"].get(
- "finish_reason", False
- ):
- is_finished = True
- finish_reason = data_json["details"]["finish_reason"]
- elif data_json.get(
- "generated_text", False
- ): # if full generated text exists, then stream is complete
- text = "" # don't return the final bos token
- is_finished = True
- finish_reason = "stop"
- elif data_json.get("error", False):
- raise Exception(data_json.get("error"))
- return {
- "text": text,
- "is_finished": is_finished,
- "finish_reason": finish_reason,
- }
- elif "error" in chunk:
- raise ValueError(chunk)
- return {
- "text": text,
- "is_finished": is_finished,
- "finish_reason": finish_reason,
- }
- except Exception as e:
- raise e
-
def handle_ai21_chunk(self, chunk): # fake streaming
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
@@ -799,6 +754,10 @@ class CustomStreamWrapper:
"provider_specific_fields" in response_obj
and response_obj["provider_specific_fields"] is not None
)
+ or (
+ "annotations" in model_response.choices[0].delta
+ and model_response.choices[0].delta.annotations is not None
+ )
):
return True
else:
@@ -810,7 +769,6 @@ class CustomStreamWrapper:
model_response: ModelResponseStream,
response_obj: Dict[str, Any],
):
-
print_verbose(
f"completion_obj: {completion_obj}, model_response.choices[0]: {model_response.choices[0]}, response_obj: {response_obj}"
)
@@ -898,6 +856,8 @@ class CustomStreamWrapper:
return model_response
# Default - return StopIteration
+ if hasattr(model_response, "usage"):
+ self.chunks.append(model_response)
raise StopIteration
# flush any remaining holding chunk
if len(self.holding_chunk) > 0:
@@ -937,7 +897,6 @@ class CustomStreamWrapper:
and model_response.choices[0].delta.audio is not None
):
return model_response
-
else:
if hasattr(model_response, "usage"):
self.chunks.append(model_response)
@@ -1003,7 +962,6 @@ class CustomStreamWrapper:
self.custom_llm_provider
and self.custom_llm_provider in litellm._custom_providers
):
-
if self.received_finish_reason is not None:
if "provider_specific_fields" not in chunk:
raise StopIteration
@@ -1045,11 +1003,6 @@ class CustomStreamWrapper:
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
self.received_finish_reason = response_obj["finish_reason"]
- elif self.custom_llm_provider and self.custom_llm_provider == "huggingface":
- response_obj = self.handle_huggingface_chunk(chunk)
- completion_obj["content"] = response_obj["text"]
- if response_obj["is_finished"]:
- self.received_finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "predibase":
response_obj = self.handle_predibase_chunk(chunk)
completion_obj["content"] = response_obj["text"]
@@ -1374,9 +1327,9 @@ class CustomStreamWrapper:
_json_delta = delta.model_dump()
print_verbose(f"_json_delta: {_json_delta}")
if "role" not in _json_delta or _json_delta["role"] is None:
- _json_delta["role"] = (
- "assistant" # mistral's api returns role as None
- )
+ _json_delta[
+ "role"
+ ] = "assistant" # mistral's api returns role as None
if "tool_calls" in _json_delta and isinstance(
_json_delta["tool_calls"], list
):
@@ -1470,6 +1423,24 @@ class CustomStreamWrapper:
"""
self.logging_loop = loop
+ def cache_streaming_response(self, processed_chunk, cache_hit: bool):
+ """
+ Caches the streaming response
+ """
+ if not cache_hit and self.logging_obj._llm_caching_handler is not None:
+ self.logging_obj._llm_caching_handler._sync_add_streaming_response_to_cache(
+ processed_chunk
+ )
+
+ async def async_cache_streaming_response(self, processed_chunk, cache_hit: bool):
+ """
+ Caches the streaming response
+ """
+ if not cache_hit and self.logging_obj._llm_caching_handler is not None:
+ await self.logging_obj._llm_caching_handler._add_streaming_response_to_cache(
+ processed_chunk
+ )
+
def run_success_logging_and_cache_storage(self, processed_chunk, cache_hit: bool):
"""
- Runs success logging in a thread and adds the response to the cache
+ Runs success logging in a thread; cache storage now happens once on the assembled response
@@ -1501,12 +1472,6 @@ class CustomStreamWrapper:
## SYNC LOGGING
self.logging_obj.success_handler(processed_chunk, None, None, cache_hit)
- ## Sync store in cache
- if self.logging_obj._llm_caching_handler is not None:
- self.logging_obj._llm_caching_handler._sync_add_streaming_response_to_cache(
- processed_chunk
- )
-
def finish_reason_handler(self):
model_response = self.model_response_creator()
_finish_reason = self.received_finish_reason or self.intermittent_finish_reason
@@ -1552,11 +1517,16 @@ class CustomStreamWrapper:
if response is None:
continue
+ if self.logging_obj.completion_start_time is None:
+ self.logging_obj._update_completion_start_time(
+ completion_start_time=datetime.datetime.now()
+ )
## LOGGING
- threading.Thread(
- target=self.run_success_logging_and_cache_storage,
- args=(response, cache_hit),
- ).start() # log response
+ executor.submit(
+ self.run_success_logging_and_cache_storage,
+ response,
+ cache_hit,
+ ) # log response
choice = response.choices[0]
if isinstance(choice, StreamingChoices):
self.response_uptil_now += choice.delta.get("content", "") or ""
@@ -1600,13 +1570,27 @@ class CustomStreamWrapper:
"usage",
getattr(complete_streaming_response, "usage"),
)
-
- ## LOGGING
- threading.Thread(
- target=self.logging_obj.success_handler,
- args=(response, None, None, cache_hit),
- ).start() # log response
-
+ self.cache_streaming_response(
+ processed_chunk=complete_streaming_response.model_copy(
+ deep=True
+ ),
+ cache_hit=cache_hit,
+ )
+ executor.submit(
+ self.logging_obj.success_handler,
+ complete_streaming_response.model_copy(deep=True),
+ None,
+ None,
+ cache_hit,
+ )
+ else:
+ executor.submit(
+ self.logging_obj.success_handler,
+ response,
+ None,
+ None,
+ cache_hit,
+ )
if self.sent_stream_usage is False and self.send_stream_usage is True:
self.sent_stream_usage = True
return response
@@ -1618,10 +1602,11 @@ class CustomStreamWrapper:
usage = calculate_total_usage(chunks=self.chunks)
processed_chunk._hidden_params["usage"] = usage
## LOGGING
- threading.Thread(
- target=self.run_success_logging_and_cache_storage,
- args=(processed_chunk, cache_hit),
- ).start() # log response
+ executor.submit(
+ self.run_success_logging_and_cache_storage,
+ processed_chunk,
+ cache_hit,
+ ) # log response
return processed_chunk
except Exception as e:
traceback_exception = traceback.format_exc()
@@ -1690,11 +1675,9 @@ class CustomStreamWrapper:
if processed_chunk is None:
continue
- if self.logging_obj._llm_caching_handler is not None:
- asyncio.create_task(
- self.logging_obj._llm_caching_handler._add_streaming_response_to_cache(
- processed_chunk=cast(ModelResponse, processed_chunk),
- )
+ if self.logging_obj.completion_start_time is None:
+ self.logging_obj._update_completion_start_time(
+ completion_start_time=datetime.datetime.now()
)
choice = processed_chunk.choices[0]
@@ -1732,9 +1715,9 @@ class CustomStreamWrapper:
chunk = next(self.completion_stream)
if chunk is not None and chunk != b"":
print_verbose(f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}")
- processed_chunk: Optional[ModelResponseStream] = (
- self.chunk_creator(chunk=chunk)
- )
+ processed_chunk: Optional[
+ ModelResponseStream
+ ] = self.chunk_creator(chunk=chunk)
print_verbose(
f"PROCESSED CHUNK POST CHUNK CREATOR: {processed_chunk}"
)
@@ -1767,6 +1750,14 @@ class CustomStreamWrapper:
"usage",
getattr(complete_streaming_response, "usage"),
)
+ asyncio.create_task(
+ self.async_cache_streaming_response(
+ processed_chunk=complete_streaming_response.model_copy(
+ deep=True
+ ),
+ cache_hit=cache_hit,
+ )
+ )
if self.sent_stream_usage is False and self.send_stream_usage is True:
self.sent_stream_usage = True
return response
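For context on the threading change in the hunks above: per-chunk `threading.Thread(...).start()` calls are replaced with submissions to a shared executor. A minimal standalone sketch of that pattern (names below are illustrative, not taken from the patch):

from concurrent.futures import ThreadPoolExecutor

# Submitting to a pool reuses worker threads instead of spawning a new thread
# for every streamed chunk that needs success logging.
executor = ThreadPoolExecutor(max_workers=2)

def log_success(chunk: str, cache_hit: bool) -> None:
    print(f"logged chunk={chunk!r} cache_hit={cache_hit}")

executor.submit(log_success, "final-chunk", False)
executor.shutdown(wait=True)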
diff --git a/litellm/litellm_core_utils/token_counter.py b/litellm/litellm_core_utils/token_counter.py
index e6bc65ccff..afd5ab5ff4 100644
--- a/litellm/litellm_core_utils/token_counter.py
+++ b/litellm/litellm_core_utils/token_counter.py
@@ -11,6 +11,10 @@ from litellm.constants import (
DEFAULT_IMAGE_HEIGHT,
DEFAULT_IMAGE_TOKEN_COUNT,
DEFAULT_IMAGE_WIDTH,
+ MAX_LONG_SIDE_FOR_IMAGE_HIGH_RES,
+ MAX_SHORT_SIDE_FOR_IMAGE_HIGH_RES,
+ MAX_TILE_HEIGHT,
+ MAX_TILE_WIDTH,
)
from litellm.llms.custom_httpx.http_handler import _get_httpx_client
@@ -97,11 +101,14 @@ def resize_image_high_res(
height: int,
) -> Tuple[int, int]:
# Maximum dimensions for high res mode
- max_short_side = 768
- max_long_side = 2000
+ max_short_side = MAX_SHORT_SIDE_FOR_IMAGE_HIGH_RES
+ max_long_side = MAX_LONG_SIDE_FOR_IMAGE_HIGH_RES
# Return early if no resizing is needed
- if width <= 768 and height <= 768:
+ if (
+ width <= MAX_SHORT_SIDE_FOR_IMAGE_HIGH_RES
+ and height <= MAX_SHORT_SIDE_FOR_IMAGE_HIGH_RES
+ ):
return width, height
# Determine the longer and shorter sides
@@ -132,7 +139,10 @@ def resize_image_high_res(
# Test the function with the given example
def calculate_tiles_needed(
- resized_width, resized_height, tile_width=512, tile_height=512
+ resized_width,
+ resized_height,
+ tile_width=MAX_TILE_WIDTH,
+ tile_height=MAX_TILE_HEIGHT,
):
tiles_across = (resized_width + tile_width - 1) // tile_width
tiles_down = (resized_height + tile_height - 1) // tile_height
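A quick usage sketch of the image-token path touched above, assuming litellm is installed; the dimensions are arbitrary, and the limits mentioned in the comment are the former hard-coded values (short side 768, long side 2000, 512x512 tiles) now assumed to sit behind the new constants:

from litellm.litellm_core_utils.token_counter import (
    calculate_tiles_needed,
    resize_image_high_res,
)

# A 3000x1500 image exceeds the high-res limits, so it is scaled down first
# and then split into tiles before token counting.
width, height = resize_image_high_res(width=3000, height=1500)
tiles = calculate_tiles_needed(width, height)
print(width, height, tiles)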
diff --git a/litellm/llms/aiohttp_openai/chat/transformation.py b/litellm/llms/aiohttp_openai/chat/transformation.py
index 212db1853b..c2d4e5adcd 100644
--- a/litellm/llms/aiohttp_openai/chat/transformation.py
+++ b/litellm/llms/aiohttp_openai/chat/transformation.py
@@ -27,6 +27,7 @@ class AiohttpOpenAIChatConfig(OpenAILikeChatConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
@@ -49,6 +50,7 @@ class AiohttpOpenAIChatConfig(OpenAILikeChatConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/anthropic/chat/handler.py b/litellm/llms/anthropic/chat/handler.py
index f2c5f390d7..397aa1e047 100644
--- a/litellm/llms/anthropic/chat/handler.py
+++ b/litellm/llms/anthropic/chat/handler.py
@@ -4,7 +4,7 @@ Calling + translation logic for anthropic's `/v1/messages` endpoint
import copy
import json
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import httpx # type: ignore
@@ -21,7 +21,6 @@ from litellm.llms.custom_httpx.http_handler import (
get_async_httpx_client,
)
from litellm.types.llms.anthropic import (
- AnthropicChatCompletionUsageBlock,
ContentBlockDelta,
ContentBlockStart,
ContentBlockStop,
@@ -30,15 +29,16 @@ from litellm.types.llms.anthropic import (
UsageDelta,
)
from litellm.types.llms.openai import (
+ ChatCompletionRedactedThinkingBlock,
ChatCompletionThinkingBlock,
ChatCompletionToolCallChunk,
- ChatCompletionUsageBlock,
)
from litellm.types.utils import (
Delta,
GenericStreamingChunk,
ModelResponseStream,
StreamingChoices,
+ Usage,
)
from litellm.utils import CustomStreamWrapper, ModelResponse, ProviderConfigManager
@@ -290,7 +290,6 @@ class AnthropicChatCompletion(BaseLLM):
headers={},
client=None,
):
-
optional_params = copy.deepcopy(optional_params)
stream = optional_params.pop("stream", None)
json_mode: bool = optional_params.pop("json_mode", False)
@@ -303,12 +302,17 @@ class AnthropicChatCompletion(BaseLLM):
model=model,
messages=messages,
optional_params={**optional_params, "is_vertex_request": is_vertex_request},
+ litellm_params=litellm_params,
)
config = ProviderConfigManager.get_provider_chat_config(
model=model,
provider=LlmProviders(custom_llm_provider),
)
+ if config is None:
+ raise ValueError(
+ f"Provider config not found for model: {model} and provider: {custom_llm_provider}"
+ )
data = config.transform_request(
model=model,
@@ -488,48 +492,29 @@ class ModelResponseIterator:
return True
return False
- def _handle_usage(
- self, anthropic_usage_chunk: Union[dict, UsageDelta]
- ) -> AnthropicChatCompletionUsageBlock:
-
- usage_block = AnthropicChatCompletionUsageBlock(
- prompt_tokens=anthropic_usage_chunk.get("input_tokens", 0),
- completion_tokens=anthropic_usage_chunk.get("output_tokens", 0),
- total_tokens=anthropic_usage_chunk.get("input_tokens", 0)
- + anthropic_usage_chunk.get("output_tokens", 0),
+ def _handle_usage(self, anthropic_usage_chunk: Union[dict, UsageDelta]) -> Usage:
+ return AnthropicConfig().calculate_usage(
+ usage_object=cast(dict, anthropic_usage_chunk), reasoning_content=None
)
- cache_creation_input_tokens = anthropic_usage_chunk.get(
- "cache_creation_input_tokens"
- )
- if cache_creation_input_tokens is not None and isinstance(
- cache_creation_input_tokens, int
- ):
- usage_block["cache_creation_input_tokens"] = cache_creation_input_tokens
-
- cache_read_input_tokens = anthropic_usage_chunk.get("cache_read_input_tokens")
- if cache_read_input_tokens is not None and isinstance(
- cache_read_input_tokens, int
- ):
- usage_block["cache_read_input_tokens"] = cache_read_input_tokens
-
- return usage_block
-
- def _content_block_delta_helper(self, chunk: dict) -> Tuple[
+ def _content_block_delta_helper(
+ self, chunk: dict
+ ) -> Tuple[
str,
Optional[ChatCompletionToolCallChunk],
- List[ChatCompletionThinkingBlock],
+ List[Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]],
Dict[str, Any],
]:
"""
Helper function to handle the content block delta
"""
-
text = ""
tool_use: Optional[ChatCompletionToolCallChunk] = None
provider_specific_fields = {}
content_block = ContentBlockDelta(**chunk) # type: ignore
- thinking_blocks: List[ChatCompletionThinkingBlock] = []
+ thinking_blocks: List[
+ Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]
+ ] = []
self.content_blocks.append(content_block)
if "text" in content_block["delta"]:
@@ -558,20 +543,25 @@ class ModelResponseIterator:
)
]
provider_specific_fields["thinking_blocks"] = thinking_blocks
+
return text, tool_use, thinking_blocks, provider_specific_fields
def _handle_reasoning_content(
- self, thinking_blocks: List[ChatCompletionThinkingBlock]
+ self,
+ thinking_blocks: List[
+ Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]
+ ],
) -> Optional[str]:
"""
Handle the reasoning content
"""
reasoning_content = None
for block in thinking_blocks:
+ thinking_content = cast(Optional[str], block.get("thinking"))
if reasoning_content is None:
reasoning_content = ""
- if "thinking" in block:
- reasoning_content += block["thinking"]
+ if thinking_content is not None:
+ reasoning_content += thinking_content
return reasoning_content
def chunk_parser(self, chunk: dict) -> ModelResponseStream:
@@ -581,10 +571,16 @@ class ModelResponseIterator:
text = ""
tool_use: Optional[ChatCompletionToolCallChunk] = None
finish_reason = ""
- usage: Optional[ChatCompletionUsageBlock] = None
+ usage: Optional[Usage] = None
provider_specific_fields: Dict[str, Any] = {}
reasoning_content: Optional[str] = None
- thinking_blocks: Optional[List[ChatCompletionThinkingBlock]] = None
+ thinking_blocks: Optional[
+ List[
+ Union[
+ ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock
+ ]
+ ]
+ ] = None
index = int(chunk.get("index", 0))
if type_chunk == "content_block_delta":
@@ -592,9 +588,12 @@ class ModelResponseIterator:
Anthropic content chunk
chunk = {'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': 'Hello'}}
"""
- text, tool_use, thinking_blocks, provider_specific_fields = (
- self._content_block_delta_helper(chunk=chunk)
- )
+ (
+ text,
+ tool_use,
+ thinking_blocks,
+ provider_specific_fields,
+ ) = self._content_block_delta_helper(chunk=chunk)
if thinking_blocks:
reasoning_content = self._handle_reasoning_content(
thinking_blocks=thinking_blocks
@@ -619,8 +618,16 @@ class ModelResponseIterator:
},
"index": self.tool_index,
}
+ elif (
+ content_block_start["content_block"]["type"] == "redacted_thinking"
+ ):
+ thinking_blocks = [
+ ChatCompletionRedactedThinkingBlock(
+ type="redacted_thinking",
+ data=content_block_start["content_block"]["data"],
+ )
+ ]
elif type_chunk == "content_block_stop":
-
ContentBlockStop(**chunk) # type: ignore
# check if tool call content block
is_empty = self.check_empty_tool_call_args()
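The streaming `_handle_usage` above now delegates to the shared `AnthropicConfig.calculate_usage`. A minimal sketch of that call with a hand-written usage block (the token values are made up):

from litellm.llms.anthropic.chat.transformation import AnthropicConfig

# Shape of the usage object Anthropic sends in message_start / message_delta events.
anthropic_usage = {
    "input_tokens": 120,
    "output_tokens": 40,
    "cache_read_input_tokens": 100,
}

usage = AnthropicConfig().calculate_usage(
    usage_object=anthropic_usage, reasoning_content=None
)
print(usage.prompt_tokens, usage.completion_tokens, usage.total_tokens)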
diff --git a/litellm/llms/anthropic/chat/transformation.py b/litellm/llms/anthropic/chat/transformation.py
index 383c1cd3e5..06e0553f8d 100644
--- a/litellm/llms/anthropic/chat/transformation.py
+++ b/litellm/llms/anthropic/chat/transformation.py
@@ -5,7 +5,13 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast
import httpx
import litellm
-from litellm.constants import RESPONSE_FORMAT_TOOL_NAME
+from litellm.constants import (
+ DEFAULT_ANTHROPIC_CHAT_MAX_TOKENS,
+ DEFAULT_REASONING_EFFORT_HIGH_THINKING_BUDGET,
+ DEFAULT_REASONING_EFFORT_LOW_THINKING_BUDGET,
+ DEFAULT_REASONING_EFFORT_MEDIUM_THINKING_BUDGET,
+ RESPONSE_FORMAT_TOOL_NAME,
+)
from litellm.litellm_core_utils.core_helpers import map_finish_reason
from litellm.litellm_core_utils.prompt_templates.factory import anthropic_messages_pt
from litellm.llms.base_llm.base_utils import type_to_response_format_param
@@ -18,21 +24,31 @@ from litellm.types.llms.anthropic import (
AnthropicMessagesTool,
AnthropicMessagesToolChoice,
AnthropicSystemMessageContent,
+ AnthropicThinkingParam,
)
from litellm.types.llms.openai import (
+ REASONING_EFFORT,
AllMessageValues,
ChatCompletionCachedContent,
+ ChatCompletionRedactedThinkingBlock,
ChatCompletionSystemMessage,
ChatCompletionThinkingBlock,
ChatCompletionToolCallChunk,
ChatCompletionToolCallFunctionChunk,
ChatCompletionToolParam,
)
+from litellm.types.utils import CompletionTokensDetailsWrapper
from litellm.types.utils import Message as LitellmMessage
from litellm.types.utils import PromptTokensDetailsWrapper
-from litellm.utils import ModelResponse, Usage, add_dummy_tool, has_tool_call_blocks
+from litellm.utils import (
+ ModelResponse,
+ Usage,
+ add_dummy_tool,
+ has_tool_call_blocks,
+ token_counter,
+)
-from ..common_utils import AnthropicError, process_anthropic_headers
+from ..common_utils import AnthropicError, AnthropicModelInfo, process_anthropic_headers
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
@@ -42,16 +58,16 @@ else:
LoggingClass = Any
-class AnthropicConfig(BaseConfig):
+class AnthropicConfig(AnthropicModelInfo, BaseConfig):
"""
Reference: https://docs.anthropic.com/claude/reference/messages_post
to pass metadata to anthropic, it's {"user_id": "any-relevant-information"}
"""
- max_tokens: Optional[int] = (
- 4096 # anthropic requires a default value (Opus, Sonnet, and Haiku have the same default)
- )
+ max_tokens: Optional[
+ int
+ ] = DEFAULT_ANTHROPIC_CHAT_MAX_TOKENS # anthropic requires a default value (Opus, Sonnet, and Haiku have the same default)
stop_sequences: Optional[list] = None
temperature: Optional[int] = None
top_p: Optional[int] = None
@@ -63,7 +79,7 @@ class AnthropicConfig(BaseConfig):
self,
max_tokens: Optional[
int
- ] = 4096, # You can pass in a value yourself or use the default value 4096
+ ] = DEFAULT_ANTHROPIC_CHAT_MAX_TOKENS, # You can pass in a value yourself or use the default value 4096
stop_sequences: Optional[list] = None,
temperature: Optional[int] = None,
top_p: Optional[int] = None,
@@ -94,6 +110,7 @@ class AnthropicConfig(BaseConfig):
"parallel_tool_calls",
"response_format",
"user",
+ "reasoning_effort",
]
if "claude-3-7-sonnet" in model:
@@ -104,7 +121,6 @@ class AnthropicConfig(BaseConfig):
def get_json_schema_from_pydantic_object(
self, response_format: Union[Any, Dict, None]
) -> Optional[dict]:
-
return type_to_response_format_param(
response_format, ref_template="/$defs/{model}"
) # Relevant issue: https://github.com/BerriAI/litellm/issues/7755
@@ -115,42 +131,6 @@ class AnthropicConfig(BaseConfig):
"anthropic-beta": "prompt-caching-2024-07-31",
}
- def get_anthropic_headers(
- self,
- api_key: str,
- anthropic_version: Optional[str] = None,
- computer_tool_used: bool = False,
- prompt_caching_set: bool = False,
- pdf_used: bool = False,
- is_vertex_request: bool = False,
- user_anthropic_beta_headers: Optional[List[str]] = None,
- ) -> dict:
-
- betas = set()
- if prompt_caching_set:
- betas.add("prompt-caching-2024-07-31")
- if computer_tool_used:
- betas.add("computer-use-2024-10-22")
- if pdf_used:
- betas.add("pdfs-2024-09-25")
- headers = {
- "anthropic-version": anthropic_version or "2023-06-01",
- "x-api-key": api_key,
- "accept": "application/json",
- "content-type": "application/json",
- }
-
- if user_anthropic_beta_headers is not None:
- betas.update(user_anthropic_beta_headers)
-
- # Don't send any beta headers to Vertex, Vertex has failed requests when they are sent
- if is_vertex_request is True:
- pass
- elif len(betas) > 0:
- headers["anthropic-beta"] = ",".join(betas)
-
- return headers
-
def _map_tool_choice(
self, tool_choice: Optional[str], parallel_tool_use: Optional[bool]
) -> Optional[AnthropicMessagesToolChoice]:
@@ -293,6 +273,57 @@ class AnthropicConfig(BaseConfig):
new_stop = new_v
return new_stop
+ @staticmethod
+ def _map_reasoning_effort(
+ reasoning_effort: Optional[Union[REASONING_EFFORT, str]]
+ ) -> Optional[AnthropicThinkingParam]:
+ if reasoning_effort is None:
+ return None
+ elif reasoning_effort == "low":
+ return AnthropicThinkingParam(
+ type="enabled",
+ budget_tokens=DEFAULT_REASONING_EFFORT_LOW_THINKING_BUDGET,
+ )
+ elif reasoning_effort == "medium":
+ return AnthropicThinkingParam(
+ type="enabled",
+ budget_tokens=DEFAULT_REASONING_EFFORT_MEDIUM_THINKING_BUDGET,
+ )
+ elif reasoning_effort == "high":
+ return AnthropicThinkingParam(
+ type="enabled",
+ budget_tokens=DEFAULT_REASONING_EFFORT_HIGH_THINKING_BUDGET,
+ )
+ else:
+ raise ValueError(f"Unmapped reasoning effort: {reasoning_effort}")
+
+ def map_response_format_to_anthropic_tool(
+ self, value: Optional[dict], optional_params: dict, is_thinking_enabled: bool
+ ) -> Optional[AnthropicMessagesTool]:
+ ignore_response_format_types = ["text"]
+ if (
+ value is None or value["type"] in ignore_response_format_types
+ ): # value is a no-op
+ return None
+
+ json_schema: Optional[dict] = None
+ if "response_schema" in value:
+ json_schema = value["response_schema"]
+ elif "json_schema" in value:
+ json_schema = value["json_schema"]["schema"]
+ """
+ When using tools in this way: - https://docs.anthropic.com/en/docs/build-with-claude/tool-use#json-mode
+ - You usually want to provide a single tool
+ - You should set tool_choice (see Forcing tool use) to instruct the model to explicitly use that tool
+ - Remember that the model will pass the input to the tool, so the name of the tool and description should be from the model’s perspective.
+ """
+
+ _tool = self._create_json_tool_call_for_response_format(
+ json_schema=json_schema,
+ )
+
+ return _tool
+
def map_openai_params(
self,
non_default_params: dict,
@@ -300,6 +331,10 @@ class AnthropicConfig(BaseConfig):
model: str,
drop_params: bool,
) -> dict:
+ is_thinking_enabled = self.is_thinking_enabled(
+ non_default_params=non_default_params
+ )
+
for param, value in non_default_params.items():
if param == "max_tokens":
optional_params["max_tokens"] = value
@@ -312,11 +347,11 @@ class AnthropicConfig(BaseConfig):
optional_params=optional_params, tools=tool_value
)
if param == "tool_choice" or param == "parallel_tool_calls":
- _tool_choice: Optional[AnthropicMessagesToolChoice] = (
- self._map_tool_choice(
- tool_choice=non_default_params.get("tool_choice"),
- parallel_tool_use=non_default_params.get("parallel_tool_calls"),
- )
+ _tool_choice: Optional[
+ AnthropicMessagesToolChoice
+ ] = self._map_tool_choice(
+ tool_choice=non_default_params.get("tool_choice"),
+ parallel_tool_use=non_default_params.get("parallel_tool_calls"),
)
if _tool_choice is not None:
@@ -332,36 +367,31 @@ class AnthropicConfig(BaseConfig):
if param == "top_p":
optional_params["top_p"] = value
if param == "response_format" and isinstance(value, dict):
-
- ignore_response_format_types = ["text"]
- if value["type"] in ignore_response_format_types: # value is a no-op
- continue
-
- json_schema: Optional[dict] = None
- if "response_schema" in value:
- json_schema = value["response_schema"]
- elif "json_schema" in value:
- json_schema = value["json_schema"]["schema"]
- """
- When using tools in this way: - https://docs.anthropic.com/en/docs/build-with-claude/tool-use#json-mode
- - You usually want to provide a single tool
- - You should set tool_choice (see Forcing tool use) to instruct the model to explicitly use that tool
- - Remember that the model will pass the input to the tool, so the name of the tool and description should be from the model’s perspective.
- """
-
- _tool_choice = {"name": RESPONSE_FORMAT_TOOL_NAME, "type": "tool"}
- _tool = self._create_json_tool_call_for_response_format(
- json_schema=json_schema,
+ _tool = self.map_response_format_to_anthropic_tool(
+ value, optional_params, is_thinking_enabled
)
+ if _tool is None:
+ continue
+ if not is_thinking_enabled:
+ _tool_choice = {"name": RESPONSE_FORMAT_TOOL_NAME, "type": "tool"}
+ optional_params["tool_choice"] = _tool_choice
+ optional_params["json_mode"] = True
optional_params = self._add_tools_to_optional_params(
optional_params=optional_params, tools=[_tool]
)
- optional_params["tool_choice"] = _tool_choice
- optional_params["json_mode"] = True
if param == "user":
optional_params["metadata"] = {"user_id": value}
if param == "thinking":
optional_params["thinking"] = value
+ elif param == "reasoning_effort" and isinstance(value, str):
+ optional_params["thinking"] = AnthropicConfig._map_reasoning_effort(
+ value
+ )
+
+ ## handle thinking tokens
+ self.update_optional_params_with_thinking_tokens(
+ non_default_params=non_default_params, optional_params=optional_params
+ )
return optional_params
def _create_json_tool_call_for_response_format(
@@ -387,56 +417,13 @@ class AnthropicConfig(BaseConfig):
_input_schema["additionalProperties"] = True
_input_schema["properties"] = {}
else:
- _input_schema["properties"] = {"values": json_schema}
+ _input_schema.update(cast(AnthropicInputSchema, json_schema))
_tool = AnthropicMessagesTool(
name=RESPONSE_FORMAT_TOOL_NAME, input_schema=_input_schema
)
return _tool
- def is_cache_control_set(self, messages: List[AllMessageValues]) -> bool:
- """
- Return if {"cache_control": ..} in message content block
-
- Used to check if anthropic prompt caching headers need to be set.
- """
- for message in messages:
- if message.get("cache_control", None) is not None:
- return True
- _message_content = message.get("content")
- if _message_content is not None and isinstance(_message_content, list):
- for content in _message_content:
- if "cache_control" in content:
- return True
-
- return False
-
- def is_computer_tool_used(
- self, tools: Optional[List[AllAnthropicToolsValues]]
- ) -> bool:
- if tools is None:
- return False
- for tool in tools:
- if "type" in tool and tool["type"].startswith("computer_"):
- return True
- return False
-
- def is_pdf_used(self, messages: List[AllMessageValues]) -> bool:
- """
- Set to true if media passed into messages.
-
- """
- for message in messages:
- if (
- "content" in message
- and message["content"] is not None
- and isinstance(message["content"], list)
- ):
- for content in message["content"]:
- if "type" in content and content["type"] != "text":
- return True
- return False
-
def translate_system_message(
self, messages: List[AllMessageValues]
) -> List[AnthropicSystemMessageContent]:
@@ -457,9 +444,9 @@ class AnthropicConfig(BaseConfig):
text=system_message_block["content"],
)
if "cache_control" in system_message_block:
- anthropic_system_message_content["cache_control"] = (
- system_message_block["cache_control"]
- )
+ anthropic_system_message_content[
+ "cache_control"
+ ] = system_message_block["cache_control"]
anthropic_system_message_list.append(
anthropic_system_message_content
)
@@ -473,9 +460,9 @@ class AnthropicConfig(BaseConfig):
)
)
if "cache_control" in _content:
- anthropic_system_message_content["cache_control"] = (
- _content["cache_control"]
- )
+ anthropic_system_message_content[
+ "cache_control"
+ ] = _content["cache_control"]
anthropic_system_message_list.append(
anthropic_system_message_content
@@ -584,16 +571,26 @@ class AnthropicConfig(BaseConfig):
)
return _message
- def extract_response_content(self, completion_response: dict) -> Tuple[
+ def extract_response_content(
+ self, completion_response: dict
+ ) -> Tuple[
str,
Optional[List[Any]],
- Optional[List[ChatCompletionThinkingBlock]],
+ Optional[
+ List[
+ Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]
+ ]
+ ],
Optional[str],
List[ChatCompletionToolCallChunk],
]:
text_content = ""
citations: Optional[List[Any]] = None
- thinking_blocks: Optional[List[ChatCompletionThinkingBlock]] = None
+ thinking_blocks: Optional[
+ List[
+ Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]
+ ]
+ ] = None
reasoning_content: Optional[str] = None
tool_calls: List[ChatCompletionToolCallChunk] = []
for idx, content in enumerate(completion_response["content"]):
@@ -612,22 +609,71 @@ class AnthropicConfig(BaseConfig):
index=idx,
)
)
- ## CITATIONS
- if content.get("citations", None) is not None:
- if citations is None:
- citations = []
- citations.append(content["citations"])
- if content.get("thinking", None) is not None:
+
+ elif content.get("thinking", None) is not None:
if thinking_blocks is None:
thinking_blocks = []
thinking_blocks.append(cast(ChatCompletionThinkingBlock, content))
+ elif content["type"] == "redacted_thinking":
+ if thinking_blocks is None:
+ thinking_blocks = []
+ thinking_blocks.append(
+ cast(ChatCompletionRedactedThinkingBlock, content)
+ )
+
+ ## CITATIONS
+ if content.get("citations") is not None:
+ if citations is None:
+ citations = []
+ citations.append(content["citations"])
if thinking_blocks is not None:
reasoning_content = ""
for block in thinking_blocks:
- if "thinking" in block:
- reasoning_content += block["thinking"]
+ thinking_content = cast(Optional[str], block.get("thinking"))
+ if thinking_content is not None:
+ reasoning_content += thinking_content
+
return text_content, citations, thinking_blocks, reasoning_content, tool_calls
+ def calculate_usage(
+ self, usage_object: dict, reasoning_content: Optional[str]
+ ) -> Usage:
+ prompt_tokens = usage_object.get("input_tokens", 0)
+ completion_tokens = usage_object.get("output_tokens", 0)
+ _usage = usage_object
+ cache_creation_input_tokens: int = 0
+ cache_read_input_tokens: int = 0
+
+ if "cache_creation_input_tokens" in _usage:
+ cache_creation_input_tokens = _usage["cache_creation_input_tokens"]
+ if "cache_read_input_tokens" in _usage:
+ cache_read_input_tokens = _usage["cache_read_input_tokens"]
+ prompt_tokens += cache_read_input_tokens
+
+ prompt_tokens_details = PromptTokensDetailsWrapper(
+ cached_tokens=cache_read_input_tokens
+ )
+ completion_token_details = (
+ CompletionTokensDetailsWrapper(
+ reasoning_tokens=token_counter(
+ text=reasoning_content, count_response_tokens=True
+ )
+ )
+ if reasoning_content
+ else None
+ )
+ total_tokens = prompt_tokens + completion_tokens
+ usage = Usage(
+ prompt_tokens=prompt_tokens,
+ completion_tokens=completion_tokens,
+ total_tokens=total_tokens,
+ prompt_tokens_details=prompt_tokens_details,
+ cache_creation_input_tokens=cache_creation_input_tokens,
+ cache_read_input_tokens=cache_read_input_tokens,
+ completion_tokens_details=completion_token_details,
+ )
+ return usage
+
def transform_response(
self,
model: str,
@@ -676,13 +722,23 @@ class AnthropicConfig(BaseConfig):
else:
text_content = ""
citations: Optional[List[Any]] = None
- thinking_blocks: Optional[List[ChatCompletionThinkingBlock]] = None
+ thinking_blocks: Optional[
+ List[
+ Union[
+ ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock
+ ]
+ ]
+ ] = None
reasoning_content: Optional[str] = None
tool_calls: List[ChatCompletionToolCallChunk] = []
- text_content, citations, thinking_blocks, reasoning_content, tool_calls = (
- self.extract_response_content(completion_response=completion_response)
- )
+ (
+ text_content,
+ citations,
+ thinking_blocks,
+ reasoning_content,
+ tool_calls,
+ ) = self.extract_response_content(completion_response=completion_response)
_message = litellm.Message(
tool_calls=tool_calls,
@@ -714,35 +770,14 @@ class AnthropicConfig(BaseConfig):
)
## CALCULATING USAGE
- prompt_tokens = completion_response["usage"]["input_tokens"]
- completion_tokens = completion_response["usage"]["output_tokens"]
- _usage = completion_response["usage"]
- cache_creation_input_tokens: int = 0
- cache_read_input_tokens: int = 0
+ usage = self.calculate_usage(
+ usage_object=completion_response["usage"],
+ reasoning_content=reasoning_content,
+ )
+ setattr(model_response, "usage", usage) # type: ignore
model_response.created = int(time.time())
model_response.model = completion_response["model"]
- if "cache_creation_input_tokens" in _usage:
- cache_creation_input_tokens = _usage["cache_creation_input_tokens"]
- prompt_tokens += cache_creation_input_tokens
- if "cache_read_input_tokens" in _usage:
- cache_read_input_tokens = _usage["cache_read_input_tokens"]
- prompt_tokens += cache_read_input_tokens
-
- prompt_tokens_details = PromptTokensDetailsWrapper(
- cached_tokens=cache_read_input_tokens
- )
- total_tokens = prompt_tokens + completion_tokens
- usage = Usage(
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
- total_tokens=total_tokens,
- prompt_tokens_details=prompt_tokens_details,
- cache_creation_input_tokens=cache_creation_input_tokens,
- cache_read_input_tokens=cache_read_input_tokens,
- )
-
- setattr(model_response, "usage", usage) # type: ignore
model_response._hidden_params = _hidden_params
return model_response
@@ -786,46 +821,3 @@ class AnthropicConfig(BaseConfig):
message=error_message,
headers=cast(httpx.Headers, headers),
)
-
- def _get_user_anthropic_beta_headers(
- self, anthropic_beta_header: Optional[str]
- ) -> Optional[List[str]]:
- if anthropic_beta_header is None:
- return None
- return anthropic_beta_header.split(",")
-
- def validate_environment(
- self,
- headers: dict,
- model: str,
- messages: List[AllMessageValues],
- optional_params: dict,
- api_key: Optional[str] = None,
- api_base: Optional[str] = None,
- ) -> Dict:
- if api_key is None:
- raise litellm.AuthenticationError(
- message="Missing Anthropic API Key - A call is being made to anthropic but no key is set either in the environment variables or via params. Please set `ANTHROPIC_API_KEY` in your environment vars",
- llm_provider="anthropic",
- model=model,
- )
-
- tools = optional_params.get("tools")
- prompt_caching_set = self.is_cache_control_set(messages=messages)
- computer_tool_used = self.is_computer_tool_used(tools=tools)
- pdf_used = self.is_pdf_used(messages=messages)
- user_anthropic_beta_headers = self._get_user_anthropic_beta_headers(
- anthropic_beta_header=headers.get("anthropic-beta")
- )
- anthropic_headers = self.get_anthropic_headers(
- computer_tool_used=computer_tool_used,
- prompt_caching_set=prompt_caching_set,
- pdf_used=pdf_used,
- api_key=api_key,
- is_vertex_request=optional_params.get("is_vertex_request", False),
- user_anthropic_beta_headers=user_anthropic_beta_headers,
- )
-
- headers = {**headers, **anthropic_headers}
-
- return headers
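A small sketch of the new `reasoning_effort` mapping added above; the budget in the result comes from the DEFAULT_REASONING_EFFORT_*_THINKING_BUDGET constants, whose exact values are not shown in this patch:

from litellm.llms.anthropic.chat.transformation import AnthropicConfig

# "low" / "medium" / "high" map to an enabled thinking block with a token budget;
# any other string raises ValueError.
thinking = AnthropicConfig._map_reasoning_effort("medium")
print(thinking)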
diff --git a/litellm/llms/anthropic/common_utils.py b/litellm/llms/anthropic/common_utils.py
index 409bbe2d82..bacd2a54d0 100644
--- a/litellm/llms/anthropic/common_utils.py
+++ b/litellm/llms/anthropic/common_utils.py
@@ -2,11 +2,16 @@
This file contains common utils for anthropic calls.
"""
-from typing import Optional, Union
+from typing import Dict, List, Optional, Union
import httpx
+import litellm
+from litellm.llms.base_llm.base_utils import BaseLLMModelInfo
from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.anthropic import AllAnthropicToolsValues
+from litellm.types.llms.openai import AllMessageValues
class AnthropicError(BaseLLMException):
@@ -19,6 +24,176 @@ class AnthropicError(BaseLLMException):
super().__init__(status_code=status_code, message=message, headers=headers)
+class AnthropicModelInfo(BaseLLMModelInfo):
+ def is_cache_control_set(self, messages: List[AllMessageValues]) -> bool:
+ """
+ Return if {"cache_control": ..} in message content block
+
+ Used to check if anthropic prompt caching headers need to be set.
+ """
+ for message in messages:
+ if message.get("cache_control", None) is not None:
+ return True
+ _message_content = message.get("content")
+ if _message_content is not None and isinstance(_message_content, list):
+ for content in _message_content:
+ if "cache_control" in content:
+ return True
+
+ return False
+
+ def is_computer_tool_used(
+ self, tools: Optional[List[AllAnthropicToolsValues]]
+ ) -> bool:
+ if tools is None:
+ return False
+ for tool in tools:
+ if "type" in tool and tool["type"].startswith("computer_"):
+ return True
+ return False
+
+ def is_pdf_used(self, messages: List[AllMessageValues]) -> bool:
+ """
+ Set to true if media passed into messages.
+
+ """
+ for message in messages:
+ if (
+ "content" in message
+ and message["content"] is not None
+ and isinstance(message["content"], list)
+ ):
+ for content in message["content"]:
+ if "type" in content and content["type"] != "text":
+ return True
+ return False
+
+ def _get_user_anthropic_beta_headers(
+ self, anthropic_beta_header: Optional[str]
+ ) -> Optional[List[str]]:
+ if anthropic_beta_header is None:
+ return None
+ return anthropic_beta_header.split(",")
+
+ def get_anthropic_headers(
+ self,
+ api_key: str,
+ anthropic_version: Optional[str] = None,
+ computer_tool_used: bool = False,
+ prompt_caching_set: bool = False,
+ pdf_used: bool = False,
+ is_vertex_request: bool = False,
+ user_anthropic_beta_headers: Optional[List[str]] = None,
+ ) -> dict:
+ betas = set()
+ if prompt_caching_set:
+ betas.add("prompt-caching-2024-07-31")
+ if computer_tool_used:
+ betas.add("computer-use-2024-10-22")
+ if pdf_used:
+ betas.add("pdfs-2024-09-25")
+ headers = {
+ "anthropic-version": anthropic_version or "2023-06-01",
+ "x-api-key": api_key,
+ "accept": "application/json",
+ "content-type": "application/json",
+ }
+
+ if user_anthropic_beta_headers is not None:
+ betas.update(user_anthropic_beta_headers)
+
+ # Don't send any beta headers to Vertex, Vertex has failed requests when they are sent
+ if is_vertex_request is True:
+ pass
+ elif len(betas) > 0:
+ headers["anthropic-beta"] = ",".join(betas)
+
+ return headers
+
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> Dict:
+ if api_key is None:
+ raise litellm.AuthenticationError(
+ message="Missing Anthropic API Key - A call is being made to anthropic but no key is set either in the environment variables or via params. Please set `ANTHROPIC_API_KEY` in your environment vars",
+ llm_provider="anthropic",
+ model=model,
+ )
+
+ tools = optional_params.get("tools")
+ prompt_caching_set = self.is_cache_control_set(messages=messages)
+ computer_tool_used = self.is_computer_tool_used(tools=tools)
+ pdf_used = self.is_pdf_used(messages=messages)
+ user_anthropic_beta_headers = self._get_user_anthropic_beta_headers(
+ anthropic_beta_header=headers.get("anthropic-beta")
+ )
+ anthropic_headers = self.get_anthropic_headers(
+ computer_tool_used=computer_tool_used,
+ prompt_caching_set=prompt_caching_set,
+ pdf_used=pdf_used,
+ api_key=api_key,
+ is_vertex_request=optional_params.get("is_vertex_request", False),
+ user_anthropic_beta_headers=user_anthropic_beta_headers,
+ )
+
+ headers = {**headers, **anthropic_headers}
+
+ return headers
+
+ @staticmethod
+ def get_api_base(api_base: Optional[str] = None) -> Optional[str]:
+ return (
+ api_base
+ or get_secret_str("ANTHROPIC_API_BASE")
+ or "https://api.anthropic.com"
+ )
+
+ @staticmethod
+ def get_api_key(api_key: Optional[str] = None) -> Optional[str]:
+ return api_key or get_secret_str("ANTHROPIC_API_KEY")
+
+ @staticmethod
+ def get_base_model(model: Optional[str] = None) -> Optional[str]:
+ return model.replace("anthropic/", "") if model else None
+
+ def get_models(
+ self, api_key: Optional[str] = None, api_base: Optional[str] = None
+ ) -> List[str]:
+ api_base = AnthropicModelInfo.get_api_base(api_base)
+ api_key = AnthropicModelInfo.get_api_key(api_key)
+ if api_base is None or api_key is None:
+ raise ValueError(
+ "ANTHROPIC_API_BASE or ANTHROPIC_API_KEY is not set. Please set the environment variable, to query Anthropic's `/models` endpoint."
+ )
+ response = litellm.module_level_client.get(
+ url=f"{api_base}/v1/models",
+ headers={"x-api-key": api_key, "anthropic-version": "2023-06-01"},
+ )
+
+ try:
+ response.raise_for_status()
+ except httpx.HTTPStatusError:
+ raise Exception(
+ f"Failed to fetch models from Anthropic. Status code: {response.status_code}, Response: {response.text}"
+ )
+
+ models = response.json()["data"]
+
+ litellm_model_names = []
+ for model in models:
+ stripped_model_name = model["id"]
+ litellm_model_name = "anthropic/" + stripped_model_name
+ litellm_model_names.append(litellm_model_name)
+ return litellm_model_names
+
+
def process_anthropic_headers(headers: Union[httpx.Headers, dict]) -> dict:
openai_headers = {}
if "anthropic-ratelimit-requests-limit" in headers:
diff --git a/litellm/llms/anthropic/completion/transformation.py b/litellm/llms/anthropic/completion/transformation.py
index 7a260b6f94..9e3287aa8a 100644
--- a/litellm/llms/anthropic/completion/transformation.py
+++ b/litellm/llms/anthropic/completion/transformation.py
@@ -11,6 +11,7 @@ from typing import AsyncIterator, Dict, Iterator, List, Optional, Union
import httpx
import litellm
+from litellm.constants import DEFAULT_MAX_TOKENS
from litellm.litellm_core_utils.prompt_templates.factory import (
custom_prompt,
prompt_factory,
@@ -54,9 +55,9 @@ class AnthropicTextConfig(BaseConfig):
to pass metadata to anthropic, it's {"user_id": "any-relevant-information"}
"""
- max_tokens_to_sample: Optional[int] = (
- litellm.max_tokens
- ) # anthropic requires a default
+ max_tokens_to_sample: Optional[
+ int
+ ] = litellm.max_tokens # anthropic requires a default
stop_sequences: Optional[list] = None
temperature: Optional[int] = None
top_p: Optional[int] = None
@@ -65,7 +66,9 @@ class AnthropicTextConfig(BaseConfig):
def __init__(
self,
- max_tokens_to_sample: Optional[int] = 256, # anthropic requires a default
+ max_tokens_to_sample: Optional[
+ int
+ ] = DEFAULT_MAX_TOKENS, # anthropic requires a default
stop_sequences: Optional[list] = None,
temperature: Optional[int] = None,
top_p: Optional[int] = None,
@@ -84,6 +87,7 @@ class AnthropicTextConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/anthropic/experimental_pass_through/messages/handler.py b/litellm/llms/anthropic/experimental_pass_through/messages/handler.py
index a7dfff74d9..ab335ca7c1 100644
--- a/litellm/llms/anthropic/experimental_pass_through/messages/handler.py
+++ b/litellm/llms/anthropic/experimental_pass_through/messages/handler.py
@@ -1,12 +1,12 @@
"""
- call /messages on Anthropic API
-- Make streaming + non-streaming request - just pass it through direct to Anthropic. No need to do anything special here
+- Make streaming + non-streaming request - just pass it through directly to Anthropic. No need to do anything special here
- Ensure requests are logged in the DB - stream + non-stream
"""
import json
-from typing import Any, AsyncIterator, Dict, Optional, Union, cast
+from typing import AsyncIterator, Dict, List, Optional, Union, cast
import httpx
@@ -19,13 +19,15 @@ from litellm.llms.custom_httpx.http_handler import (
AsyncHTTPHandler,
get_async_httpx_client,
)
+from litellm.types.llms.anthropic_messages.anthropic_response import (
+ AnthropicMessagesResponse,
+)
from litellm.types.router import GenericLiteLLMParams
from litellm.types.utils import ProviderSpecificHeader
from litellm.utils import ProviderConfigManager, client
class AnthropicMessagesHandler:
-
@staticmethod
async def _handle_anthropic_streaming(
response: httpx.Response,
@@ -41,7 +43,9 @@ class AnthropicMessagesHandler:
from litellm.proxy.pass_through_endpoints.success_handler import (
PassThroughEndpointLogging,
)
- from litellm.proxy.pass_through_endpoints.types import EndpointType
+ from litellm.types.passthrough_endpoints.pass_through_endpoints import (
+ EndpointType,
+ )
# Create success handler object
passthrough_success_handler_obj = PassThroughEndpointLogging()
@@ -61,26 +65,40 @@ class AnthropicMessagesHandler:
@client
async def anthropic_messages(
- api_key: str,
+ max_tokens: int,
+ messages: List[Dict],
model: str,
- stream: bool = False,
+ metadata: Optional[Dict] = None,
+ stop_sequences: Optional[List[str]] = None,
+ stream: Optional[bool] = False,
+ system: Optional[str] = None,
+ temperature: Optional[float] = None,
+ thinking: Optional[Dict] = None,
+ tool_choice: Optional[Dict] = None,
+ tools: Optional[List[Dict]] = None,
+ top_k: Optional[int] = None,
+ top_p: Optional[float] = None,
+ api_key: Optional[str] = None,
api_base: Optional[str] = None,
client: Optional[AsyncHTTPHandler] = None,
custom_llm_provider: Optional[str] = None,
**kwargs,
-) -> Union[Dict[str, Any], AsyncIterator]:
+) -> Union[AnthropicMessagesResponse, AsyncIterator]:
"""
Makes Anthropic `/v1/messages` API calls In the Anthropic API Spec
"""
# Use provided client or create a new one
optional_params = GenericLiteLLMParams(**kwargs)
- model, _custom_llm_provider, dynamic_api_key, dynamic_api_base = (
- litellm.get_llm_provider(
- model=model,
- custom_llm_provider=custom_llm_provider,
- api_base=optional_params.api_base,
- api_key=optional_params.api_key,
- )
+ (
+ model,
+ _custom_llm_provider,
+ dynamic_api_key,
+ dynamic_api_base,
+ ) = litellm.get_llm_provider(
+ model=model,
+ custom_llm_provider=custom_llm_provider,
+ api_base=optional_params.api_base,
+ api_key=optional_params.api_key,
)
anthropic_messages_provider_config: Optional[BaseAnthropicMessagesConfig] = (
ProviderConfigManager.get_provider_anthropic_messages_config(
@@ -127,10 +145,8 @@ async def anthropic_messages(
},
custom_llm_provider=_custom_llm_provider,
)
- litellm_logging_obj.model_call_details.update(kwargs)
-
# Prepare request body
- request_body = kwargs.copy()
+ request_body = locals().copy()
request_body = {
k: v
for k, v in request_body.items()
@@ -138,10 +154,12 @@ async def anthropic_messages(
in anthropic_messages_provider_config.get_supported_anthropic_messages_params(
model=model
)
+ and v is not None
}
request_body["stream"] = stream
request_body["model"] = model
litellm_logging_obj.stream = stream
+ litellm_logging_obj.model_call_details.update(request_body)
# Make the request
request_url = anthropic_messages_provider_config.get_complete_url(
@@ -162,7 +180,7 @@ async def anthropic_messages(
url=request_url,
headers=headers,
data=json.dumps(request_body),
- stream=stream,
+ stream=stream or False,
)
response.raise_for_status()
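With the signature change above, the `/v1/messages` params are passed explicitly instead of only through **kwargs. A hedged call sketch; the model name and key are placeholders, and the import path assumes the module layout in this patch:

import asyncio

from litellm.llms.anthropic.experimental_pass_through.messages.handler import (
    anthropic_messages,
)

async def main():
    response = await anthropic_messages(
        max_tokens=256,
        messages=[{"role": "user", "content": "Hello"}],
        model="claude-3-5-sonnet-20240620",
        api_key="sk-ant-placeholder",
    )
    print(response)

asyncio.run(main())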
diff --git a/litellm/llms/azure/assistants.py b/litellm/llms/azure/assistants.py
index 1328eb1fea..271cd698e7 100644
--- a/litellm/llms/azure/assistants.py
+++ b/litellm/llms/azure/assistants.py
@@ -43,6 +43,7 @@ class AzureAssistantsAPI(BaseAzureLLM):
api_base=api_base,
model_name="",
api_version=api_version,
+ is_async=False,
)
azure_openai_client = AzureOpenAI(**azure_client_params) # type: ignore
else:
@@ -68,6 +69,7 @@ class AzureAssistantsAPI(BaseAzureLLM):
api_base=api_base,
model_name="",
api_version=api_version,
+ is_async=True,
)
azure_openai_client = AsyncAzureOpenAI(**azure_client_params)
@@ -286,6 +288,7 @@ class AzureAssistantsAPI(BaseAzureLLM):
timeout=timeout,
max_retries=max_retries,
client=client,
+ litellm_params=litellm_params,
)
thread_message: OpenAIMessage = openai_client.beta.threads.messages.create( # type: ignore
diff --git a/litellm/llms/azure/audio_transcriptions.py b/litellm/llms/azure/audio_transcriptions.py
index 52a3d780fb..be7d0fa30d 100644
--- a/litellm/llms/azure/audio_transcriptions.py
+++ b/litellm/llms/azure/audio_transcriptions.py
@@ -1,10 +1,9 @@
import uuid
-from typing import Any, Optional
+from typing import Any, Coroutine, Optional, Union
from openai import AsyncAzureOpenAI, AzureOpenAI
from pydantic import BaseModel
-import litellm
from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_name
from litellm.types.utils import FileTypes
from litellm.utils import (
@@ -14,6 +13,7 @@ from litellm.utils import (
)
from .azure import AzureChatCompletion
+from .common_utils import AzureOpenAIError
class AzureAudioTranscription(AzureChatCompletion):
@@ -33,20 +33,11 @@ class AzureAudioTranscription(AzureChatCompletion):
azure_ad_token: Optional[str] = None,
atranscription: bool = False,
litellm_params: Optional[dict] = None,
- ) -> TranscriptionResponse:
+ ) -> Union[TranscriptionResponse, Coroutine[Any, Any, TranscriptionResponse]]:
data = {"model": model, "file": audio_file, **optional_params}
- # init AzureOpenAI Client
- azure_client_params = self.initialize_azure_sdk_client(
- litellm_params=litellm_params or {},
- api_key=api_key,
- model_name=model,
- api_version=api_version,
- api_base=api_base,
- )
-
if atranscription is True:
- return self.async_audio_transcriptions( # type: ignore
+ return self.async_audio_transcriptions(
audio_file=audio_file,
data=data,
model_response=model_response,
@@ -54,14 +45,26 @@ class AzureAudioTranscription(AzureChatCompletion):
api_key=api_key,
api_base=api_base,
client=client,
- azure_client_params=azure_client_params,
max_retries=max_retries,
logging_obj=logging_obj,
+ model=model,
+ litellm_params=litellm_params,
+ )
+
+ azure_client = self.get_azure_openai_client(
+ api_version=api_version,
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ _is_async=False,
+ client=client,
+ litellm_params=litellm_params,
+ )
+ if not isinstance(azure_client, AzureOpenAI):
+ raise AzureOpenAIError(
+ status_code=500,
+ message="azure_client is not an instance of AzureOpenAI",
)
- if client is None:
- azure_client = AzureOpenAI(http_client=litellm.client_session, **azure_client_params) # type: ignore
- else:
- azure_client = client
## LOGGING
logging_obj.pre_call(
@@ -98,24 +101,34 @@ class AzureAudioTranscription(AzureChatCompletion):
async def async_audio_transcriptions(
self,
audio_file: FileTypes,
+ model: str,
data: dict,
model_response: TranscriptionResponse,
timeout: float,
- azure_client_params: dict,
logging_obj: Any,
+ api_version: Optional[str] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
client=None,
max_retries=None,
- ):
+ litellm_params: Optional[dict] = None,
+ ) -> TranscriptionResponse:
response = None
try:
- if client is None:
- async_azure_client = AsyncAzureOpenAI(
- **azure_client_params,
+ async_azure_client = self.get_azure_openai_client(
+ api_version=api_version,
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ _is_async=True,
+ client=client,
+ litellm_params=litellm_params,
+ )
+ if not isinstance(async_azure_client, AsyncAzureOpenAI):
+ raise AzureOpenAIError(
+ status_code=500,
+ message="async_azure_client is not an instance of AsyncAzureOpenAI",
)
- else:
- async_azure_client = client
## LOGGING
logging_obj.pre_call(
@@ -168,7 +181,12 @@ class AzureAudioTranscription(AzureChatCompletion):
model_response_object=model_response,
hidden_params=hidden_params,
response_type="audio_transcription",
- ) # type: ignore
+ )
+ if not isinstance(response, TranscriptionResponse):
+ raise AzureOpenAIError(
+ status_code=500,
+ message="response is not an instance of TranscriptionResponse",
+ )
return response
except Exception as e:
## LOGGING
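The Azure call paths above now build clients through `get_azure_openai_client` instead of assembling `azure_client_params` by hand. A sketch of that helper using the same keyword arguments as the hunks (endpoint, key, and version are placeholders):

from litellm.llms.azure.audio_transcriptions import AzureAudioTranscription

transcriber = AzureAudioTranscription()
client = transcriber.get_azure_openai_client(
    api_version="2024-06-01",
    api_base="https://my-resource.openai.azure.com",
    api_key="azure-placeholder-key",
    model="whisper-1",
    _is_async=False,
    client=None,
    litellm_params={},
)
print(type(client))  # AzureOpenAI when _is_async=False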
diff --git a/litellm/llms/azure/azure.py b/litellm/llms/azure/azure.py
index 7fba70141c..bb60680ebc 100644
--- a/litellm/llms/azure/azure.py
+++ b/litellm/llms/azure/azure.py
@@ -1,13 +1,13 @@
import asyncio
import json
import time
-from typing import Any, Callable, Dict, List, Literal, Optional, Union
+from typing import Any, Callable, Coroutine, Dict, List, Optional, Union
import httpx # type: ignore
from openai import APITimeoutError, AsyncAzureOpenAI, AzureOpenAI
import litellm
-from litellm.constants import DEFAULT_MAX_RETRIES
+from litellm.constants import AZURE_OPERATION_POLLING_TIMEOUT, DEFAULT_MAX_RETRIES
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.litellm_core_utils.logging_utils import track_llm_api_timing
from litellm.llms.custom_httpx.http_handler import (
@@ -141,41 +141,6 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
return headers
- def _get_sync_azure_client(
- self,
- api_version: Optional[str],
- api_base: Optional[str],
- api_key: Optional[str],
- azure_ad_token: Optional[str],
- azure_ad_token_provider: Optional[Callable],
- model: str,
- max_retries: int,
- timeout: Union[float, httpx.Timeout],
- client: Optional[Any],
- client_type: Literal["sync", "async"],
- litellm_params: Optional[dict] = None,
- ):
- # init AzureOpenAI Client
- azure_client_params: Dict[str, Any] = self.initialize_azure_sdk_client(
- litellm_params=litellm_params or {},
- api_key=api_key,
- model_name=model,
- api_version=api_version,
- api_base=api_base,
- )
- if client is None:
- if client_type == "sync":
- azure_client = AzureOpenAI(**azure_client_params) # type: ignore
- elif client_type == "async":
- azure_client = AsyncAzureOpenAI(**azure_client_params) # type: ignore
- else:
- azure_client = client
- if api_version is not None and isinstance(azure_client._custom_query, dict):
- # set api_version to version passed by user
- azure_client._custom_query.setdefault("api-version", api_version)
-
- return azure_client
-
def make_sync_azure_openai_chat_completion_request(
self,
azure_client: AzureOpenAI,
@@ -263,47 +228,21 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
max_retries = DEFAULT_MAX_RETRIES
json_mode: Optional[bool] = optional_params.pop("json_mode", False)
- azure_client_params = self.initialize_azure_sdk_client(
- litellm_params=litellm_params or {},
- api_key=api_key,
- api_base=api_base,
- model_name=model,
- api_version=api_version,
- )
### CHECK IF CLOUDFLARE AI GATEWAY ###
### if so - set the model as part of the base url
if "gateway.ai.cloudflare.com" in api_base:
- ## build base url - assume api base includes resource name
- if client is None:
- if not api_base.endswith("/"):
- api_base += "/"
- api_base += f"{model}"
-
- azure_client_params = {
- "api_version": api_version,
- "base_url": f"{api_base}",
- "http_client": litellm.client_session,
- "max_retries": max_retries,
- "timeout": timeout,
- }
- if api_key is not None:
- azure_client_params["api_key"] = api_key
- elif azure_ad_token is not None:
- if azure_ad_token.startswith("oidc/"):
- azure_ad_token = get_azure_ad_token_from_oidc(
- azure_ad_token
- )
-
- azure_client_params["azure_ad_token"] = azure_ad_token
- elif azure_ad_token_provider is not None:
- azure_client_params["azure_ad_token_provider"] = (
- azure_ad_token_provider
- )
-
- if acompletion is True:
- client = AsyncAzureOpenAI(**azure_client_params)
- else:
- client = AzureOpenAI(**azure_client_params)
+ client = self._init_azure_client_for_cloudflare_ai_gateway(
+ api_base=api_base,
+ model=model,
+ api_version=api_version,
+ max_retries=max_retries,
+ timeout=timeout,
+ api_key=api_key,
+ azure_ad_token=azure_ad_token,
+ azure_ad_token_provider=azure_ad_token_provider,
+ acompletion=acompletion,
+ client=client,
+ )
data = {"model": None, "messages": messages, **optional_params}
else:
@@ -330,7 +269,7 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
timeout=timeout,
client=client,
max_retries=max_retries,
- azure_client_params=azure_client_params,
+ litellm_params=litellm_params,
)
else:
return self.acompletion(
@@ -348,7 +287,7 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
logging_obj=logging_obj,
max_retries=max_retries,
convert_tool_call_to_json_mode=json_mode,
- azure_client_params=azure_client_params,
+ litellm_params=litellm_params,
)
elif "stream" in optional_params and optional_params["stream"] is True:
return self.streaming(
@@ -364,6 +303,7 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
timeout=timeout,
client=client,
max_retries=max_retries,
+ litellm_params=litellm_params,
)
else:
## LOGGING
@@ -385,21 +325,15 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
status_code=422, message="max retries must be an int"
)
# init AzureOpenAI Client
- if (
- client is None
- or not isinstance(client, AzureOpenAI)
- or dynamic_params
- ):
- azure_client = AzureOpenAI(**azure_client_params)
- else:
- azure_client = client
- if api_version is not None and isinstance(
- azure_client._custom_query, dict
- ):
- # set api_version to version passed by user
- azure_client._custom_query.setdefault(
- "api-version", api_version
- )
+ azure_client = self.get_azure_openai_client(
+ api_version=api_version,
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ client=client,
+ _is_async=False,
+ litellm_params=litellm_params,
+ )
if not isinstance(azure_client, AzureOpenAI):
raise AzureOpenAIError(
status_code=500,
@@ -459,16 +393,22 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
azure_ad_token_provider: Optional[Callable] = None,
convert_tool_call_to_json_mode: Optional[bool] = None,
client=None, # this is the AsyncAzureOpenAI
- azure_client_params: dict = {},
+ litellm_params: Optional[dict] = {},
):
response = None
try:
# setting Azure client
- if client is None or dynamic_params:
- azure_client = AsyncAzureOpenAI(**azure_client_params)
- else:
- azure_client = client
-
+ azure_client = self.get_azure_openai_client(
+ api_version=api_version,
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ client=client,
+ _is_async=True,
+ litellm_params=litellm_params,
+ )
+ if not isinstance(azure_client, AsyncAzureOpenAI):
+ raise ValueError("Azure client is not an instance of AsyncAzureOpenAI")
## LOGGING
logging_obj.pre_call(
input=data["messages"],
@@ -554,6 +494,7 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
azure_ad_token: Optional[str] = None,
azure_ad_token_provider: Optional[Callable] = None,
client=None,
+ litellm_params: Optional[dict] = {},
):
# init AzureOpenAI Client
azure_client_params = {
@@ -576,10 +517,20 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
elif azure_ad_token_provider is not None:
azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider
- if client is None or dynamic_params:
- azure_client = AzureOpenAI(**azure_client_params)
- else:
- azure_client = client
+ azure_client = self.get_azure_openai_client(
+ api_version=api_version,
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ client=client,
+ _is_async=False,
+ litellm_params=litellm_params,
+ )
+ if not isinstance(azure_client, AzureOpenAI):
+ raise AzureOpenAIError(
+ status_code=500,
+ message="azure_client is not an instance of AzureOpenAI",
+ )
## LOGGING
logging_obj.pre_call(
input=data["messages"],
@@ -621,13 +572,21 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
azure_ad_token: Optional[str] = None,
azure_ad_token_provider: Optional[Callable] = None,
client=None,
- azure_client_params: dict = {},
+ litellm_params: Optional[dict] = {},
):
try:
- if client is None or dynamic_params:
- azure_client = AsyncAzureOpenAI(**azure_client_params)
- else:
- azure_client = client
+ azure_client = self.get_azure_openai_client(
+ api_version=api_version,
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ client=client,
+ _is_async=True,
+ litellm_params=litellm_params,
+ )
+ if not isinstance(azure_client, AsyncAzureOpenAI):
+ raise ValueError("Azure client is not an instance of AsyncAzureOpenAI")
+
## LOGGING
logging_obj.pre_call(
input=data["messages"],
@@ -678,22 +637,35 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
async def aembedding(
self,
+ model: str,
data: dict,
model_response: EmbeddingResponse,
- azure_client_params: dict,
input: list,
logging_obj: LiteLLMLoggingObj,
+ api_base: str,
api_key: Optional[str] = None,
+ api_version: Optional[str] = None,
client: Optional[AsyncAzureOpenAI] = None,
- timeout=None,
- ):
+ timeout: Optional[Union[float, httpx.Timeout]] = None,
+ max_retries: Optional[int] = None,
+ azure_ad_token: Optional[str] = None,
+ azure_ad_token_provider: Optional[Callable] = None,
+ litellm_params: Optional[dict] = {},
+ ) -> EmbeddingResponse:
response = None
try:
+ openai_aclient = self.get_azure_openai_client(
+ api_version=api_version,
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ _is_async=True,
+ client=client,
+ litellm_params=litellm_params,
+ )
+ if not isinstance(openai_aclient, AsyncAzureOpenAI):
+ raise ValueError("Azure client is not an instance of AsyncAzureOpenAI")
- if client is None:
- openai_aclient = AsyncAzureOpenAI(**azure_client_params)
- else:
- openai_aclient = client
raw_response = await openai_aclient.embeddings.with_raw_response.create(
**data, timeout=timeout
)
@@ -707,13 +679,19 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
additional_args={"complete_input_dict": data},
original_response=stringified_response,
)
- return convert_to_model_response_object(
+ embedding_response = convert_to_model_response_object(
response_object=stringified_response,
model_response_object=model_response,
hidden_params={"headers": headers},
_response_headers=process_azure_headers(headers),
response_type="embedding",
)
+ if not isinstance(embedding_response, EmbeddingResponse):
+ raise AzureOpenAIError(
+ status_code=500,
+ message="embedding_response is not an instance of EmbeddingResponse",
+ )
+ return embedding_response
except Exception as e:
## LOGGING
logging_obj.post_call(
@@ -742,7 +720,7 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
aembedding=None,
headers: Optional[dict] = None,
litellm_params: Optional[dict] = None,
- ) -> EmbeddingResponse:
+ ) -> Union[EmbeddingResponse, Coroutine[Any, Any, EmbeddingResponse]]:
if headers:
optional_params["extra_headers"] = headers
if self._client_session is None:
@@ -751,20 +729,6 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
data = {"model": model, "input": input, **optional_params}
if max_retries is None:
max_retries = litellm.DEFAULT_MAX_RETRIES
- if not isinstance(max_retries, int):
- raise AzureOpenAIError(
- status_code=422, message="max retries must be an int"
- )
-
- # init AzureOpenAI Client
-
- azure_client_params = self.initialize_azure_sdk_client(
- litellm_params=litellm_params or {},
- api_key=api_key,
- model_name=model,
- api_version=api_version,
- api_base=api_base,
- )
## LOGGING
logging_obj.pre_call(
input=input,
@@ -776,20 +740,33 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
)
if aembedding is True:
- return self.aembedding( # type: ignore
+ return self.aembedding(
data=data,
input=input,
+ model=model,
logging_obj=logging_obj,
api_key=api_key,
model_response=model_response,
- azure_client_params=azure_client_params,
timeout=timeout,
client=client,
+ litellm_params=litellm_params,
+ api_base=api_base,
)
- if client is None:
- azure_client = AzureOpenAI(**azure_client_params) # type: ignore
- else:
- azure_client = client
+ azure_client = self.get_azure_openai_client(
+ api_version=api_version,
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ _is_async=False,
+ client=client,
+ litellm_params=litellm_params,
+ )
+ if not isinstance(azure_client, AzureOpenAI):
+ raise AzureOpenAIError(
+ status_code=500,
+ message="azure_client is not an instance of AzureOpenAI",
+ )
+
## COMPLETION CALL
raw_response = azure_client.embeddings.with_raw_response.create(**data, timeout=timeout) # type: ignore
headers = dict(raw_response.headers)
@@ -857,7 +834,6 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
"2023-10-01-preview",
]
): # CREATE + POLL for azure dall-e-2 calls
-
api_base = modify_url(
original_url=api_base, new_path="/openai/images/generations:submit"
)
@@ -881,7 +857,7 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
await response.aread()
- timeout_secs: int = 120
+ timeout_secs: int = AZURE_OPERATION_POLLING_TIMEOUT
start_time = time.time()
if "status" not in response.json():
raise Exception(
@@ -889,7 +865,6 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
)
while response.json()["status"] not in ["succeeded", "failed"]:
if time.time() - start_time > timeout_secs:
-
raise AzureOpenAIError(
status_code=408, message="Operation polling timed out."
)
@@ -957,7 +932,6 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
"2023-10-01-preview",
]
): # CREATE + POLL for azure dall-e-2 calls
-
api_base = modify_url(
original_url=api_base, new_path="/openai/images/generations:submit"
)
@@ -981,7 +955,7 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
response.read()
- timeout_secs: int = 120
+ timeout_secs: int = AZURE_OPERATION_POLLING_TIMEOUT
start_time = time.time()
if "status" not in response.json():
raise Exception(
@@ -1155,6 +1129,7 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
model_name=model or "",
api_version=api_version,
api_base=api_base,
+ is_async=False,
)
if aimg_generation is True:
return self.aimage_generation(data=data, input=input, logging_obj=logging_obj, model_response=model_response, api_key=api_key, client=client, azure_client_params=azure_client_params, timeout=timeout, headers=headers) # type: ignore
@@ -1220,7 +1195,6 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
client=None,
litellm_params: Optional[dict] = None,
) -> HttpxBinaryResponseContent:
-
max_retries = optional_params.pop("max_retries", 2)
if aspeech is not None and aspeech is True:
@@ -1240,17 +1214,13 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
litellm_params=litellm_params,
) # type: ignore
- azure_client: AzureOpenAI = self._get_sync_azure_client(
+ azure_client: AzureOpenAI = self.get_azure_openai_client(
api_base=api_base,
api_version=api_version,
api_key=api_key,
- azure_ad_token=azure_ad_token,
- azure_ad_token_provider=azure_ad_token_provider,
model=model,
- max_retries=max_retries,
- timeout=timeout,
+ _is_async=False,
client=client,
- client_type="sync",
litellm_params=litellm_params,
) # type: ignore
@@ -1278,18 +1248,13 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
client=None,
litellm_params: Optional[dict] = None,
) -> HttpxBinaryResponseContent:
-
- azure_client: AsyncAzureOpenAI = self._get_sync_azure_client(
+ azure_client: AsyncAzureOpenAI = self.get_azure_openai_client(
api_base=api_base,
api_version=api_version,
api_key=api_key,
- azure_ad_token=azure_ad_token,
- azure_ad_token_provider=azure_ad_token_provider,
model=model,
- max_retries=max_retries,
- timeout=timeout,
+ _is_async=True,
client=client,
- client_type="async",
litellm_params=litellm_params,
) # type: ignore
diff --git a/litellm/llms/azure/batches/handler.py b/litellm/llms/azure/batches/handler.py
index 1b93c526d5..7fc6388ba8 100644
--- a/litellm/llms/azure/batches/handler.py
+++ b/litellm/llms/azure/batches/handler.py
@@ -50,15 +50,15 @@ class AzureBatchesAPI(BaseAzureLLM):
client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None,
litellm_params: Optional[dict] = None,
) -> Union[LiteLLMBatch, Coroutine[Any, Any, LiteLLMBatch]]:
- azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = (
- self.get_azure_openai_client(
- api_key=api_key,
- api_base=api_base,
- api_version=api_version,
- client=client,
- _is_async=_is_async,
- litellm_params=litellm_params or {},
- )
+ azure_client: Optional[
+ Union[AzureOpenAI, AsyncAzureOpenAI]
+ ] = self.get_azure_openai_client(
+ api_key=api_key,
+ api_base=api_base,
+ api_version=api_version,
+ client=client,
+ _is_async=_is_async,
+ litellm_params=litellm_params or {},
)
if azure_client is None:
raise ValueError(
@@ -96,15 +96,15 @@ class AzureBatchesAPI(BaseAzureLLM):
client: Optional[AzureOpenAI] = None,
litellm_params: Optional[dict] = None,
):
- azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = (
- self.get_azure_openai_client(
- api_key=api_key,
- api_base=api_base,
- api_version=api_version,
- client=client,
- _is_async=_is_async,
- litellm_params=litellm_params or {},
- )
+ azure_client: Optional[
+ Union[AzureOpenAI, AsyncAzureOpenAI]
+ ] = self.get_azure_openai_client(
+ api_key=api_key,
+ api_base=api_base,
+ api_version=api_version,
+ client=client,
+ _is_async=_is_async,
+ litellm_params=litellm_params or {},
)
if azure_client is None:
raise ValueError(
@@ -144,15 +144,15 @@ class AzureBatchesAPI(BaseAzureLLM):
client: Optional[AzureOpenAI] = None,
litellm_params: Optional[dict] = None,
):
- azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = (
- self.get_azure_openai_client(
- api_key=api_key,
- api_base=api_base,
- api_version=api_version,
- client=client,
- _is_async=_is_async,
- litellm_params=litellm_params or {},
- )
+ azure_client: Optional[
+ Union[AzureOpenAI, AsyncAzureOpenAI]
+ ] = self.get_azure_openai_client(
+ api_key=api_key,
+ api_base=api_base,
+ api_version=api_version,
+ client=client,
+ _is_async=_is_async,
+ litellm_params=litellm_params or {},
)
if azure_client is None:
raise ValueError(
@@ -183,15 +183,15 @@ class AzureBatchesAPI(BaseAzureLLM):
client: Optional[AzureOpenAI] = None,
litellm_params: Optional[dict] = None,
):
- azure_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = (
- self.get_azure_openai_client(
- api_key=api_key,
- api_base=api_base,
- api_version=api_version,
- client=client,
- _is_async=_is_async,
- litellm_params=litellm_params or {},
- )
+ azure_client: Optional[
+ Union[AzureOpenAI, AsyncAzureOpenAI]
+ ] = self.get_azure_openai_client(
+ api_key=api_key,
+ api_base=api_base,
+ api_version=api_version,
+ client=client,
+ _is_async=_is_async,
+ litellm_params=litellm_params or {},
)
if azure_client is None:
raise ValueError(
diff --git a/litellm/llms/azure/chat/gpt_transformation.py b/litellm/llms/azure/chat/gpt_transformation.py
index 7aa4fffab5..238566faf7 100644
--- a/litellm/llms/azure/chat/gpt_transformation.py
+++ b/litellm/llms/azure/chat/gpt_transformation.py
@@ -7,6 +7,10 @@ from litellm.litellm_core_utils.prompt_templates.factory import (
convert_to_azure_openai_messages,
)
from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.types.llms.azure import (
+ API_VERSION_MONTH_SUPPORTED_RESPONSE_FORMAT,
+ API_VERSION_YEAR_SUPPORTED_RESPONSE_FORMAT,
+)
from litellm.types.utils import ModelResponse
from litellm.utils import supports_response_schema
@@ -99,6 +103,8 @@ class AzureOpenAIConfig(BaseConfig):
"extra_headers",
"parallel_tool_calls",
"prediction",
+ "modalities",
+ "audio",
]
def _is_response_format_supported_model(self, model: str) -> bool:
@@ -119,11 +125,22 @@ class AzureOpenAIConfig(BaseConfig):
) -> bool:
"""
- check if api_version is supported for response_format
+ - returns True if the API version is equal to or newer than the supported version
"""
+ api_year = int(api_version_year)
+ api_month = int(api_version_month)
+ supported_year = int(API_VERSION_YEAR_SUPPORTED_RESPONSE_FORMAT)
+ supported_month = int(API_VERSION_MONTH_SUPPORTED_RESPONSE_FORMAT)
- is_supported = int(api_version_year) <= 2024 and int(api_version_month) >= 8
-
- return is_supported
+ # If the year is greater than supported year, it's definitely supported
+ if api_year > supported_year:
+ return True
+ # If the year is less than supported year, it's not supported
+ elif api_year < supported_year:
+ return False
+ # If same year, check if month is >= supported month
+ else:
+ return api_month >= supported_month
def map_openai_params(
self,
@@ -193,6 +210,7 @@ class AzureOpenAIConfig(BaseConfig):
is_response_format_supported_api_version
and _is_response_format_supported_model
)
+
optional_params = self._add_response_format_to_tools(
optional_params=optional_params,
value=value,
@@ -284,6 +302,7 @@ class AzureOpenAIConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
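
The removed check above (`int(api_version_year) <= 2024 and int(api_version_month) >= 8`) would have rejected every 2025+ API version; the new year/month comparison fixes that. A standalone sanity-check sketch, with 2024/8 standing in for the `API_VERSION_*_SUPPORTED_RESPONSE_FORMAT` constants imported from litellm/types/llms/azure.py (their exact values are an assumption here, inferred from the old hard-coded check):

def is_response_format_supported(api_version_year: str, api_version_month: str) -> bool:
    supported_year, supported_month = 2024, 8  # assumed cutoff, matching the old hard-coded check
    api_year, api_month = int(api_version_year), int(api_version_month)
    if api_year != supported_year:
        return api_year > supported_year
    return api_month >= supported_month

assert is_response_format_supported("2025", "01")      # newer year -> supported
assert is_response_format_supported("2024", "08")      # same year, cutoff month -> supported
assert not is_response_format_supported("2024", "07")  # same year, older month -> not supported
assert not is_response_format_supported("2023", "12")  # older year -> not supported
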
diff --git a/litellm/llms/azure/chat/o_series_transformation.py b/litellm/llms/azure/chat/o_series_transformation.py
index 0ca3a28d23..69fb941ca5 100644
--- a/litellm/llms/azure/chat/o_series_transformation.py
+++ b/litellm/llms/azure/chat/o_series_transformation.py
@@ -14,6 +14,7 @@ Translations handled by LiteLLM:
from typing import List, Optional
+import litellm
from litellm import verbose_logger
from litellm.types.llms.openai import AllMessageValues
from litellm.utils import get_model_info
@@ -22,6 +23,27 @@ from ...openai.chat.o_series_transformation import OpenAIOSeriesConfig
class AzureOpenAIO1Config(OpenAIOSeriesConfig):
+ def get_supported_openai_params(self, model: str) -> list:
+ """
+ Get the supported OpenAI params for the Azure O-Series models
+ """
+ all_openai_params = litellm.OpenAIGPTConfig().get_supported_openai_params(
+ model=model
+ )
+ non_supported_params = [
+ "logprobs",
+ "top_p",
+ "presence_penalty",
+ "frequency_penalty",
+ "top_logprobs",
+ ]
+
+ o_series_only_param = ["reasoning_effort"]
+ all_openai_params.extend(o_series_only_param)
+ return [
+ param for param in all_openai_params if param not in non_supported_params
+ ]
+
def should_fake_stream(
self,
model: Optional[str],
@@ -57,7 +79,7 @@ class AzureOpenAIO1Config(OpenAIOSeriesConfig):
return True
def is_o_series_model(self, model: str) -> bool:
- return "o1" in model or "o3" in model or "o_series/" in model
+ return "o1" in model or "o3" in model or "o4" in model or "o_series/" in model
def transform_request(
self,
diff --git a/litellm/llms/azure/common_utils.py b/litellm/llms/azure/common_utils.py
index 909fcd88a5..012f47c851 100644
--- a/litellm/llms/azure/common_utils.py
+++ b/litellm/llms/azure/common_utils.py
@@ -1,6 +1,6 @@
import json
import os
-from typing import Callable, Optional, Union
+from typing import Any, Callable, Dict, Optional, Union
import httpx
from openai import AsyncAzureOpenAI, AzureOpenAI
@@ -9,6 +9,7 @@ import litellm
from litellm._logging import verbose_logger
from litellm.caching.caching import DualCache
from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.llms.openai.common_utils import BaseOpenAILLM
from litellm.secret_managers.get_azure_ad_token_provider import (
get_azure_ad_token_provider,
)
@@ -60,7 +61,7 @@ def process_azure_headers(headers: Union[httpx.Headers, dict]) -> dict:
return {**llm_response_headers, **openai_headers}
-def get_azure_ad_token_from_entrata_id(
+def get_azure_ad_token_from_entra_id(
tenant_id: str,
client_id: str,
client_secret: str,
@@ -80,7 +81,7 @@ def get_azure_ad_token_from_entrata_id(
"""
from azure.identity import ClientSecretCredential, get_bearer_token_provider
- verbose_logger.debug("Getting Azure AD Token from Entrata ID")
+ verbose_logger.debug("Getting Azure AD Token from Entra ID")
if tenant_id.startswith("os.environ/"):
_tenant_id = get_secret_str(tenant_id)
@@ -244,24 +245,37 @@ def select_azure_base_url_or_endpoint(azure_client_params: dict):
return azure_client_params
-class BaseAzureLLM:
+class BaseAzureLLM(BaseOpenAILLM):
def get_azure_openai_client(
self,
- litellm_params: dict,
api_key: Optional[str],
api_base: Optional[str],
api_version: Optional[str] = None,
client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None,
+ litellm_params: Optional[dict] = None,
_is_async: bool = False,
+ model: Optional[str] = None,
) -> Optional[Union[AzureOpenAI, AsyncAzureOpenAI]]:
openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None
+ client_initialization_params: dict = locals()
if client is None:
+ cached_client = self.get_cached_openai_client(
+ client_initialization_params=client_initialization_params,
+ client_type="azure",
+ )
+ if cached_client:
+ if isinstance(cached_client, AzureOpenAI) or isinstance(
+ cached_client, AsyncAzureOpenAI
+ ):
+ return cached_client
+
azure_client_params = self.initialize_azure_sdk_client(
- litellm_params=litellm_params,
+ litellm_params=litellm_params or {},
api_key=api_key,
api_base=api_base,
- model_name="",
+ model_name=model,
api_version=api_version,
+ is_async=_is_async,
)
if _is_async is True:
openai_client = AsyncAzureOpenAI(**azure_client_params)
@@ -269,7 +283,18 @@ class BaseAzureLLM:
openai_client = AzureOpenAI(**azure_client_params) # type: ignore
else:
openai_client = client
+ if api_version is not None and isinstance(
+ openai_client._custom_query, dict
+ ):
+ # set api_version to version passed by user
+ openai_client._custom_query.setdefault("api-version", api_version)
+ # save client in-memory cache
+ self.set_cached_openai_client(
+ openai_client=openai_client,
+ client_initialization_params=client_initialization_params,
+ client_type="azure",
+ )
return openai_client
def initialize_azure_sdk_client(
@@ -277,28 +302,37 @@ class BaseAzureLLM:
litellm_params: dict,
api_key: Optional[str],
api_base: Optional[str],
- model_name: str,
+ model_name: Optional[str],
api_version: Optional[str],
+ is_async: bool,
) -> dict:
-
azure_ad_token_provider: Optional[Callable[[], str]] = None
# If we have api_key, then we have higher priority
azure_ad_token = litellm_params.get("azure_ad_token")
- tenant_id = litellm_params.get("tenant_id")
- client_id = litellm_params.get("client_id")
- client_secret = litellm_params.get("client_secret")
- azure_username = litellm_params.get("azure_username")
- azure_password = litellm_params.get("azure_password")
+ tenant_id = litellm_params.get("tenant_id", os.getenv("AZURE_TENANT_ID"))
+ client_id = litellm_params.get("client_id", os.getenv("AZURE_CLIENT_ID"))
+ client_secret = litellm_params.get(
+ "client_secret", os.getenv("AZURE_CLIENT_SECRET")
+ )
+ azure_username = litellm_params.get(
+ "azure_username", os.getenv("AZURE_USERNAME")
+ )
+ azure_password = litellm_params.get(
+ "azure_password", os.getenv("AZURE_PASSWORD")
+ )
max_retries = litellm_params.get("max_retries")
timeout = litellm_params.get("timeout")
if not api_key and tenant_id and client_id and client_secret:
- verbose_logger.debug("Using Azure AD Token Provider for Azure Auth")
- azure_ad_token_provider = get_azure_ad_token_from_entrata_id(
+ verbose_logger.debug(
+ "Using Azure AD Token Provider from Entra ID for Azure Auth"
+ )
+ azure_ad_token_provider = get_azure_ad_token_from_entra_id(
tenant_id=tenant_id,
client_id=client_id,
client_secret=client_secret,
)
if azure_username and azure_password and client_id:
+ verbose_logger.debug("Using Azure Username and Password for Azure Auth")
azure_ad_token_provider = get_azure_ad_token_from_username_password(
azure_username=azure_username,
azure_password=azure_password,
@@ -306,12 +340,16 @@ class BaseAzureLLM:
)
if azure_ad_token is not None and azure_ad_token.startswith("oidc/"):
+ verbose_logger.debug("Using Azure OIDC Token for Azure Auth")
azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token)
elif (
not api_key
and azure_ad_token_provider is None
and litellm.enable_azure_ad_token_refresh is True
):
+ verbose_logger.debug(
+ "Using Azure AD token provider based on Service Principal with Secret workflow for Azure Auth"
+ )
try:
azure_ad_token_provider = get_azure_ad_token_provider()
except ValueError:
@@ -334,8 +372,13 @@ class BaseAzureLLM:
"api_version": api_version,
"azure_ad_token": azure_ad_token,
"azure_ad_token_provider": azure_ad_token_provider,
- "http_client": litellm.client_session,
}
+ # init http client + SSL Verification settings
+ if is_async is True:
+ azure_client_params["http_client"] = self._get_async_http_client()
+ else:
+ azure_client_params["http_client"] = self._get_sync_http_client()
+
if max_retries is not None:
azure_client_params["max_retries"] = max_retries
if timeout is not None:
@@ -351,3 +394,45 @@ class BaseAzureLLM:
)
return azure_client_params
+
+ def _init_azure_client_for_cloudflare_ai_gateway(
+ self,
+ api_base: str,
+ model: str,
+ api_version: str,
+ max_retries: int,
+ timeout: Union[float, httpx.Timeout],
+ api_key: Optional[str],
+ azure_ad_token: Optional[str],
+ azure_ad_token_provider: Optional[Callable[[], str]],
+ acompletion: bool,
+ client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None,
+ ) -> Union[AzureOpenAI, AsyncAzureOpenAI]:
+ ## build base url - assume api base includes resource name
+ if client is None:
+ if not api_base.endswith("/"):
+ api_base += "/"
+ api_base += f"{model}"
+
+ azure_client_params: Dict[str, Any] = {
+ "api_version": api_version,
+ "base_url": f"{api_base}",
+ "http_client": litellm.client_session,
+ "max_retries": max_retries,
+ "timeout": timeout,
+ }
+ if api_key is not None:
+ azure_client_params["api_key"] = api_key
+ elif azure_ad_token is not None:
+ if azure_ad_token.startswith("oidc/"):
+ azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token)
+
+ azure_client_params["azure_ad_token"] = azure_ad_token
+ if azure_ad_token_provider is not None:
+ azure_client_params["azure_ad_token_provider"] = azure_ad_token_provider
+
+ if acompletion is True:
+ client = AsyncAzureOpenAI(**azure_client_params) # type: ignore
+ else:
+ client = AzureOpenAI(**azure_client_params) # type: ignore
+ return client
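
The caching added above reuses Azure clients across calls via `get_cached_openai_client` / `set_cached_openai_client`, presumably provided by the newly imported `BaseOpenAILLM`. A minimal sketch of that pattern, assuming a hashed-params key and a plain in-memory dict (neither of which is the actual litellm implementation):

import hashlib
import json
from typing import Any, Dict

_client_cache: Dict[str, Any] = {}  # in-memory store, keyed by the client init params

def _cache_key(client_initialization_params: dict, client_type: str) -> str:
    # keep only simple, serializable values (drop `self`, an explicitly passed client, etc.)
    safe = {
        k: v
        for k, v in client_initialization_params.items()
        if k not in ("self", "client")
        and isinstance(v, (str, int, float, bool, type(None)))
    }
    raw = json.dumps({"client_type": client_type, **safe}, sort_keys=True)
    return hashlib.sha256(raw.encode()).hexdigest()

def get_cached_openai_client(client_initialization_params: dict, client_type: str) -> Any:
    return _client_cache.get(_cache_key(client_initialization_params, client_type))

def set_cached_openai_client(openai_client: Any, client_initialization_params: dict, client_type: str) -> None:
    _client_cache[_cache_key(client_initialization_params, client_type)] = openai_client
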
diff --git a/litellm/llms/azure/completion/handler.py b/litellm/llms/azure/completion/handler.py
index 4ec5c435da..8301c4d617 100644
--- a/litellm/llms/azure/completion/handler.py
+++ b/litellm/llms/azure/completion/handler.py
@@ -2,7 +2,6 @@ from typing import Any, Callable, Optional
from openai import AsyncAzureOpenAI, AzureOpenAI
-import litellm
from litellm.litellm_core_utils.prompt_templates.factory import prompt_factory
from litellm.utils import CustomStreamWrapper, ModelResponse, TextCompletionResponse
@@ -12,18 +11,6 @@ from ..common_utils import AzureOpenAIError, BaseAzureLLM
openai_text_completion_config = OpenAITextCompletionConfig()
-def select_azure_base_url_or_endpoint(azure_client_params: dict):
- azure_endpoint = azure_client_params.get("azure_endpoint", None)
- if azure_endpoint is not None:
- # see : https://github.com/openai/openai-python/blob/3d61ed42aba652b547029095a7eb269ad4e1e957/src/openai/lib/azure.py#L192
- if "/openai/deployments" in azure_endpoint:
- # this is base_url, not an azure_endpoint
- azure_client_params["base_url"] = azure_endpoint
- azure_client_params.pop("azure_endpoint")
-
- return azure_client_params
-
-
class AzureTextCompletion(BaseAzureLLM):
def __init__(self) -> None:
super().__init__()
@@ -70,39 +57,22 @@ class AzureTextCompletion(BaseAzureLLM):
messages=messages, model=model, custom_llm_provider="azure_text"
)
- azure_client_params = self.initialize_azure_sdk_client(
- litellm_params=litellm_params or {},
- api_key=api_key,
- model_name=model,
- api_version=api_version,
- api_base=api_base,
- )
-
### CHECK IF CLOUDFLARE AI GATEWAY ###
### if so - set the model as part of the base url
if "gateway.ai.cloudflare.com" in api_base:
## build base url - assume api base includes resource name
- if client is None:
- if not api_base.endswith("/"):
- api_base += "/"
- api_base += f"{model}"
-
- azure_client_params = {
- "api_version": api_version,
- "base_url": f"{api_base}",
- "http_client": litellm.client_session,
- "max_retries": max_retries,
- "timeout": timeout,
- }
- if api_key is not None:
- azure_client_params["api_key"] = api_key
- elif azure_ad_token is not None:
- azure_client_params["azure_ad_token"] = azure_ad_token
-
- if acompletion is True:
- client = AsyncAzureOpenAI(**azure_client_params)
- else:
- client = AzureOpenAI(**azure_client_params)
+ client = self._init_azure_client_for_cloudflare_ai_gateway(
+ api_key=api_key,
+ api_version=api_version,
+ api_base=api_base,
+ model=model,
+ client=client,
+ max_retries=max_retries,
+ timeout=timeout,
+ azure_ad_token=azure_ad_token,
+ azure_ad_token_provider=azure_ad_token_provider,
+ acompletion=acompletion,
+ )
data = {"model": None, "prompt": prompt, **optional_params}
else:
@@ -124,7 +94,7 @@ class AzureTextCompletion(BaseAzureLLM):
azure_ad_token=azure_ad_token,
timeout=timeout,
client=client,
- azure_client_params=azure_client_params,
+ litellm_params=litellm_params,
)
else:
return self.acompletion(
@@ -139,7 +109,7 @@ class AzureTextCompletion(BaseAzureLLM):
client=client,
logging_obj=logging_obj,
max_retries=max_retries,
- azure_client_params=azure_client_params,
+ litellm_params=litellm_params,
)
elif "stream" in optional_params and optional_params["stream"] is True:
return self.streaming(
@@ -152,7 +122,6 @@ class AzureTextCompletion(BaseAzureLLM):
azure_ad_token=azure_ad_token,
timeout=timeout,
client=client,
- azure_client_params=azure_client_params,
)
else:
## LOGGING
@@ -174,17 +143,21 @@ class AzureTextCompletion(BaseAzureLLM):
status_code=422, message="max retries must be an int"
)
# init AzureOpenAI Client
- if client is None:
- azure_client = AzureOpenAI(**azure_client_params)
- else:
- azure_client = client
- if api_version is not None and isinstance(
- azure_client._custom_query, dict
- ):
- # set api_version to version passed by user
- azure_client._custom_query.setdefault(
- "api-version", api_version
- )
+ azure_client = self.get_azure_openai_client(
+ api_key=api_key,
+ api_base=api_base,
+ api_version=api_version,
+ client=client,
+ litellm_params=litellm_params,
+ _is_async=False,
+ model=model,
+ )
+
+ if not isinstance(azure_client, AzureOpenAI):
+ raise AzureOpenAIError(
+ status_code=500,
+ message="azure_client is not an instance of AzureOpenAI",
+ )
raw_response = azure_client.completions.with_raw_response.create(
**data, timeout=timeout
@@ -233,21 +206,27 @@ class AzureTextCompletion(BaseAzureLLM):
max_retries: int,
azure_ad_token: Optional[str] = None,
client=None, # this is the AsyncAzureOpenAI
- azure_client_params: dict = {},
+ litellm_params: dict = {},
):
response = None
try:
# init AzureOpenAI Client
# setting Azure client
- if client is None:
- azure_client = AsyncAzureOpenAI(**azure_client_params)
- else:
- azure_client = client
- if api_version is not None and isinstance(
- azure_client._custom_query, dict
- ):
- # set api_version to version passed by user
- azure_client._custom_query.setdefault("api-version", api_version)
+ azure_client = self.get_azure_openai_client(
+ api_version=api_version,
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ _is_async=True,
+ client=client,
+ litellm_params=litellm_params,
+ )
+ if not isinstance(azure_client, AsyncAzureOpenAI):
+ raise AzureOpenAIError(
+ status_code=500,
+ message="azure_client is not an instance of AsyncAzureOpenAI",
+ )
+
## LOGGING
logging_obj.pre_call(
input=data["prompt"],
@@ -290,7 +269,7 @@ class AzureTextCompletion(BaseAzureLLM):
timeout: Any,
azure_ad_token: Optional[str] = None,
client=None,
- azure_client_params: dict = {},
+ litellm_params: dict = {},
):
max_retries = data.pop("max_retries", 2)
if not isinstance(max_retries, int):
@@ -298,13 +277,21 @@ class AzureTextCompletion(BaseAzureLLM):
status_code=422, message="max retries must be an int"
)
# init AzureOpenAI Client
- if client is None:
- azure_client = AzureOpenAI(**azure_client_params)
- else:
- azure_client = client
- if api_version is not None and isinstance(azure_client._custom_query, dict):
- # set api_version to version passed by user
- azure_client._custom_query.setdefault("api-version", api_version)
+ azure_client = self.get_azure_openai_client(
+ api_version=api_version,
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ _is_async=False,
+ client=client,
+ litellm_params=litellm_params,
+ )
+ if not isinstance(azure_client, AzureOpenAI):
+ raise AzureOpenAIError(
+ status_code=500,
+ message="azure_client is not an instance of AzureOpenAI",
+ )
+
## LOGGING
logging_obj.pre_call(
input=data["prompt"],
@@ -339,19 +326,24 @@ class AzureTextCompletion(BaseAzureLLM):
timeout: Any,
azure_ad_token: Optional[str] = None,
client=None,
- azure_client_params: dict = {},
+ litellm_params: dict = {},
):
try:
# init AzureOpenAI Client
- if client is None:
- azure_client = AsyncAzureOpenAI(**azure_client_params)
- else:
- azure_client = client
- if api_version is not None and isinstance(
- azure_client._custom_query, dict
- ):
- # set api_version to version passed by user
- azure_client._custom_query.setdefault("api-version", api_version)
+ azure_client = self.get_azure_openai_client(
+ api_version=api_version,
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ _is_async=True,
+ client=client,
+ litellm_params=litellm_params,
+ )
+ if not isinstance(azure_client, AsyncAzureOpenAI):
+ raise AzureOpenAIError(
+ status_code=500,
+ message="azure_client is not an instance of AsyncAzureOpenAI",
+ )
## LOGGING
logging_obj.pre_call(
input=data["prompt"],
diff --git a/litellm/llms/azure/files/handler.py b/litellm/llms/azure/files/handler.py
index d45ac9a315..50c122ccf2 100644
--- a/litellm/llms/azure/files/handler.py
+++ b/litellm/llms/azure/files/handler.py
@@ -28,11 +28,11 @@ class AzureOpenAIFilesAPI(BaseAzureLLM):
self,
create_file_data: CreateFileRequest,
openai_client: AsyncAzureOpenAI,
- ) -> FileObject:
+ ) -> OpenAIFileObject:
verbose_logger.debug("create_file_data=%s", create_file_data)
response = await openai_client.files.create(**create_file_data)
verbose_logger.debug("create_file_response=%s", response)
- return response
+ return OpenAIFileObject(**response.model_dump())
def create_file(
self,
@@ -45,17 +45,16 @@ class AzureOpenAIFilesAPI(BaseAzureLLM):
max_retries: Optional[int],
client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None,
litellm_params: Optional[dict] = None,
- ) -> Union[FileObject, Coroutine[Any, Any, FileObject]]:
-
- openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = (
- self.get_azure_openai_client(
- litellm_params=litellm_params or {},
- api_key=api_key,
- api_base=api_base,
- api_version=api_version,
- client=client,
- _is_async=_is_async,
- )
+ ) -> Union[OpenAIFileObject, Coroutine[Any, Any, OpenAIFileObject]]:
+ openai_client: Optional[
+ Union[AzureOpenAI, AsyncAzureOpenAI]
+ ] = self.get_azure_openai_client(
+ litellm_params=litellm_params or {},
+ api_key=api_key,
+ api_base=api_base,
+ api_version=api_version,
+ client=client,
+ _is_async=_is_async,
)
if openai_client is None:
raise ValueError(
@@ -67,11 +66,11 @@ class AzureOpenAIFilesAPI(BaseAzureLLM):
raise ValueError(
"AzureOpenAI client is not an instance of AsyncAzureOpenAI. Make sure you passed an AsyncAzureOpenAI client."
)
- return self.acreate_file( # type: ignore
+ return self.acreate_file(
create_file_data=create_file_data, openai_client=openai_client
)
- response = openai_client.files.create(**create_file_data)
- return response
+ response = cast(AzureOpenAI, openai_client).files.create(**create_file_data)
+ return OpenAIFileObject(**response.model_dump())
async def afile_content(
self,
@@ -95,15 +94,15 @@ class AzureOpenAIFilesAPI(BaseAzureLLM):
) -> Union[
HttpxBinaryResponseContent, Coroutine[Any, Any, HttpxBinaryResponseContent]
]:
- openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = (
- self.get_azure_openai_client(
- litellm_params=litellm_params or {},
- api_key=api_key,
- api_base=api_base,
- api_version=api_version,
- client=client,
- _is_async=_is_async,
- )
+ openai_client: Optional[
+ Union[AzureOpenAI, AsyncAzureOpenAI]
+ ] = self.get_azure_openai_client(
+ litellm_params=litellm_params or {},
+ api_key=api_key,
+ api_base=api_base,
+ api_version=api_version,
+ client=client,
+ _is_async=_is_async,
)
if openai_client is None:
raise ValueError(
@@ -145,15 +144,15 @@ class AzureOpenAIFilesAPI(BaseAzureLLM):
client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None,
litellm_params: Optional[dict] = None,
):
- openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = (
- self.get_azure_openai_client(
- litellm_params=litellm_params or {},
- api_key=api_key,
- api_base=api_base,
- api_version=api_version,
- client=client,
- _is_async=_is_async,
- )
+ openai_client: Optional[
+ Union[AzureOpenAI, AsyncAzureOpenAI]
+ ] = self.get_azure_openai_client(
+ litellm_params=litellm_params or {},
+ api_key=api_key,
+ api_base=api_base,
+ api_version=api_version,
+ client=client,
+ _is_async=_is_async,
)
if openai_client is None:
raise ValueError(
@@ -197,15 +196,15 @@ class AzureOpenAIFilesAPI(BaseAzureLLM):
client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None,
litellm_params: Optional[dict] = None,
):
- openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = (
- self.get_azure_openai_client(
- litellm_params=litellm_params or {},
- api_key=api_key,
- api_base=api_base,
- api_version=api_version,
- client=client,
- _is_async=_is_async,
- )
+ openai_client: Optional[
+ Union[AzureOpenAI, AsyncAzureOpenAI]
+ ] = self.get_azure_openai_client(
+ litellm_params=litellm_params or {},
+ api_key=api_key,
+ api_base=api_base,
+ api_version=api_version,
+ client=client,
+ _is_async=_is_async,
)
if openai_client is None:
raise ValueError(
@@ -251,15 +250,15 @@ class AzureOpenAIFilesAPI(BaseAzureLLM):
client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = None,
litellm_params: Optional[dict] = None,
):
- openai_client: Optional[Union[AzureOpenAI, AsyncAzureOpenAI]] = (
- self.get_azure_openai_client(
- litellm_params=litellm_params or {},
- api_key=api_key,
- api_base=api_base,
- api_version=api_version,
- client=client,
- _is_async=_is_async,
- )
+ openai_client: Optional[
+ Union[AzureOpenAI, AsyncAzureOpenAI]
+ ] = self.get_azure_openai_client(
+ litellm_params=litellm_params or {},
+ api_key=api_key,
+ api_base=api_base,
+ api_version=api_version,
+ client=client,
+ _is_async=_is_async,
)
if openai_client is None:
raise ValueError(
diff --git a/litellm/llms/azure/fine_tuning/handler.py b/litellm/llms/azure/fine_tuning/handler.py
index 3d7cc336fb..429b834989 100644
--- a/litellm/llms/azure/fine_tuning/handler.py
+++ b/litellm/llms/azure/fine_tuning/handler.py
@@ -25,14 +25,7 @@ class AzureOpenAIFineTuningAPI(OpenAIFineTuningAPI, BaseAzureLLM):
_is_async: bool = False,
api_version: Optional[str] = None,
litellm_params: Optional[dict] = None,
- ) -> Optional[
- Union[
- OpenAI,
- AsyncOpenAI,
- AzureOpenAI,
- AsyncAzureOpenAI,
- ]
- ]:
+ ) -> Optional[Union[OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI,]]:
# Override to use Azure-specific client initialization
if isinstance(client, OpenAI) or isinstance(client, AsyncOpenAI):
client = None
diff --git a/litellm/llms/azure/responses/transformation.py b/litellm/llms/azure/responses/transformation.py
new file mode 100644
index 0000000000..499d21cb0e
--- /dev/null
+++ b/litellm/llms/azure/responses/transformation.py
@@ -0,0 +1,138 @@
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, cast
+
+import httpx
+
+import litellm
+from litellm._logging import verbose_logger
+from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import *
+from litellm.types.responses.main import *
+from litellm.types.router import GenericLiteLLMParams
+from litellm.utils import _add_path_to_api_base
+
+if TYPE_CHECKING:
+ from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+ LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+ LiteLLMLoggingObj = Any
+
+
+class AzureOpenAIResponsesAPIConfig(OpenAIResponsesAPIConfig):
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ api_key: Optional[str] = None,
+ ) -> dict:
+ api_key = (
+ api_key
+ or litellm.api_key
+ or litellm.azure_key
+ or get_secret_str("AZURE_OPENAI_API_KEY")
+ or get_secret_str("AZURE_API_KEY")
+ )
+
+ headers.update(
+ {
+ "Authorization": f"Bearer {api_key}",
+ }
+ )
+ return headers
+
+ def get_complete_url(
+ self,
+ api_base: Optional[str],
+ litellm_params: dict,
+ ) -> str:
+ """
+ Constructs a complete URL for the API request.
+
+ Args:
+ - api_base: Base URL, e.g.,
+ "https://litellm8397336933.openai.azure.com"
+ OR
+ "https://litellm8397336933.openai.azure.com/openai/responses?api-version=2024-05-01-preview"
+ - litellm_params: LiteLLM params dict; "api_version" is read from here when the base URL does not already carry an "api-version" query parameter.
+
+ Returns:
+ - A complete URL string, e.g.,
+ "https://litellm8397336933.openai.azure.com/openai/responses?api-version=2024-05-01-preview"
+ """
+ api_base = api_base or litellm.api_base or get_secret_str("AZURE_API_BASE")
+ if api_base is None:
+ raise ValueError(
+ f"api_base is required for Azure AI Studio. Please set the api_base parameter. Passed `api_base={api_base}`"
+ )
+ original_url = httpx.URL(api_base)
+
+ # Extract api_version or use default
+ api_version = cast(Optional[str], litellm_params.get("api_version"))
+
+ # Create a new dictionary with existing params
+ query_params = dict(original_url.params)
+
+ # Add api_version if needed
+ if "api-version" not in query_params and api_version:
+ query_params["api-version"] = api_version
+
+ # Add the path to the base URL
+ if "/openai/responses" not in api_base:
+ new_url = _add_path_to_api_base(
+ api_base=api_base, ending_path="/openai/responses"
+ )
+ else:
+ new_url = api_base
+
+ # Use the new query_params dictionary
+ final_url = httpx.URL(new_url).copy_with(params=query_params)
+
+ return str(final_url)
+
+ #########################################################
+ ########## DELETE RESPONSE API TRANSFORMATION ##############
+ #########################################################
+ def transform_delete_response_api_request(
+ self,
+ response_id: str,
+ api_base: str,
+ litellm_params: GenericLiteLLMParams,
+ headers: dict,
+ ) -> Tuple[str, Dict]:
+ """
+ Transform the delete response API request into a URL and data
+
+ Azure OpenAI API expects the following request:
+ - DELETE /openai/responses/{response_id}?api-version=xxx
+
+ This function handles URLs with query parameters by inserting the response_id
+ at the correct location (before any query parameters).
+ """
+ from urllib.parse import urlparse, urlunparse
+
+ # Parse the URL to separate its components
+ parsed_url = urlparse(api_base)
+
+ # Insert the response_id at the end of the path component
+ # Remove trailing slash if present to avoid double slashes
+ path = parsed_url.path.rstrip("/")
+ new_path = f"{path}/{response_id}"
+
+ # Reconstruct the URL with all original components but with the modified path
+ delete_url = urlunparse(
+ (
+ parsed_url.scheme, # http, https
+ parsed_url.netloc, # domain name, port
+ new_path, # path with response_id added
+ parsed_url.params, # parameters
+ parsed_url.query, # query string
+ parsed_url.fragment, # fragment
+ )
+ )
+
+ data: Dict = {}
+ verbose_logger.debug(f"delete response url={delete_url}")
+ return delete_url, data
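
The delete-request transformation above has to append the response id to the URL path while leaving Azure's `api-version` query string untouched. A standalone sketch of the same stdlib approach (the resource name and response id below are made up):

from urllib.parse import urlparse, urlunparse

def build_delete_url(api_base: str, response_id: str) -> str:
    parsed = urlparse(api_base)
    path = parsed.path.rstrip("/")      # avoid double slashes
    new_path = f"{path}/{response_id}"  # id goes into the path, before the query string
    return urlunparse(
        (parsed.scheme, parsed.netloc, new_path, parsed.params, parsed.query, parsed.fragment)
    )

print(
    build_delete_url(
        "https://example-resource.openai.azure.com/openai/responses?api-version=2024-05-01-preview",
        "resp_123",
    )
)
# https://example-resource.openai.azure.com/openai/responses/resp_123?api-version=2024-05-01-preview
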
diff --git a/litellm/llms/azure_ai/chat/transformation.py b/litellm/llms/azure_ai/chat/transformation.py
index 154f345537..1adc56804f 100644
--- a/litellm/llms/azure_ai/chat/transformation.py
+++ b/litellm/llms/azure_ai/chat/transformation.py
@@ -1,3 +1,4 @@
+import enum
from typing import Any, List, Optional, Tuple, cast
from urllib.parse import urlparse
@@ -19,6 +20,10 @@ from litellm.types.utils import ModelResponse, ProviderField
from litellm.utils import _add_path_to_api_base, supports_tool_choice
+class AzureFoundryErrorStrings(str, enum.Enum):
+ SET_EXTRA_PARAMETERS_TO_PASS_THROUGH = "Set extra-parameters to 'pass-through'"
+
+
class AzureAIStudioConfig(OpenAIConfig):
def get_supported_openai_params(self, model: str) -> List:
model_supports_tool_choice = True # azure ai supports this by default
@@ -39,6 +44,7 @@ class AzureAIStudioConfig(OpenAIConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
@@ -65,6 +71,7 @@ class AzureAIStudioConfig(OpenAIConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
@@ -145,7 +152,6 @@ class AzureAIStudioConfig(OpenAIConfig):
2. If message contains an image or audio, send as is (user-intended)
"""
for message in messages:
-
# Do nothing if the message contains an image or audio
if _audio_or_image_in_message_content(message):
continue
@@ -239,12 +245,18 @@ class AzureAIStudioConfig(OpenAIConfig):
) -> bool:
should_drop_params = litellm_params.get("drop_params") or litellm.drop_params
error_text = e.response.text
+
if should_drop_params and "Extra inputs are not permitted" in error_text:
return True
elif (
"unknown field: parameter index is not a valid field" in error_text
): # remove index from tool calls
return True
+ elif (
+ AzureFoundryErrorStrings.SET_EXTRA_PARAMETERS_TO_PASS_THROUGH.value
+ in error_text
+ ): # drop the unsupported extra params from the request
+ return True
return super().should_retry_llm_api_inside_llm_translation_on_http_error(
e=e, litellm_params=litellm_params
)
@@ -264,5 +276,46 @@ class AzureAIStudioConfig(OpenAIConfig):
litellm.remove_index_from_tool_calls(
messages=_messages,
)
+ elif (
+ AzureFoundryErrorStrings.SET_EXTRA_PARAMETERS_TO_PASS_THROUGH.value
+ in e.response.text
+ ):
+ request_data = self._drop_extra_params_from_request_data(
+ request_data, e.response.text
+ )
data = drop_params_from_unprocessable_entity_error(e=e, data=request_data)
return data
+
+ def _drop_extra_params_from_request_data(
+ self, request_data: dict, error_text: str
+ ) -> dict:
+ params_to_drop = self._extract_params_to_drop_from_error_text(error_text)
+ if params_to_drop:
+ for param in params_to_drop:
+ if param in request_data:
+ request_data.pop(param, None)
+ return request_data
+
+ def _extract_params_to_drop_from_error_text(
+ self, error_text: str
+ ) -> Optional[List[str]]:
+ """
+ Error text looks like this:
+ "Extra parameters ['stream_options', 'extra-parameters'] are not allowed when extra-parameters is not set or set to be 'error'."
+ """
+ import re
+
+ # Extract parameters within square brackets
+ match = re.search(r"\[(.*?)\]", error_text)
+ if not match:
+ return []
+
+ # Parse the extracted string into a list of parameter names
+ params_str = match.group(1)
+ params = []
+ for param in params_str.split(","):
+ # Clean up the parameter name (remove quotes, spaces)
+ clean_param = param.strip().strip("'").strip('"')
+ if clean_param:
+ params.append(clean_param)
+ return params
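
The retry path above parses the offending parameter names out of the Azure Foundry error string and drops them from the request before retrying. A standalone sketch of that flow (the error text is the example quoted in the docstring above):

import re
from typing import List

def extract_params_to_drop(error_text: str) -> List[str]:
    match = re.search(r"\[(.*?)\]", error_text)  # parameter names are listed inside square brackets
    if not match:
        return []
    return [p.strip().strip("'").strip('"') for p in match.group(1).split(",") if p.strip()]

error_text = (
    "Extra parameters ['stream_options', 'extra-parameters'] are not allowed "
    "when extra-parameters is not set or set to be 'error'."
)
request_data = {"messages": [], "stream_options": {"include_usage": True}}
for param in extract_params_to_drop(error_text):
    request_data.pop(param, None)
print(request_data)  # {'messages': []}
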
diff --git a/litellm/llms/azure_ai/embed/cohere_transformation.py b/litellm/llms/azure_ai/embed/cohere_transformation.py
index 38b0dbbe23..64433c21b6 100644
--- a/litellm/llms/azure_ai/embed/cohere_transformation.py
+++ b/litellm/llms/azure_ai/embed/cohere_transformation.py
@@ -22,7 +22,6 @@ class AzureAICohereConfig:
pass
def _map_azure_model_group(self, model: str) -> str:
-
if model == "offer-cohere-embed-multili-paygo":
return "Cohere-embed-v3-multilingual"
elif model == "offer-cohere-embed-english-paygo":
diff --git a/litellm/llms/azure_ai/embed/handler.py b/litellm/llms/azure_ai/embed/handler.py
index f33c979ca2..da39c5f3b8 100644
--- a/litellm/llms/azure_ai/embed/handler.py
+++ b/litellm/llms/azure_ai/embed/handler.py
@@ -17,7 +17,6 @@ from .cohere_transformation import AzureAICohereConfig
class AzureAIEmbedding(OpenAIChatCompletion):
-
def _process_response(
self,
image_embedding_responses: Optional[List],
@@ -145,7 +144,6 @@ class AzureAIEmbedding(OpenAIChatCompletion):
api_base: Optional[str] = None,
client=None,
) -> EmbeddingResponse:
-
(
image_embeddings_request,
v1_embeddings_request,
diff --git a/litellm/llms/azure_ai/rerank/transformation.py b/litellm/llms/azure_ai/rerank/transformation.py
index 842511f30d..4465e0d70a 100644
--- a/litellm/llms/azure_ai/rerank/transformation.py
+++ b/litellm/llms/azure_ai/rerank/transformation.py
@@ -17,6 +17,7 @@ class AzureAIRerankConfig(CohereRerankConfig):
"""
Azure AI Rerank - Follows the same Spec as Cohere Rerank
"""
+
def get_complete_url(self, api_base: Optional[str], model: str) -> str:
if api_base is None:
raise ValueError(
diff --git a/litellm/llms/base.py b/litellm/llms/base.py
index deced222ca..abc314bba0 100644
--- a/litellm/llms/base.py
+++ b/litellm/llms/base.py
@@ -9,7 +9,6 @@ from litellm.types.utils import ModelResponse, TextCompletionResponse
class BaseLLM:
-
_client_session: Optional[httpx.Client] = None
def process_response(
diff --git a/litellm/llms/base_llm/audio_transcription/transformation.py b/litellm/llms/base_llm/audio_transcription/transformation.py
index e550c574e2..cf88fed30d 100644
--- a/litellm/llms/base_llm/audio_transcription/transformation.py
+++ b/litellm/llms/base_llm/audio_transcription/transformation.py
@@ -1,5 +1,5 @@
from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, Any, List, Optional
+from typing import TYPE_CHECKING, Any, List, Optional, Union
import httpx
@@ -8,7 +8,7 @@ from litellm.types.llms.openai import (
AllMessageValues,
OpenAIAudioTranscriptionOptionalParams,
)
-from litellm.types.utils import ModelResponse
+from litellm.types.utils import FileTypes, ModelResponse
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
@@ -28,6 +28,7 @@ class BaseAudioTranscriptionConfig(BaseConfig, ABC):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
@@ -42,6 +43,18 @@ class BaseAudioTranscriptionConfig(BaseConfig, ABC):
"""
return api_base or ""
+ @abstractmethod
+ def transform_audio_transcription_request(
+ self,
+ model: str,
+ audio_file: FileTypes,
+ optional_params: dict,
+ litellm_params: dict,
+ ) -> Union[dict, bytes]:
+ raise NotImplementedError(
+ "AudioTranscriptionConfig needs a request transformation for audio transcription models"
+ )
+
def transform_request(
self,
model: str,
diff --git a/litellm/llms/base_llm/base_model_iterator.py b/litellm/llms/base_llm/base_model_iterator.py
index 67b1466c2a..4cf757d6cd 100644
--- a/litellm/llms/base_llm/base_model_iterator.py
+++ b/litellm/llms/base_llm/base_model_iterator.py
@@ -1,8 +1,16 @@
import json
from abc import abstractmethod
-from typing import Optional, Union
+from typing import List, Optional, Union, cast
-from litellm.types.utils import GenericStreamingChunk, ModelResponseStream
+import litellm
+from litellm.types.utils import (
+ Choices,
+ Delta,
+ GenericStreamingChunk,
+ ModelResponse,
+ ModelResponseStream,
+ StreamingChoices,
+)
class BaseModelResponseIterator:
@@ -33,6 +41,18 @@ class BaseModelResponseIterator:
self, str_line: str
) -> Union[GenericStreamingChunk, ModelResponseStream]:
# chunk is a str at this point
+
+ stripped_chunk = litellm.CustomStreamWrapper._strip_sse_data_from_chunk(
+ str_line
+ )
+ try:
+ if stripped_chunk is not None:
+ stripped_json_chunk: Optional[dict] = json.loads(stripped_chunk)
+ else:
+ stripped_json_chunk = None
+ except json.JSONDecodeError:
+ stripped_json_chunk = None
+
if "[DONE]" in str_line:
return GenericStreamingChunk(
text="",
@@ -42,9 +62,8 @@ class BaseModelResponseIterator:
index=0,
tool_use=None,
)
- elif str_line.startswith("data:"):
- data_json = json.loads(str_line[5:])
- return self.chunk_parser(chunk=data_json)
+ elif stripped_json_chunk:
+ return self.chunk_parser(chunk=stripped_json_chunk)
else:
return GenericStreamingChunk(
text="",
@@ -85,6 +104,7 @@ class BaseModelResponseIterator:
async def __anext__(self):
try:
chunk = await self.async_response_iterator.__anext__()
+
except StopAsyncIteration:
raise StopAsyncIteration
except ValueError as e:
@@ -99,13 +119,68 @@ class BaseModelResponseIterator:
str_line = str_line[index:]
# chunk is a str at this point
- return self._handle_string_chunk(str_line=str_line)
+ chunk = self._handle_string_chunk(str_line=str_line)
+
+ return chunk
except StopAsyncIteration:
raise StopAsyncIteration
except ValueError as e:
raise RuntimeError(f"Error parsing chunk: {e},\nReceived chunk: {chunk}")
+class MockResponseIterator: # for returning ai21 streaming responses
+ def __init__(
+ self, model_response: ModelResponse, json_mode: Optional[bool] = False
+ ):
+ self.model_response = model_response
+ self.json_mode = json_mode
+ self.is_done = False
+
+ # Sync iterator
+ def __iter__(self):
+ return self
+
+ def _chunk_parser(self, chunk_data: ModelResponse) -> ModelResponseStream:
+ try:
+ streaming_choices: List[StreamingChoices] = []
+ for choice in chunk_data.choices:
+ streaming_choices.append(
+ StreamingChoices(
+ index=choice.index,
+ delta=Delta(
+ **cast(Choices, choice).message.model_dump(),
+ ),
+ finish_reason=choice.finish_reason,
+ )
+ )
+ processed_chunk = ModelResponseStream(
+ id=chunk_data.id,
+ object="chat.completion",
+ created=chunk_data.created,
+ model=chunk_data.model,
+ choices=streaming_choices,
+ )
+ return processed_chunk
+ except Exception as e:
+ raise ValueError(f"Failed to decode chunk: {chunk_data}. Error: {e}")
+
+ def __next__(self):
+ if self.is_done:
+ raise StopIteration
+ self.is_done = True
+ return self._chunk_parser(self.model_response)
+
+ # Async iterator
+ def __aiter__(self):
+ return self
+
+ async def __anext__(self):
+ if self.is_done:
+ raise StopAsyncIteration
+ self.is_done = True
+ return self._chunk_parser(self.model_response)
+
+
class FakeStreamResponseIterator:
def __init__(self, model_response, json_mode: Optional[bool] = False):
self.model_response = model_response
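
`MockResponseIterator` above is the fake-streaming building block: it wraps one complete `ModelResponse` and yields it as a single streaming chunk, for both sync and async consumers. A stripped-down sketch of the same one-shot iterator pattern, using a plain dict instead of litellm's response types:

class OneShotStream:
    """Yield a single pre-built chunk, then stop (sync and async)."""

    def __init__(self, chunk: dict):
        self.chunk = chunk
        self.is_done = False

    def __iter__(self):
        return self

    def __next__(self):
        if self.is_done:
            raise StopIteration
        self.is_done = True
        return self.chunk

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self.is_done:
            raise StopAsyncIteration
        self.is_done = True
        return self.chunk

for chunk in OneShotStream({"choices": [{"delta": {"content": "hello"}}]}):
    print(chunk)  # printed exactly once
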
diff --git a/litellm/llms/base_llm/base_utils.py b/litellm/llms/base_llm/base_utils.py
index 919cdbfd02..712f5de8cc 100644
--- a/litellm/llms/base_llm/base_utils.py
+++ b/litellm/llms/base_llm/base_utils.py
@@ -3,6 +3,7 @@ Utility functions for base LLM classes.
"""
import copy
+import json
from abc import ABC, abstractmethod
from typing import List, Optional, Type, Union
@@ -10,8 +11,8 @@ from openai.lib import _parsing, _pydantic
from pydantic import BaseModel
from litellm._logging import verbose_logger
-from litellm.types.llms.openai import AllMessageValues
-from litellm.types.utils import ProviderSpecificModelInfo
+from litellm.types.llms.openai import AllMessageValues, ChatCompletionToolCallChunk
+from litellm.types.utils import Message, ProviderSpecificModelInfo
class BaseLLMModelInfo(ABC):
@@ -19,11 +20,19 @@ class BaseLLMModelInfo(ABC):
self,
model: str,
) -> Optional[ProviderSpecificModelInfo]:
+ """
+ Default values that all models of this provider support.
+ """
return None
@abstractmethod
- def get_models(self) -> List[str]:
- pass
+ def get_models(
+ self, api_key: Optional[str] = None, api_base: Optional[str] = None
+ ) -> List[str]:
+ """
+ Returns a list of models supported by this provider.
+ """
+ return []
@staticmethod
@abstractmethod
@@ -35,6 +44,19 @@ class BaseLLMModelInfo(ABC):
def get_api_base(api_base: Optional[str] = None) -> Optional[str]:
pass
+ @abstractmethod
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ pass
+
@staticmethod
@abstractmethod
def get_base_model(model: str) -> Optional[str]:
@@ -47,6 +69,32 @@ class BaseLLMModelInfo(ABC):
pass
+def _convert_tool_response_to_message(
+ tool_calls: List[ChatCompletionToolCallChunk],
+) -> Optional[Message]:
+ """
+ In JSON mode, the Anthropic API returns the JSON schema as a tool call; we need to convert it to a message to follow the OpenAI format.
+
+ """
+ ## HANDLE JSON MODE - anthropic returns single function call
+ json_mode_content_str: Optional[str] = tool_calls[0]["function"].get("arguments")
+ try:
+ if json_mode_content_str is not None:
+ args = json.loads(json_mode_content_str)
+ if isinstance(args, dict) and (values := args.get("values")) is not None:
+ _message = Message(content=json.dumps(values))
+ return _message
+ else:
+ # often the `values` key is not present in the tool response
+ # relevant issue: https://github.com/BerriAI/litellm/issues/6741
+ _message = Message(content=json.dumps(args))
+ return _message
+ except json.JSONDecodeError:
+ # if a JSON decode error occurs, return the original tool response str
+ return Message(content=json_mode_content_str)
+ return None
+
+
def _dict_to_response_format_helper(
response_format: dict, ref_template: Optional[str] = None
) -> dict:
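
`_convert_tool_response_to_message` above unwraps the `values` key when a JSON-mode answer comes back as a tool call, and otherwise passes the arguments through. A rough standalone sketch of that behavior, with plain dicts and strings standing in for litellm's `ChatCompletionToolCallChunk` and `Message` types:

import json
from typing import List, Optional

def tool_calls_to_content(tool_calls: List[dict]) -> Optional[str]:
    args_str = tool_calls[0]["function"].get("arguments")
    if args_str is None:
        return None
    try:
        args = json.loads(args_str)
    except json.JSONDecodeError:
        return args_str  # not valid JSON, pass the raw string through
    if isinstance(args, dict) and args.get("values") is not None:
        return json.dumps(args["values"])  # unwrap the schema's "values" wrapper
    return json.dumps(args)

call = {"function": {"name": "json_tool_call", "arguments": '{"values": {"city": "Paris"}}'}}
print(tool_calls_to_content([call]))  # {"city": "Paris"}
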
diff --git a/litellm/llms/base_llm/chat/transformation.py b/litellm/llms/base_llm/chat/transformation.py
index 1b5a6bc58e..fa278c805e 100644
--- a/litellm/llms/base_llm/chat/transformation.py
+++ b/litellm/llms/base_llm/chat/transformation.py
@@ -13,12 +13,13 @@ from typing import (
Optional,
Type,
Union,
+ cast,
)
import httpx
from pydantic import BaseModel
-from litellm.constants import RESPONSE_FORMAT_TOOL_NAME
+from litellm.constants import DEFAULT_MAX_TOKENS, RESPONSE_FORMAT_TOOL_NAME
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.types.llms.openai import (
AllMessageValues,
@@ -102,6 +103,32 @@ class BaseConfig(ABC):
) -> Optional[dict]:
return type_to_response_format_param(response_format=response_format)
+ def is_thinking_enabled(self, non_default_params: dict) -> bool:
+ return (
+ non_default_params.get("thinking", {}).get("type") == "enabled"
+ or non_default_params.get("reasoning_effort") is not None
+ )
+
+ def update_optional_params_with_thinking_tokens(
+ self, non_default_params: dict, optional_params: dict
+ ):
+ """
+ Handles the scenario where max_tokens is not specified. For Anthropic models (Anthropic API / Bedrock / Vertex AI), enabling thinking requires max_tokens to be set and to exceed the thinking token budget.
+
+ Checks 'non_default_params' for 'thinking' and 'max_tokens'.
+
+ If 'thinking' is enabled and 'max_tokens' is not specified, set 'max_tokens' to the thinking token budget + DEFAULT_MAX_TOKENS.
+ """
+ is_thinking_enabled = self.is_thinking_enabled(optional_params)
+ if is_thinking_enabled and "max_tokens" not in non_default_params:
+ thinking_token_budget = cast(dict, optional_params["thinking"]).get(
+ "budget_tokens", None
+ )
+ if thinking_token_budget is not None:
+ optional_params["max_tokens"] = (
+ thinking_token_budget + DEFAULT_MAX_TOKENS
+ )
+
def should_fake_stream(
self,
model: Optional[str],
@@ -194,7 +221,6 @@ class BaseConfig(ABC):
json_schema = value["json_schema"]["schema"]
if json_schema and not is_response_format_supported:
-
_tool_choice = ChatCompletionToolChoiceObjectParam(
type="function",
function=ChatCompletionToolChoiceFunctionParam(
@@ -236,6 +262,7 @@ class BaseConfig(ABC):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
@@ -268,6 +295,7 @@ class BaseConfig(ABC):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
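
`update_optional_params_with_thinking_tokens` above tops up `max_tokens` when extended thinking is enabled but no explicit limit was given. A minimal sketch of the intended behavior; the `DEFAULT_MAX_TOKENS` value here is only a placeholder for the constant imported from litellm.constants:

DEFAULT_MAX_TOKENS = 4096  # placeholder for the litellm.constants value

def update_optional_params_with_thinking_tokens(non_default_params: dict, optional_params: dict) -> None:
    thinking_enabled = (
        non_default_params.get("thinking", {}).get("type") == "enabled"
        or non_default_params.get("reasoning_effort") is not None
    )
    if thinking_enabled and "max_tokens" not in non_default_params:
        budget = optional_params.get("thinking", {}).get("budget_tokens")
        if budget is not None:
            optional_params["max_tokens"] = budget + DEFAULT_MAX_TOKENS

params = {"thinking": {"type": "enabled", "budget_tokens": 1024}}
optional = {"thinking": {"type": "enabled", "budget_tokens": 1024}}
update_optional_params_with_thinking_tokens(params, optional)
print(optional["max_tokens"])  # 1024 + DEFAULT_MAX_TOKENS -> 5120
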
diff --git a/litellm/llms/base_llm/completion/transformation.py b/litellm/llms/base_llm/completion/transformation.py
index 9432f02da1..2309634f18 100644
--- a/litellm/llms/base_llm/completion/transformation.py
+++ b/litellm/llms/base_llm/completion/transformation.py
@@ -29,6 +29,7 @@ class BaseTextCompletionConfig(BaseConfig, ABC):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
diff --git a/litellm/llms/base_llm/embedding/transformation.py b/litellm/llms/base_llm/embedding/transformation.py
index 68c0a7c05a..c03a8235b4 100644
--- a/litellm/llms/base_llm/embedding/transformation.py
+++ b/litellm/llms/base_llm/embedding/transformation.py
@@ -43,6 +43,7 @@ class BaseEmbeddingConfig(BaseConfig, ABC):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
diff --git a/litellm/llms/base_llm/files/transformation.py b/litellm/llms/base_llm/files/transformation.py
new file mode 100644
index 0000000000..9925004c89
--- /dev/null
+++ b/litellm/llms/base_llm/files/transformation.py
@@ -0,0 +1,101 @@
+from abc import abstractmethod
+from typing import TYPE_CHECKING, Any, List, Optional, Union
+
+import httpx
+
+from litellm.types.llms.openai import (
+ AllMessageValues,
+ CreateFileRequest,
+ OpenAICreateFileRequestOptionalParams,
+ OpenAIFileObject,
+)
+from litellm.types.utils import LlmProviders, ModelResponse
+
+from ..chat.transformation import BaseConfig
+
+if TYPE_CHECKING:
+ from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+ LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+ LiteLLMLoggingObj = Any
+
+
+class BaseFilesConfig(BaseConfig):
+ @property
+ @abstractmethod
+ def custom_llm_provider(self) -> LlmProviders:
+ pass
+
+ @abstractmethod
+ def get_supported_openai_params(
+ self, model: str
+ ) -> List[OpenAICreateFileRequestOptionalParams]:
+ pass
+
+ def get_complete_file_url(
+ self,
+ api_base: Optional[str],
+ api_key: Optional[str],
+ model: str,
+ optional_params: dict,
+ litellm_params: dict,
+ data: CreateFileRequest,
+ ):
+ return self.get_complete_url(
+ api_base=api_base,
+ api_key=api_key,
+ model=model,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
+ )
+
+ @abstractmethod
+ def transform_create_file_request(
+ self,
+ model: str,
+ create_file_data: CreateFileRequest,
+ optional_params: dict,
+ litellm_params: dict,
+ ) -> Union[dict, str, bytes]:
+ pass
+
+ @abstractmethod
+ def transform_create_file_response(
+ self,
+ model: Optional[str],
+ raw_response: httpx.Response,
+ logging_obj: LiteLLMLoggingObj,
+ litellm_params: dict,
+ ) -> OpenAIFileObject:
+ pass
+
+ def transform_request(
+ self,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ headers: dict,
+ ) -> dict:
+ raise NotImplementedError(
+ "AudioTranscriptionConfig does not need a request transformation for audio transcription models"
+ )
+
+ def transform_response(
+ self,
+ model: str,
+ raw_response: httpx.Response,
+ model_response: ModelResponse,
+ logging_obj: LiteLLMLoggingObj,
+ request_data: dict,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ encoding: Any,
+ api_key: Optional[str] = None,
+ json_mode: Optional[bool] = None,
+ ) -> ModelResponse:
+ raise NotImplementedError(
+ "AudioTranscriptionConfig does not need a response transformation for audio transcription models"
+ )
diff --git a/litellm/llms/base_llm/image_variations/transformation.py b/litellm/llms/base_llm/image_variations/transformation.py
index 4d1cd6eebb..60444d0fb7 100644
--- a/litellm/llms/base_llm/image_variations/transformation.py
+++ b/litellm/llms/base_llm/image_variations/transformation.py
@@ -34,6 +34,7 @@ class BaseImageVariationConfig(BaseConfig, ABC):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
@@ -64,6 +65,7 @@ class BaseImageVariationConfig(BaseConfig, ABC):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/base_llm/responses/transformation.py b/litellm/llms/base_llm/responses/transformation.py
index c41d63842b..15ce8cba3f 100644
--- a/litellm/llms/base_llm/responses/transformation.py
+++ b/litellm/llms/base_llm/responses/transformation.py
@@ -1,16 +1,16 @@
import types
from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, Any, Dict, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
import httpx
from litellm.types.llms.openai import (
ResponseInputParam,
ResponsesAPIOptionalRequestParams,
- ResponsesAPIRequestParams,
ResponsesAPIResponse,
ResponsesAPIStreamingResponse,
)
+from litellm.types.responses.main import *
from litellm.types.router import GenericLiteLLMParams
if TYPE_CHECKING:
@@ -59,7 +59,6 @@ class BaseResponsesAPIConfig(ABC):
model: str,
drop_params: bool,
) -> Dict:
-
pass
@abstractmethod
@@ -75,8 +74,7 @@ class BaseResponsesAPIConfig(ABC):
def get_complete_url(
self,
api_base: Optional[str],
- model: str,
- stream: Optional[bool] = None,
+ litellm_params: dict,
) -> str:
"""
OPTIONAL
@@ -97,7 +95,7 @@ class BaseResponsesAPIConfig(ABC):
response_api_optional_request_params: Dict,
litellm_params: GenericLiteLLMParams,
headers: dict,
- ) -> ResponsesAPIRequestParams:
+ ) -> Dict:
pass
@abstractmethod
@@ -121,6 +119,31 @@ class BaseResponsesAPIConfig(ABC):
"""
pass
+ #########################################################
+ ########## DELETE RESPONSE API TRANSFORMATION ##############
+ #########################################################
+ @abstractmethod
+ def transform_delete_response_api_request(
+ self,
+ response_id: str,
+ api_base: str,
+ litellm_params: GenericLiteLLMParams,
+ headers: dict,
+ ) -> Tuple[str, Dict]:
+ pass
+
+ @abstractmethod
+ def transform_delete_response_api_response(
+ self,
+ raw_response: httpx.Response,
+ logging_obj: LiteLLMLoggingObj,
+ ) -> DeleteResponseResult:
+ pass
+
+ #########################################################
+ ########## END DELETE RESPONSE API TRANSFORMATION ##########
+ #########################################################
+
def get_error_class(
self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
) -> BaseLLMException:
@@ -131,3 +154,12 @@ class BaseResponsesAPIConfig(ABC):
message=error_message,
headers=headers,
)
+
+ def should_fake_stream(
+ self,
+ model: Optional[str],
+ stream: Optional[bool],
+ custom_llm_provider: Optional[str] = None,
+ ) -> bool:
+ """Returns True if litellm should fake a stream for the given model and stream value"""
+ return False
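
For orientation, a provider config that fills in the two new delete-response hooks would look roughly like the sketch below. This is a hedged illustration, not the shipped OpenAI config: the URL layout follows the public `DELETE /v1/responses/{id}` convention, and `DeleteResponseResultStub` is a local stand-in for `litellm.types.responses.main.DeleteResponseResult` so the snippet stays self-contained.

```python
from dataclasses import dataclass
from typing import Dict, Tuple

import httpx


@dataclass
class DeleteResponseResultStub:
    """Local stand-in for DeleteResponseResult (OpenAI-style delete payload)."""
    id: str
    object: str
    deleted: bool


def transform_delete_response_api_request(
    response_id: str, api_base: str, headers: dict
) -> Tuple[str, Dict]:
    # The abstract hook returns (url, request_body); delete calls carry an empty body.
    url = f"{api_base.rstrip('/')}/responses/{response_id}"
    return url, {}


def transform_delete_response_api_response(
    raw_response: httpx.Response,
) -> DeleteResponseResultStub:
    # OpenAI returns e.g. {"id": "resp_abc", "object": "response", "deleted": true}.
    payload = raw_response.json()
    return DeleteResponseResultStub(
        id=payload["id"],
        object=payload.get("object", "response"),
        deleted=payload.get("deleted", True),
    )
```

The `should_fake_stream` hook added at the end of the file defaults to False; a provider config can override it when a model cannot stream natively, signalling that LiteLLM should buffer the full response and replay it as a stream.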
diff --git a/litellm/llms/bedrock/base_aws_llm.py b/litellm/llms/bedrock/base_aws_llm.py
index 5482d80687..133ef6a952 100644
--- a/litellm/llms/bedrock/base_aws_llm.py
+++ b/litellm/llms/bedrock/base_aws_llm.py
@@ -9,7 +9,7 @@ from pydantic import BaseModel
from litellm._logging import verbose_logger
from litellm.caching.caching import DualCache
-from litellm.constants import BEDROCK_INVOKE_PROVIDERS_LITERAL
+from litellm.constants import BEDROCK_INVOKE_PROVIDERS_LITERAL, BEDROCK_MAX_POLICY_SIZE
from litellm.litellm_core_utils.dd_tracing import tracer
from litellm.secret_managers.main import get_secret
@@ -381,7 +381,7 @@ class BaseAWSLLM:
"region_name": aws_region_name,
}
- if sts_response["PackedPolicySize"] > 75:
+ if sts_response["PackedPolicySize"] > BEDROCK_MAX_POLICY_SIZE:
verbose_logger.warning(
f"The policy size is greater than 75% of the allowed size, PackedPolicySize: {sts_response['PackedPolicySize']}"
)
diff --git a/litellm/llms/bedrock/chat/converse_handler.py b/litellm/llms/bedrock/chat/converse_handler.py
index a4230177b5..7f529c637a 100644
--- a/litellm/llms/bedrock/chat/converse_handler.py
+++ b/litellm/llms/bedrock/chat/converse_handler.py
@@ -81,7 +81,6 @@ def make_sync_call(
class BedrockConverseLLM(BaseAWSLLM):
-
def __init__(self) -> None:
super().__init__()
@@ -114,7 +113,6 @@ class BedrockConverseLLM(BaseAWSLLM):
fake_stream: bool = False,
json_mode: Optional[bool] = False,
) -> CustomStreamWrapper:
-
request_data = await litellm.AmazonConverseConfig()._async_transform_request(
model=model,
messages=messages,
@@ -179,7 +177,6 @@ class BedrockConverseLLM(BaseAWSLLM):
headers: dict = {},
client: Optional[AsyncHTTPHandler] = None,
) -> Union[ModelResponse, CustomStreamWrapper]:
-
request_data = await litellm.AmazonConverseConfig()._async_transform_request(
model=model,
messages=messages,
@@ -265,7 +262,6 @@ class BedrockConverseLLM(BaseAWSLLM):
extra_headers: Optional[dict] = None,
client: Optional[Union[AsyncHTTPHandler, HTTPHandler]] = None,
):
-
## SETUP ##
stream = optional_params.pop("stream", None)
unencoded_model_id = optional_params.pop("model_id", None)
@@ -301,9 +297,9 @@ class BedrockConverseLLM(BaseAWSLLM):
aws_sts_endpoint = optional_params.pop("aws_sts_endpoint", None)
optional_params.pop("aws_region_name", None)
- litellm_params["aws_region_name"] = (
- aws_region_name # [DO NOT DELETE] important for async calls
- )
+ litellm_params[
+ "aws_region_name"
+ ] = aws_region_name # [DO NOT DELETE] important for async calls
credentials: Credentials = self.get_credentials(
aws_access_key_id=aws_access_key_id,
diff --git a/litellm/llms/bedrock/chat/converse_transformation.py b/litellm/llms/bedrock/chat/converse_transformation.py
index bb874cfe38..31d7542cb4 100644
--- a/litellm/llms/bedrock/chat/converse_transformation.py
+++ b/litellm/llms/bedrock/chat/converse_transformation.py
@@ -17,10 +17,12 @@ from litellm.litellm_core_utils.prompt_templates.factory import (
_bedrock_converse_messages_pt,
_bedrock_tools_pt,
)
+from litellm.llms.anthropic.chat.transformation import AnthropicConfig
from litellm.llms.base_llm.chat.transformation import BaseConfig, BaseLLMException
from litellm.types.llms.bedrock import *
from litellm.types.llms.openai import (
AllMessageValues,
+ ChatCompletionRedactedThinkingBlock,
ChatCompletionResponseMessage,
ChatCompletionSystemMessage,
ChatCompletionThinkingBlock,
@@ -29,6 +31,7 @@ from litellm.types.llms.openai import (
ChatCompletionToolParam,
ChatCompletionToolParamFunctionChunk,
ChatCompletionUserMessage,
+ OpenAIChatCompletionToolParam,
OpenAIMessageContentListBlock,
)
from litellm.types.utils import ModelResponse, PromptTokensDetailsWrapper, Usage
@@ -66,6 +69,13 @@ class AmazonConverseConfig(BaseConfig):
def custom_llm_provider(self) -> Optional[str]:
return "bedrock_converse"
+ @classmethod
+ def get_config_blocks(cls) -> dict:
+ return {
+ "guardrailConfig": GuardrailConfigBlock,
+ "performanceConfig": PerformanceConfigBlock,
+ }
+
@classmethod
def get_config(cls):
return {
@@ -121,6 +131,7 @@ class AmazonConverseConfig(BaseConfig):
"claude-3-7" in model
): # [TODO]: move to a 'supports_reasoning_content' param from model cost map
supported_params.append("thinking")
+ supported_params.append("reasoning_effort")
return supported_params
def map_tool_choice_values(
@@ -202,17 +213,34 @@ class AmazonConverseConfig(BaseConfig):
)
return _tool
+ def _apply_tool_call_transformation(
+ self,
+ tools: List[OpenAIChatCompletionToolParam],
+ model: str,
+ non_default_params: dict,
+ optional_params: dict,
+ ):
+ optional_params = self._add_tools_to_optional_params(
+ optional_params=optional_params, tools=tools
+ )
+
+ if (
+ "meta.llama3-3-70b-instruct-v1:0" in model
+ and non_default_params.get("stream", False) is True
+ ):
+ optional_params["fake_stream"] = True
+
def map_openai_params(
self,
non_default_params: dict,
optional_params: dict,
model: str,
drop_params: bool,
- messages: Optional[List[AllMessageValues]] = None,
) -> dict:
+ is_thinking_enabled = self.is_thinking_enabled(non_default_params)
+
for param, value in non_default_params.items():
if param == "response_format" and isinstance(value, dict):
-
ignore_response_format_types = ["text"]
if value["type"] in ignore_response_format_types: # value is a no-op
continue
@@ -247,8 +275,11 @@ class AmazonConverseConfig(BaseConfig):
optional_params = self._add_tools_to_optional_params(
optional_params=optional_params, tools=[_tool]
)
- if litellm.utils.supports_tool_choice(
- model=model, custom_llm_provider=self.custom_llm_provider
+ if (
+ litellm.utils.supports_tool_choice(
+ model=model, custom_llm_provider=self.custom_llm_provider
+ )
+ and not is_thinking_enabled
):
optional_params["tool_choice"] = ToolChoiceValuesBlock(
tool=SpecificToolChoiceBlock(
@@ -273,8 +304,11 @@ class AmazonConverseConfig(BaseConfig):
if param == "top_p":
optional_params["topP"] = value
if param == "tools" and isinstance(value, list):
- optional_params = self._add_tools_to_optional_params(
- optional_params=optional_params, tools=value
+ self._apply_tool_call_transformation(
+ tools=cast(List[OpenAIChatCompletionToolParam], value),
+ model=model,
+ non_default_params=non_default_params,
+ optional_params=optional_params,
)
if param == "tool_choice":
_tool_choice_value = self.map_tool_choice_values(
@@ -284,6 +318,15 @@ class AmazonConverseConfig(BaseConfig):
optional_params["tool_choice"] = _tool_choice_value
if param == "thinking":
optional_params["thinking"] = value
+ elif param == "reasoning_effort" and isinstance(value, str):
+ optional_params["thinking"] = AnthropicConfig._map_reasoning_effort(
+ value
+ )
+
+ self.update_optional_params_with_thinking_tokens(
+ non_default_params=non_default_params, optional_params=optional_params
+ )
+
return optional_params
@overload
@@ -333,25 +376,27 @@ class AmazonConverseConfig(BaseConfig):
system_content_blocks: List[SystemContentBlock] = []
for idx, message in enumerate(messages):
if message["role"] == "system":
- _system_content_block: Optional[SystemContentBlock] = None
- _cache_point_block: Optional[SystemContentBlock] = None
- if isinstance(message["content"], str) and len(message["content"]) > 0:
- _system_content_block = SystemContentBlock(text=message["content"])
- _cache_point_block = self._get_cache_point_block(
+ system_prompt_indices.append(idx)
+ if isinstance(message["content"], str) and message["content"]:
+ system_content_blocks.append(
+ SystemContentBlock(text=message["content"])
+ )
+ cache_block = self._get_cache_point_block(
message, block_type="system"
)
+ if cache_block:
+ system_content_blocks.append(cache_block)
elif isinstance(message["content"], list):
for m in message["content"]:
- if m.get("type", "") == "text" and len(m["text"]) > 0:
- _system_content_block = SystemContentBlock(text=m["text"])
- _cache_point_block = self._get_cache_point_block(
+ if m.get("type") == "text" and m.get("text"):
+ system_content_blocks.append(
+ SystemContentBlock(text=m["text"])
+ )
+ cache_block = self._get_cache_point_block(
m, block_type="system"
)
- if _system_content_block is not None:
- system_content_blocks.append(_system_content_block)
- if _cache_point_block is not None:
- system_content_blocks.append(_cache_point_block)
- system_prompt_indices.append(idx)
+ if cache_block:
+ system_content_blocks.append(cache_block)
if len(system_prompt_indices) > 0:
for idx in reversed(system_prompt_indices):
messages.pop(idx)
@@ -386,7 +431,6 @@ class AmazonConverseConfig(BaseConfig):
optional_params: dict,
messages: Optional[List[AllMessageValues]] = None,
) -> CommonRequestObject:
-
## VALIDATE REQUEST
"""
Bedrock doesn't support tool calling without `tools=` param specified.
@@ -412,11 +456,11 @@ class AmazonConverseConfig(BaseConfig):
AmazonConverseConfig.__annotations__.keys()
) + ["top_k"]
supported_tool_call_params = ["tools", "tool_choice"]
- supported_guardrail_params = ["guardrailConfig"]
+ supported_config_params = list(self.get_config_blocks().keys())
total_supported_params = (
supported_converse_params
+ supported_tool_call_params
- + supported_guardrail_params
+ + supported_config_params
)
inference_params.pop("json_mode", None) # used for handling json_schema
@@ -455,12 +499,11 @@ class AmazonConverseConfig(BaseConfig):
),
}
- # Guardrail Config
- guardrail_config: Optional[GuardrailConfigBlock] = None
- request_guardrails_config = inference_params.pop("guardrailConfig", None)
- if request_guardrails_config is not None:
- guardrail_config = GuardrailConfigBlock(**request_guardrails_config)
- data["guardrailConfig"] = guardrail_config
+ # Handle all config blocks
+ for config_name, config_class in self.get_config_blocks().items():
+ config_value = inference_params.pop(config_name, None)
+ if config_value is not None:
+ data[config_name] = config_class(**config_value) # type: ignore
# Tool Config
if bedrock_tool_config is not None:
@@ -587,9 +630,11 @@ class AmazonConverseConfig(BaseConfig):
def _transform_thinking_blocks(
self, thinking_blocks: List[BedrockConverseReasoningContentBlock]
- ) -> List[ChatCompletionThinkingBlock]:
+ ) -> List[Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]]:
"""Return a consistent format for thinking blocks between Anthropic and Bedrock."""
- thinking_blocks_list: List[ChatCompletionThinkingBlock] = []
+ thinking_blocks_list: List[
+ Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]
+ ] = []
for block in thinking_blocks:
if "reasoningText" in block:
_thinking_block = ChatCompletionThinkingBlock(type="thinking")
@@ -600,6 +645,11 @@ class AmazonConverseConfig(BaseConfig):
if _signature is not None:
_thinking_block["signature"] = _signature
thinking_blocks_list.append(_thinking_block)
+ elif "redactedContent" in block:
+ _redacted_block = ChatCompletionRedactedThinkingBlock(
+ type="redacted_thinking", data=block["redactedContent"]
+ )
+ thinking_blocks_list.append(_redacted_block)
return thinking_blocks_list
def _transform_usage(self, usage: ConverseTokenUsageBlock) -> Usage:
@@ -613,8 +663,10 @@ class AmazonConverseConfig(BaseConfig):
cache_read_input_tokens = usage["cacheReadInputTokens"]
input_tokens += cache_read_input_tokens
if "cacheWriteInputTokens" in usage:
+ """
+ Do not increment prompt_tokens with cacheWriteInputTokens
+ """
cache_creation_input_tokens = usage["cacheWriteInputTokens"]
- input_tokens += cache_creation_input_tokens
prompt_tokens_details = PromptTokensDetailsWrapper(
cached_tokens=cache_read_input_tokens
@@ -702,9 +754,9 @@ class AmazonConverseConfig(BaseConfig):
chat_completion_message: ChatCompletionResponseMessage = {"role": "assistant"}
content_str = ""
tools: List[ChatCompletionToolCallChunk] = []
- reasoningContentBlocks: Optional[List[BedrockConverseReasoningContentBlock]] = (
- None
- )
+ reasoningContentBlocks: Optional[
+ List[BedrockConverseReasoningContentBlock]
+ ] = None
if message is not None:
for idx, content in enumerate(message["content"]):
@@ -714,7 +766,6 @@ class AmazonConverseConfig(BaseConfig):
if "text" in content:
content_str += content["text"]
if "toolUse" in content:
-
## check tool name was formatted by litellm
_response_tool_name = content["toolUse"]["name"]
response_tool_name = get_bedrock_tool_name(
@@ -741,12 +792,12 @@ class AmazonConverseConfig(BaseConfig):
chat_completion_message["provider_specific_fields"] = {
"reasoningContentBlocks": reasoningContentBlocks,
}
- chat_completion_message["reasoning_content"] = (
- self._transform_reasoning_content(reasoningContentBlocks)
- )
- chat_completion_message["thinking_blocks"] = (
- self._transform_thinking_blocks(reasoningContentBlocks)
- )
+ chat_completion_message[
+ "reasoning_content"
+ ] = self._transform_reasoning_content(reasoningContentBlocks)
+ chat_completion_message[
+ "thinking_blocks"
+ ] = self._transform_thinking_blocks(reasoningContentBlocks)
chat_completion_message["content"] = content_str
if json_mode is True and tools is not None and len(tools) == 1:
# to support 'json_schema' logic on bedrock models
@@ -792,6 +843,7 @@ class AmazonConverseConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
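
Taken together, `get_config_blocks()`, the generic block handling in the request transform, and the new `reasoning_effort` branch let callers pass these options straight through `litellm.completion`. A hedged usage sketch follows; the model id and guardrail identifiers are placeholders, and the `performanceConfig` shape assumes Bedrock's `{"latency": "optimized"}` form.

```python
import litellm

# Sketch only: requires valid AWS credentials and a Bedrock-enabled region.
response = litellm.completion(
    model="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",  # placeholder model id
    messages=[{"role": "user", "content": "Summarize our refund policy in two sentences."}],
    # Mapped to a Converse `thinking` block via AnthropicConfig._map_reasoning_effort.
    reasoning_effort="low",
    # Popped from inference params and re-typed via get_config_blocks().
    guardrailConfig={"guardrailIdentifier": "gr-placeholder", "guardrailVersion": "DRAFT"},
    performanceConfig={"latency": "optimized"},
)
print(response.choices[0].message.content)
```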
diff --git a/litellm/llms/bedrock/chat/invoke_handler.py b/litellm/llms/bedrock/chat/invoke_handler.py
index 9fa791e069..dfd1658543 100644
--- a/litellm/llms/bedrock/chat/invoke_handler.py
+++ b/litellm/llms/bedrock/chat/invoke_handler.py
@@ -50,6 +50,7 @@ from litellm.llms.custom_httpx.http_handler import (
)
from litellm.types.llms.bedrock import *
from litellm.types.llms.openai import (
+ ChatCompletionRedactedThinkingBlock,
ChatCompletionThinkingBlock,
ChatCompletionToolCallChunk,
ChatCompletionToolCallFunctionChunk,
@@ -496,9 +497,9 @@ class BedrockLLM(BaseAWSLLM):
content=None,
)
model_response.choices[0].message = _message # type: ignore
- model_response._hidden_params["original_response"] = (
- outputText # allow user to access raw anthropic tool calling response
- )
+ model_response._hidden_params[
+ "original_response"
+ ] = outputText # allow user to access raw anthropic tool calling response
if (
_is_function_call is True
and stream is not None
@@ -806,9 +807,9 @@ class BedrockLLM(BaseAWSLLM):
): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in
inference_params[k] = v
if stream is True:
- inference_params["stream"] = (
- True # cohere requires stream = True in inference params
- )
+ inference_params[
+ "stream"
+ ] = True # cohere requires stream = True in inference params
data = json.dumps({"prompt": prompt, **inference_params})
elif provider == "anthropic":
if model.startswith("anthropic.claude-3"):
@@ -1205,7 +1206,6 @@ class BedrockLLM(BaseAWSLLM):
def get_response_stream_shape():
global _response_stream_shape_cache
if _response_stream_shape_cache is None:
-
from botocore.loaders import Loader
from botocore.model import ServiceModel
@@ -1256,19 +1256,33 @@ class AWSEventStreamDecoder:
def translate_thinking_blocks(
self, thinking_block: BedrockConverseReasoningContentBlockDelta
- ) -> Optional[List[ChatCompletionThinkingBlock]]:
+ ) -> Optional[
+ List[Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]]
+ ]:
"""
Translate the thinking blocks to a string
"""
- thinking_blocks_list: List[ChatCompletionThinkingBlock] = []
- _thinking_block = ChatCompletionThinkingBlock(type="thinking")
+ thinking_blocks_list: List[
+ Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]
+ ] = []
+ _thinking_block: Optional[
+ Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]
+ ] = None
+
if "text" in thinking_block:
+ _thinking_block = ChatCompletionThinkingBlock(type="thinking")
_thinking_block["thinking"] = thinking_block["text"]
elif "signature" in thinking_block:
+ _thinking_block = ChatCompletionThinkingBlock(type="thinking")
_thinking_block["signature"] = thinking_block["signature"]
_thinking_block["thinking"] = "" # consistent with anthropic response
- thinking_blocks_list.append(_thinking_block)
+ elif "redactedContent" in thinking_block:
+ _thinking_block = ChatCompletionRedactedThinkingBlock(
+ type="redacted_thinking", data=thinking_block["redactedContent"]
+ )
+ if _thinking_block is not None:
+ thinking_blocks_list.append(_thinking_block)
return thinking_blocks_list
def converse_chunk_parser(self, chunk_data: dict) -> ModelResponseStream:
@@ -1280,31 +1294,44 @@ class AWSEventStreamDecoder:
usage: Optional[Usage] = None
provider_specific_fields: dict = {}
reasoning_content: Optional[str] = None
- thinking_blocks: Optional[List[ChatCompletionThinkingBlock]] = None
+ thinking_blocks: Optional[
+ List[
+ Union[
+ ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock
+ ]
+ ]
+ ] = None
index = int(chunk_data.get("contentBlockIndex", 0))
if "start" in chunk_data:
start_obj = ContentBlockStartEvent(**chunk_data["start"])
self.content_blocks = [] # reset
- if (
- start_obj is not None
- and "toolUse" in start_obj
- and start_obj["toolUse"] is not None
- ):
- ## check tool name was formatted by litellm
- _response_tool_name = start_obj["toolUse"]["name"]
- response_tool_name = get_bedrock_tool_name(
- response_tool_name=_response_tool_name
- )
- tool_use = {
- "id": start_obj["toolUse"]["toolUseId"],
- "type": "function",
- "function": {
- "name": response_tool_name,
- "arguments": "",
- },
- "index": index,
- }
+ if start_obj is not None:
+ if "toolUse" in start_obj and start_obj["toolUse"] is not None:
+ ## check tool name was formatted by litellm
+ _response_tool_name = start_obj["toolUse"]["name"]
+ response_tool_name = get_bedrock_tool_name(
+ response_tool_name=_response_tool_name
+ )
+ tool_use = {
+ "id": start_obj["toolUse"]["toolUseId"],
+ "type": "function",
+ "function": {
+ "name": response_tool_name,
+ "arguments": "",
+ },
+ "index": index,
+ }
+ elif (
+ "reasoningContent" in start_obj
+ and start_obj["reasoningContent"] is not None
+ ): # redacted thinking can be in start object
+ thinking_blocks = self.translate_thinking_blocks(
+ start_obj["reasoningContent"]
+ )
+ provider_specific_fields = {
+ "reasoningContent": start_obj["reasoningContent"],
+ }
elif "delta" in chunk_data:
delta_obj = ContentBlockDeltaEvent(**chunk_data["delta"])
self.content_blocks.append(delta_obj)
@@ -1354,6 +1381,7 @@ class AWSEventStreamDecoder:
finish_reason = map_finish_reason(chunk_data.get("stopReason", "stop"))
elif "usage" in chunk_data:
usage = converse_config._transform_usage(chunk_data.get("usage", {}))
+
model_response_provider_specific_fields = {}
if "trace" in chunk_data:
trace = chunk_data.get("trace")
@@ -1538,7 +1566,6 @@ class AmazonDeepSeekR1StreamDecoder(AWSEventStreamDecoder):
model: str,
sync_stream: bool,
) -> None:
-
super().__init__(model=model)
from litellm.llms.bedrock.chat.invoke_transformations.amazon_deepseek_transformation import (
AmazonDeepseekR1ResponseIterator,
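
The decoder changes above boil down to a three-way branch over the keys Bedrock may place in a `reasoningContent` start or delta event. A minimal standalone sketch of that branching, using plain dicts instead of the typed blocks:

```python
from typing import Optional


def translate_reasoning_block(block: dict) -> Optional[dict]:
    # "text" -> normal thinking content; "signature" -> empty thinking plus signature
    # (matching the Anthropic response shape); "redactedContent" -> redacted thinking.
    if "text" in block:
        return {"type": "thinking", "thinking": block["text"]}
    if "signature" in block:
        return {"type": "thinking", "thinking": "", "signature": block["signature"]}
    if "redactedContent" in block:
        return {"type": "redacted_thinking", "data": block["redactedContent"]}
    return None


assert translate_reasoning_block({"redactedContent": "opaque-blob"}) == {
    "type": "redacted_thinking",
    "data": "opaque-blob",
}
```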
diff --git a/litellm/llms/bedrock/chat/invoke_transformations/amazon_cohere_transformation.py b/litellm/llms/bedrock/chat/invoke_transformations/amazon_cohere_transformation.py
index e9479c8f32..9c2c95e6ce 100644
--- a/litellm/llms/bedrock/chat/invoke_transformations/amazon_cohere_transformation.py
+++ b/litellm/llms/bedrock/chat/invoke_transformations/amazon_cohere_transformation.py
@@ -1,13 +1,13 @@
import types
from typing import List, Optional
-from litellm.llms.base_llm.chat.transformation import BaseConfig
from litellm.llms.bedrock.chat.invoke_transformations.base_invoke_transformation import (
AmazonInvokeConfig,
)
+from litellm.llms.cohere.chat.transformation import CohereChatConfig
-class AmazonCohereConfig(AmazonInvokeConfig, BaseConfig):
+class AmazonCohereConfig(AmazonInvokeConfig, CohereChatConfig):
"""
Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=command
@@ -19,7 +19,6 @@ class AmazonCohereConfig(AmazonInvokeConfig, BaseConfig):
"""
max_tokens: Optional[int] = None
- temperature: Optional[float] = None
return_likelihood: Optional[str] = None
def __init__(
@@ -55,11 +54,10 @@ class AmazonCohereConfig(AmazonInvokeConfig, BaseConfig):
}
def get_supported_openai_params(self, model: str) -> List[str]:
- return [
- "max_tokens",
- "temperature",
- "stream",
- ]
+ supported_params = CohereChatConfig.get_supported_openai_params(
+ self, model=model
+ )
+ return supported_params
def map_openai_params(
self,
@@ -68,11 +66,10 @@ class AmazonCohereConfig(AmazonInvokeConfig, BaseConfig):
model: str,
drop_params: bool,
) -> dict:
- for k, v in non_default_params.items():
- if k == "stream":
- optional_params["stream"] = v
- if k == "temperature":
- optional_params["temperature"] = v
- if k == "max_tokens":
- optional_params["max_tokens"] = v
- return optional_params
+ return CohereChatConfig.map_openai_params(
+ self,
+ non_default_params=non_default_params,
+ optional_params=optional_params,
+ model=model,
+ drop_params=drop_params,
+ )
diff --git a/litellm/llms/bedrock/chat/invoke_transformations/amazon_nova_transformation.py b/litellm/llms/bedrock/chat/invoke_transformations/amazon_nova_transformation.py
index 9d41beceff..a81d55f0ad 100644
--- a/litellm/llms/bedrock/chat/invoke_transformations/amazon_nova_transformation.py
+++ b/litellm/llms/bedrock/chat/invoke_transformations/amazon_nova_transformation.py
@@ -6,14 +6,21 @@ Inherits from `AmazonConverseConfig`
Nova + Invoke API Tutorial: https://docs.aws.amazon.com/nova/latest/userguide/using-invoke-api.html
"""
-from typing import List
+from typing import Any, List, Optional
+
+import httpx
import litellm
+from litellm.litellm_core_utils.litellm_logging import Logging
from litellm.types.llms.bedrock import BedrockInvokeNovaRequest
from litellm.types.llms.openai import AllMessageValues
+from litellm.types.utils import ModelResponse
+
+from ..converse_transformation import AmazonConverseConfig
+from .base_invoke_transformation import AmazonInvokeConfig
-class AmazonInvokeNovaConfig(litellm.AmazonConverseConfig):
+class AmazonInvokeNovaConfig(AmazonInvokeConfig, AmazonConverseConfig):
"""
Config for sending `nova` requests to `/bedrock/invoke/`
"""
@@ -21,6 +28,20 @@ class AmazonInvokeNovaConfig(litellm.AmazonConverseConfig):
def __init__(self, **kwargs):
super().__init__(**kwargs)
+ def get_supported_openai_params(self, model: str) -> list:
+ return AmazonConverseConfig.get_supported_openai_params(self, model)
+
+ def map_openai_params(
+ self,
+ non_default_params: dict,
+ optional_params: dict,
+ model: str,
+ drop_params: bool,
+ ) -> dict:
+ return AmazonConverseConfig.map_openai_params(
+ self, non_default_params, optional_params, model, drop_params
+ )
+
def transform_request(
self,
model: str,
@@ -29,7 +50,8 @@ class AmazonInvokeNovaConfig(litellm.AmazonConverseConfig):
litellm_params: dict,
headers: dict,
) -> dict:
- _transformed_nova_request = super().transform_request(
+ _transformed_nova_request = AmazonConverseConfig.transform_request(
+ self,
model=model,
messages=messages,
optional_params=optional_params,
@@ -45,6 +67,35 @@ class AmazonInvokeNovaConfig(litellm.AmazonConverseConfig):
)
return bedrock_invoke_nova_request
+ def transform_response(
+ self,
+ model: str,
+ raw_response: httpx.Response,
+ model_response: ModelResponse,
+ logging_obj: Logging,
+ request_data: dict,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ encoding: Any,
+ api_key: Optional[str] = None,
+ json_mode: Optional[bool] = None,
+ ) -> litellm.ModelResponse:
+ return AmazonConverseConfig.transform_response(
+ self,
+ model,
+ raw_response,
+ model_response,
+ logging_obj,
+ request_data,
+ messages,
+ optional_params,
+ litellm_params,
+ encoding,
+ api_key,
+ json_mode,
+ )
+
def _filter_allowed_fields(
self, bedrock_invoke_nova_request: BedrockInvokeNovaRequest
) -> dict:
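
`AmazonInvokeNovaConfig` now inherits from both `AmazonInvokeConfig` and `AmazonConverseConfig`, so the overrides above call the intended base class explicitly instead of relying on `super()` and the MRO. A toy sketch of the same dispatch pattern, with hypothetical class names, shows why:

```python
class InvokeBase:
    def map_params(self, params: dict) -> dict:
        return {"invoke_style": params}


class ConverseBase:
    def map_params(self, params: dict) -> dict:
        return {"converse_style": params}


class NovaConfig(InvokeBase, ConverseBase):
    # The MRO would pick InvokeBase.map_params first; Nova wants the Converse
    # request/response shape, so it names the base class explicitly.
    def map_params(self, params: dict) -> dict:
        return ConverseBase.map_params(self, params)


assert NovaConfig().map_params({"temperature": 0.2}) == {
    "converse_style": {"temperature": 0.2}
}
```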
diff --git a/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py b/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py
index 133eb659df..67194e83e7 100644
--- a/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py
+++ b/litellm/llms/bedrock/chat/invoke_transformations/base_invoke_transformation.py
@@ -74,6 +74,7 @@ class AmazonInvokeConfig(BaseConfig, BaseAWSLLM):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
@@ -225,9 +226,9 @@ class AmazonInvokeConfig(BaseConfig, BaseAWSLLM):
): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in
inference_params[k] = v
if stream is True:
- inference_params["stream"] = (
- True # cohere requires stream = True in inference params
- )
+ inference_params[
+ "stream"
+ ] = True # cohere requires stream = True in inference params
request_data = {"prompt": prompt, **inference_params}
elif provider == "anthropic":
return litellm.AmazonAnthropicClaude3Config().transform_request(
@@ -311,7 +312,6 @@ class AmazonInvokeConfig(BaseConfig, BaseAWSLLM):
api_key: Optional[str] = None,
json_mode: Optional[bool] = None,
) -> ModelResponse:
-
try:
completion_response = raw_response.json()
except Exception:
@@ -442,6 +442,7 @@ class AmazonInvokeConfig(BaseConfig, BaseAWSLLM):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/bedrock/common_utils.py b/litellm/llms/bedrock/common_utils.py
index 54be359897..69a249b842 100644
--- a/litellm/llms/bedrock/common_utils.py
+++ b/litellm/llms/bedrock/common_utils.py
@@ -44,7 +44,18 @@ class AmazonBedrockGlobalConfig:
)
def get_ap_regions(self) -> List[str]:
- return ["ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1"]
+ """
+ Source: https://www.aws-services.info/bedrock.html
+ """
+ return [
+ "ap-northeast-1", # Asia Pacific (Tokyo)
+ "ap-northeast-2", # Asia Pacific (Seoul)
+ "ap-northeast-3", # Asia Pacific (Osaka)
+ "ap-south-1", # Asia Pacific (Mumbai)
+ "ap-south-2", # Asia Pacific (Hyderabad)
+ "ap-southeast-1", # Asia Pacific (Singapore)
+ "ap-southeast-2", # Asia Pacific (Sydney)
+ ]
def get_sa_regions(self) -> List[str]:
return ["sa-east-1"]
@@ -54,10 +65,14 @@ class AmazonBedrockGlobalConfig:
Source: https://www.aws-services.info/bedrock.html
"""
return [
- "eu-west-1",
- "eu-west-2",
- "eu-west-3",
- "eu-central-1",
+ "eu-west-1", # Europe (Ireland)
+ "eu-west-2", # Europe (London)
+ "eu-west-3", # Europe (Paris)
+ "eu-central-1", # Europe (Frankfurt)
+ "eu-central-2", # Europe (Zurich)
+ "eu-south-1", # Europe (Milan)
+ "eu-south-2", # Europe (Spain)
+ "eu-north-1", # Europe (Stockholm)
]
def get_ca_regions(self) -> List[str]:
@@ -68,11 +83,12 @@ class AmazonBedrockGlobalConfig:
Source: https://www.aws-services.info/bedrock.html
"""
return [
- "us-east-2",
- "us-east-1",
- "us-west-1",
- "us-west-2",
- "us-gov-west-1",
+ "us-east-1", # US East (N. Virginia)
+ "us-east-2", # US East (Ohio)
+ "us-west-1", # US West (N. California)
+ "us-west-2", # US West (Oregon)
+ "us-gov-east-1", # AWS GovCloud (US-East)
+ "us-gov-west-1", # AWS GovCloud (US-West)
]
@@ -314,7 +330,6 @@ def get_bedrock_tool_name(response_tool_name: str) -> str:
class BedrockModelInfo(BaseLLMModelInfo):
-
global_config = AmazonBedrockGlobalConfig()
all_global_regions = global_config.get_all_regions()
@@ -336,13 +351,7 @@ class BedrockModelInfo(BaseLLMModelInfo):
return model
@staticmethod
- def get_base_model(model: str) -> str:
- """
- Get the base model from the given model name.
-
- Handle model names like - "us.meta.llama3-2-11b-instruct-v1:0" -> "meta.llama3-2-11b-instruct-v1"
- AND "meta.llama3-2-11b-instruct-v1:0" -> "meta.llama3-2-11b-instruct-v1"
- """
+ def get_non_litellm_routing_model_name(model: str) -> str:
if model.startswith("bedrock/"):
model = model.split("/", 1)[1]
@@ -352,6 +361,18 @@ class BedrockModelInfo(BaseLLMModelInfo):
if model.startswith("invoke/"):
model = model.split("/", 1)[1]
+ return model
+
+ @staticmethod
+ def get_base_model(model: str) -> str:
+ """
+ Get the base model from the given model name.
+
+ Handle model names like - "us.meta.llama3-2-11b-instruct-v1:0" -> "meta.llama3-2-11b-instruct-v1"
+ AND "meta.llama3-2-11b-instruct-v1:0" -> "meta.llama3-2-11b-instruct-v1"
+ """
+
+ model = BedrockModelInfo.get_non_litellm_routing_model_name(model=model)
model = BedrockModelInfo.extract_model_name_from_arn(model)
potential_region = model.split(".", 1)[0]
@@ -386,12 +407,16 @@ class BedrockModelInfo(BaseLLMModelInfo):
Get the bedrock route for the given model.
"""
base_model = BedrockModelInfo.get_base_model(model)
+ alt_model = BedrockModelInfo.get_non_litellm_routing_model_name(model=model)
if "invoke/" in model:
return "invoke"
elif "converse_like" in model:
return "converse_like"
elif "converse/" in model:
return "converse"
- elif base_model in litellm.bedrock_converse_models:
+ elif (
+ base_model in litellm.bedrock_converse_models
+ or alt_model in litellm.bedrock_converse_models
+ ):
return "converse"
return "invoke"
diff --git a/litellm/llms/bedrock/embed/amazon_titan_multimodal_transformation.py b/litellm/llms/bedrock/embed/amazon_titan_multimodal_transformation.py
index 6c1147f24a..338029adc3 100644
--- a/litellm/llms/bedrock/embed/amazon_titan_multimodal_transformation.py
+++ b/litellm/llms/bedrock/embed/amazon_titan_multimodal_transformation.py
@@ -33,9 +33,9 @@ class AmazonTitanMultimodalEmbeddingG1Config:
) -> dict:
for k, v in non_default_params.items():
if k == "dimensions":
- optional_params["embeddingConfig"] = (
- AmazonTitanMultimodalEmbeddingConfig(outputEmbeddingLength=v)
- )
+ optional_params[
+ "embeddingConfig"
+ ] = AmazonTitanMultimodalEmbeddingConfig(outputEmbeddingLength=v)
return optional_params
def _transform_request(
@@ -58,7 +58,6 @@ class AmazonTitanMultimodalEmbeddingG1Config:
def _transform_response(
self, response_list: List[dict], model: str
) -> EmbeddingResponse:
-
total_prompt_tokens = 0
transformed_responses: List[Embedding] = []
for index, response in enumerate(response_list):
diff --git a/litellm/llms/bedrock/image/amazon_nova_canvas_transformation.py b/litellm/llms/bedrock/image/amazon_nova_canvas_transformation.py
index de46edb923..b331dd1b1d 100644
--- a/litellm/llms/bedrock/image/amazon_nova_canvas_transformation.py
+++ b/litellm/llms/bedrock/image/amazon_nova_canvas_transformation.py
@@ -1,11 +1,16 @@
import types
-from typing import List, Optional
+from typing import Any, Dict, List, Optional
from openai.types.image import Image
from litellm.types.llms.bedrock import (
- AmazonNovaCanvasTextToImageRequest, AmazonNovaCanvasTextToImageResponse,
- AmazonNovaCanvasTextToImageParams, AmazonNovaCanvasRequestBase,
+ AmazonNovaCanvasColorGuidedGenerationParams,
+ AmazonNovaCanvasColorGuidedRequest,
+ AmazonNovaCanvasImageGenerationConfig,
+ AmazonNovaCanvasRequestBase,
+ AmazonNovaCanvasTextToImageParams,
+ AmazonNovaCanvasTextToImageRequest,
+ AmazonNovaCanvasTextToImageResponse,
)
from litellm.types.utils import ImageResponse
@@ -22,7 +27,7 @@ class AmazonNovaCanvasConfig:
k: v
for k, v in cls.__dict__.items()
if not k.startswith("__")
- and not isinstance(
+ and not isinstance(
v,
(
types.FunctionType,
@@ -31,13 +36,12 @@ class AmazonNovaCanvasConfig:
staticmethod,
),
)
- and v is not None
+ and v is not None
}
@classmethod
def get_supported_openai_params(cls, model: Optional[str] = None) -> List:
- """
- """
+ """ """
return ["n", "size", "quality"]
@classmethod
@@ -55,7 +59,7 @@ class AmazonNovaCanvasConfig:
@classmethod
def transform_request_body(
- cls, text: str, optional_params: dict
+ cls, text: str, optional_params: dict
) -> AmazonNovaCanvasRequestBase:
"""
Transform the request body for Amazon Nova Canvas model
@@ -64,11 +68,64 @@ class AmazonNovaCanvasConfig:
image_generation_config = optional_params.pop("imageGenerationConfig", {})
image_generation_config = {**image_generation_config, **optional_params}
if task_type == "TEXT_IMAGE":
- text_to_image_params = image_generation_config.pop("textToImageParams", {})
- text_to_image_params = {"text" :text, **text_to_image_params}
- text_to_image_params = AmazonNovaCanvasTextToImageParams(**text_to_image_params)
- return AmazonNovaCanvasTextToImageRequest(textToImageParams=text_to_image_params, taskType=task_type,
- imageGenerationConfig=image_generation_config)
+ text_to_image_params: Dict[str, Any] = image_generation_config.pop(
+ "textToImageParams", {}
+ )
+ text_to_image_params = {"text": text, **text_to_image_params}
+ try:
+ text_to_image_params_typed = AmazonNovaCanvasTextToImageParams(
+ **text_to_image_params # type: ignore
+ )
+ except Exception as e:
+ raise ValueError(
+ f"Error transforming text to image params: {e}. Got params: {text_to_image_params}, Expected params: {AmazonNovaCanvasTextToImageParams.__annotations__}"
+ )
+
+ try:
+ image_generation_config_typed = AmazonNovaCanvasImageGenerationConfig(
+ **image_generation_config
+ )
+ except Exception as e:
+ raise ValueError(
+ f"Error transforming image generation config: {e}. Got params: {image_generation_config}, Expected params: {AmazonNovaCanvasImageGenerationConfig.__annotations__}"
+ )
+
+ return AmazonNovaCanvasTextToImageRequest(
+ textToImageParams=text_to_image_params_typed,
+ taskType=task_type,
+ imageGenerationConfig=image_generation_config_typed,
+ )
+ if task_type == "COLOR_GUIDED_GENERATION":
+ color_guided_generation_params: Dict[
+ str, Any
+ ] = image_generation_config.pop("colorGuidedGenerationParams", {})
+ color_guided_generation_params = {
+ "text": text,
+ **color_guided_generation_params,
+ }
+ try:
+ color_guided_generation_params_typed = AmazonNovaCanvasColorGuidedGenerationParams(
+ **color_guided_generation_params # type: ignore
+ )
+ except Exception as e:
+ raise ValueError(
+ f"Error transforming color guided generation params: {e}. Got params: {color_guided_generation_params}, Expected params: {AmazonNovaCanvasColorGuidedGenerationParams.__annotations__}"
+ )
+
+ try:
+ image_generation_config_typed = AmazonNovaCanvasImageGenerationConfig(
+ **image_generation_config
+ )
+ except Exception as e:
+ raise ValueError(
+ f"Error transforming image generation config: {e}. Got params: {image_generation_config}, Expected params: {AmazonNovaCanvasImageGenerationConfig.__annotations__}"
+ )
+
+ return AmazonNovaCanvasColorGuidedRequest(
+ taskType=task_type,
+ colorGuidedGenerationParams=color_guided_generation_params_typed,
+ imageGenerationConfig=image_generation_config_typed,
+ )
raise NotImplementedError(f"Task type {task_type} is not supported")
@classmethod
@@ -79,7 +136,9 @@ class AmazonNovaCanvasConfig:
_size = non_default_params.get("size")
if _size is not None:
width, height = _size.split("x")
- optional_params["width"], optional_params["height"] = int(width), int(height)
+ optional_params["width"], optional_params["height"] = int(width), int(
+ height
+ )
if non_default_params.get("n") is not None:
optional_params["numberOfImages"] = non_default_params.get("n")
if non_default_params.get("quality") is not None:
@@ -91,7 +150,7 @@ class AmazonNovaCanvasConfig:
@classmethod
def transform_response_dict_to_openai_response(
- cls, model_response: ImageResponse, response_dict: dict
+ cls, model_response: ImageResponse, response_dict: dict
) -> ImageResponse:
"""
Transform the response dict to the OpenAI response
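
For reference, the optional params feeding `transform_request_body` for the two supported task types look roughly like the dicts below. Field names are hedged: `negativeText` and `colors` follow the public Nova Canvas request format and are assumed to be valid members of the corresponding TypedDicts.

```python
# TEXT_IMAGE (the default task type): everything that is not textToImageParams
# is merged into imageGenerationConfig before validation.
text_to_image_optional_params = {
    "taskType": "TEXT_IMAGE",
    "textToImageParams": {"negativeText": "blurry, low quality"},
    "numberOfImages": 1,
    "width": 1024,
    "height": 1024,
}

# COLOR_GUIDED_GENERATION: the prompt text is merged into
# colorGuidedGenerationParams before the typed request is built.
color_guided_optional_params = {
    "taskType": "COLOR_GUIDED_GENERATION",
    "colorGuidedGenerationParams": {"colors": ["#FF9800", "#2196F3"]},
    "numberOfImages": 1,
}
```

In practice `map_openai_params` fills `width`, `height`, `numberOfImages`, and `quality` from the OpenAI `size`/`n`/`quality` params, so callers usually only supply `taskType` and the task-specific params block.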
diff --git a/litellm/llms/bedrock/image/image_handler.py b/litellm/llms/bedrock/image/image_handler.py
index 8f7762e547..27258aa20f 100644
--- a/litellm/llms/bedrock/image/image_handler.py
+++ b/litellm/llms/bedrock/image/image_handler.py
@@ -267,7 +267,11 @@ class BedrockImageGeneration(BaseAWSLLM):
**inference_params,
}
elif provider == "amazon":
- return dict(litellm.AmazonNovaCanvasConfig.transform_request_body(text=prompt, optional_params=optional_params))
+ return dict(
+ litellm.AmazonNovaCanvasConfig.transform_request_body(
+ text=prompt, optional_params=optional_params
+ )
+ )
else:
raise BedrockError(
status_code=422, message=f"Unsupported model={model}, passed in"
@@ -303,8 +307,11 @@ class BedrockImageGeneration(BaseAWSLLM):
config_class = (
litellm.AmazonStability3Config
if litellm.AmazonStability3Config._is_stability_3_model(model=model)
- else litellm.AmazonNovaCanvasConfig if litellm.AmazonNovaCanvasConfig._is_nova_model(model=model)
- else litellm.AmazonStabilityConfig
+ else (
+ litellm.AmazonNovaCanvasConfig
+ if litellm.AmazonNovaCanvasConfig._is_nova_model(model=model)
+ else litellm.AmazonStabilityConfig
+ )
)
config_class.transform_response_dict_to_openai_response(
model_response=model_response,
diff --git a/litellm/llms/bedrock/rerank/handler.py b/litellm/llms/bedrock/rerank/handler.py
index cd8be6912c..f5a532bec1 100644
--- a/litellm/llms/bedrock/rerank/handler.py
+++ b/litellm/llms/bedrock/rerank/handler.py
@@ -60,7 +60,6 @@ class BedrockRerankHandler(BaseAWSLLM):
extra_headers: Optional[dict] = None,
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
) -> RerankResponse:
-
request_data = RerankRequest(
model=model,
query=query,
diff --git a/litellm/llms/bedrock/rerank/transformation.py b/litellm/llms/bedrock/rerank/transformation.py
index a5380febe9..be8250a967 100644
--- a/litellm/llms/bedrock/rerank/transformation.py
+++ b/litellm/llms/bedrock/rerank/transformation.py
@@ -29,7 +29,6 @@ from litellm.types.rerank import (
class BedrockRerankConfig:
-
def _transform_sources(
self, documents: List[Union[str, dict]]
) -> List[BedrockRerankSource]:
diff --git a/litellm/llms/clarifai/chat/transformation.py b/litellm/llms/clarifai/chat/transformation.py
index 916da73883..73be89fc6e 100644
--- a/litellm/llms/clarifai/chat/transformation.py
+++ b/litellm/llms/clarifai/chat/transformation.py
@@ -118,6 +118,7 @@ class ClarifaiConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/cloudflare/chat/transformation.py b/litellm/llms/cloudflare/chat/transformation.py
index 83c7483df9..9e59782bf7 100644
--- a/litellm/llms/cloudflare/chat/transformation.py
+++ b/litellm/llms/cloudflare/chat/transformation.py
@@ -60,6 +60,7 @@ class CloudflareChatConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
@@ -77,6 +78,7 @@ class CloudflareChatConfig(BaseConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
diff --git a/litellm/llms/codestral/completion/handler.py b/litellm/llms/codestral/completion/handler.py
index fc6d2886a9..555f7fccfb 100644
--- a/litellm/llms/codestral/completion/handler.py
+++ b/litellm/llms/codestral/completion/handler.py
@@ -314,7 +314,6 @@ class CodestralTextCompletion:
return _response
### SYNC COMPLETION
else:
-
response = litellm.module_level_client.post(
url=completion_url,
headers=headers,
@@ -352,13 +351,11 @@ class CodestralTextCompletion:
logger_fn=None,
headers={},
) -> TextCompletionResponse:
-
async_handler = get_async_httpx_client(
llm_provider=litellm.LlmProviders.TEXT_COMPLETION_CODESTRAL,
params={"timeout": timeout},
)
try:
-
response = await async_handler.post(
api_base, headers=headers, data=json.dumps(data)
)
diff --git a/litellm/llms/codestral/completion/transformation.py b/litellm/llms/codestral/completion/transformation.py
index 5955e91deb..fc7b6f5dbb 100644
--- a/litellm/llms/codestral/completion/transformation.py
+++ b/litellm/llms/codestral/completion/transformation.py
@@ -78,7 +78,6 @@ class CodestralTextCompletionConfig(OpenAITextCompletionConfig):
return optional_params
def _chunk_parser(self, chunk_data: str) -> GenericStreamingChunk:
-
text = ""
is_finished = False
finish_reason = None
diff --git a/litellm/llms/cohere/chat/transformation.py b/litellm/llms/cohere/chat/transformation.py
index 3ceec2dbba..5dd44aca80 100644
--- a/litellm/llms/cohere/chat/transformation.py
+++ b/litellm/llms/cohere/chat/transformation.py
@@ -54,7 +54,8 @@ class CohereChatConfig(BaseConfig):
search_queries_only (bool, optional): When true, the response will only contain a list of generated search queries.
documents (List[Dict[str, str]], optional): A list of relevant documents that the model can cite.
temperature (float, optional): A non-negative float that tunes the degree of randomness in generation.
- max_tokens (int, optional): The maximum number of tokens the model will generate as part of the response.
+ max_tokens [DEPRECATED - use max_completion_tokens] (int, optional): The maximum number of tokens the model will generate as part of the response.
+ max_completion_tokens (int, optional): The maximum number of tokens the model will generate as part of the response.
k (int, optional): Ensures only the top k most likely tokens are considered for generation at each step.
p (float, optional): Ensures that only the most likely tokens, with total probability mass of p, are considered for generation.
frequency_penalty (float, optional): Used to reduce repetitiveness of generated tokens.
@@ -75,6 +76,7 @@ class CohereChatConfig(BaseConfig):
documents: Optional[list] = None
temperature: Optional[int] = None
max_tokens: Optional[int] = None
+ max_completion_tokens: Optional[int] = None
k: Optional[int] = None
p: Optional[int] = None
frequency_penalty: Optional[int] = None
@@ -96,6 +98,7 @@ class CohereChatConfig(BaseConfig):
documents: Optional[list] = None,
temperature: Optional[int] = None,
max_tokens: Optional[int] = None,
+ max_completion_tokens: Optional[int] = None,
k: Optional[int] = None,
p: Optional[int] = None,
frequency_penalty: Optional[int] = None,
@@ -115,6 +118,7 @@ class CohereChatConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
@@ -131,6 +135,7 @@ class CohereChatConfig(BaseConfig):
"stream",
"temperature",
"max_tokens",
+ "max_completion_tokens",
"top_p",
"frequency_penalty",
"presence_penalty",
@@ -156,6 +161,8 @@ class CohereChatConfig(BaseConfig):
optional_params["temperature"] = value
if param == "max_tokens":
optional_params["max_tokens"] = value
+ if param == "max_completion_tokens":
+ optional_params["max_tokens"] = value
if param == "n":
optional_params["num_generations"] = value
if param == "top_p":
@@ -180,7 +187,6 @@ class CohereChatConfig(BaseConfig):
litellm_params: dict,
headers: dict,
) -> dict:
-
## Load Config
for k, v in litellm.CohereChatConfig.get_config().items():
if (
@@ -222,7 +228,6 @@ class CohereChatConfig(BaseConfig):
api_key: Optional[str] = None,
json_mode: Optional[bool] = None,
) -> ModelResponse:
-
try:
raw_response_json = raw_response.json()
model_response.choices[0].message.content = raw_response_json["text"] # type: ignore
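
With `max_completion_tokens` added to the supported params and mapped onto Cohere's `max_tokens`, OpenAI-style callers need no translation of their own. A hedged usage sketch (model string is a placeholder):

```python
import litellm

# Sketch only: requires a COHERE_API_KEY in the environment.
response = litellm.completion(
    model="cohere_chat/command-r",  # placeholder Cohere chat model
    messages=[{"role": "user", "content": "Give me three taglines for a coffee shop."}],
    max_completion_tokens=128,  # forwarded to Cohere as max_tokens
)
print(response.choices[0].message.content)
```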
diff --git a/litellm/llms/cohere/chat/v2_transformation.py b/litellm/llms/cohere/chat/v2_transformation.py
new file mode 100644
index 0000000000..76948e7f8b
--- /dev/null
+++ b/litellm/llms/cohere/chat/v2_transformation.py
@@ -0,0 +1,356 @@
+import time
+from typing import TYPE_CHECKING, Any, AsyncIterator, Iterator, List, Optional, Union
+
+import httpx
+
+import litellm
+from litellm.litellm_core_utils.prompt_templates.factory import cohere_messages_pt_v2
+from litellm.llms.base_llm.chat.transformation import BaseConfig, BaseLLMException
+from litellm.types.llms.cohere import CohereV2ChatResponse
+from litellm.types.llms.openai import AllMessageValues, ChatCompletionToolCallChunk
+from litellm.types.utils import ModelResponse, Usage
+
+from ..common_utils import CohereError
+from ..common_utils import ModelResponseIterator as CohereModelResponseIterator
+from ..common_utils import validate_environment as cohere_validate_environment
+
+if TYPE_CHECKING:
+ from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+ LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+ LiteLLMLoggingObj = Any
+
+
+class CohereV2ChatConfig(BaseConfig):
+ """
+ Configuration class for Cohere's API interface.
+
+ Args:
+ preamble (str, optional): When specified, the default Cohere preamble will be replaced with the provided one.
+ chat_history (List[Dict[str, str]], optional): A list of previous messages between the user and the model.
+ generation_id (str, optional): Unique identifier for the generated reply.
+ response_id (str, optional): Unique identifier for the response.
+ conversation_id (str, optional): An alternative to chat_history, creates or resumes a persisted conversation.
+ prompt_truncation (str, optional): Dictates how the prompt will be constructed. Options: 'AUTO', 'AUTO_PRESERVE_ORDER', 'OFF'.
+ connectors (List[Dict[str, str]], optional): List of connectors (e.g., web-search) to enrich the model's reply.
+ search_queries_only (bool, optional): When true, the response will only contain a list of generated search queries.
+ documents (List[Dict[str, str]], optional): A list of relevant documents that the model can cite.
+ temperature (float, optional): A non-negative float that tunes the degree of randomness in generation.
+ max_tokens (int, optional): The maximum number of tokens the model will generate as part of the response.
+ k (int, optional): Ensures only the top k most likely tokens are considered for generation at each step.
+ p (float, optional): Ensures that only the most likely tokens, with total probability mass of p, are considered for generation.
+ frequency_penalty (float, optional): Used to reduce repetitiveness of generated tokens.
+ presence_penalty (float, optional): Used to reduce repetitiveness of generated tokens.
+ tools (List[Dict[str, str]], optional): A list of available tools (functions) that the model may suggest invoking.
+ tool_results (List[Dict[str, Any]], optional): A list of results from invoking tools.
+ seed (int, optional): A seed to assist reproducibility of the model's response.
+ """
+
+ preamble: Optional[str] = None
+ chat_history: Optional[list] = None
+ generation_id: Optional[str] = None
+ response_id: Optional[str] = None
+ conversation_id: Optional[str] = None
+ prompt_truncation: Optional[str] = None
+ connectors: Optional[list] = None
+ search_queries_only: Optional[bool] = None
+ documents: Optional[list] = None
+ temperature: Optional[int] = None
+ max_tokens: Optional[int] = None
+ k: Optional[int] = None
+ p: Optional[int] = None
+ frequency_penalty: Optional[int] = None
+ presence_penalty: Optional[int] = None
+ tools: Optional[list] = None
+ tool_results: Optional[list] = None
+ seed: Optional[int] = None
+
+ def __init__(
+ self,
+ preamble: Optional[str] = None,
+ chat_history: Optional[list] = None,
+ generation_id: Optional[str] = None,
+ response_id: Optional[str] = None,
+ conversation_id: Optional[str] = None,
+ prompt_truncation: Optional[str] = None,
+ connectors: Optional[list] = None,
+ search_queries_only: Optional[bool] = None,
+ documents: Optional[list] = None,
+ temperature: Optional[int] = None,
+ max_tokens: Optional[int] = None,
+ k: Optional[int] = None,
+ p: Optional[int] = None,
+ frequency_penalty: Optional[int] = None,
+ presence_penalty: Optional[int] = None,
+ tools: Optional[list] = None,
+ tool_results: Optional[list] = None,
+ seed: Optional[int] = None,
+ ) -> None:
+ locals_ = locals()
+ for key, value in locals_.items():
+ if key != "self" and value is not None:
+ setattr(self.__class__, key, value)
+
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ return cohere_validate_environment(
+ headers=headers,
+ model=model,
+ messages=messages,
+ optional_params=optional_params,
+ api_key=api_key,
+ )
+
+ def get_supported_openai_params(self, model: str) -> List[str]:
+ return [
+ "stream",
+ "temperature",
+ "max_tokens",
+ "top_p",
+ "frequency_penalty",
+ "presence_penalty",
+ "stop",
+ "n",
+ "tools",
+ "tool_choice",
+ "seed",
+ "extra_headers",
+ ]
+
+ def map_openai_params(
+ self,
+ non_default_params: dict,
+ optional_params: dict,
+ model: str,
+ drop_params: bool,
+ ) -> dict:
+ for param, value in non_default_params.items():
+ if param == "stream":
+ optional_params["stream"] = value
+ if param == "temperature":
+ optional_params["temperature"] = value
+ if param == "max_tokens":
+ optional_params["max_tokens"] = value
+ if param == "n":
+ optional_params["num_generations"] = value
+ if param == "top_p":
+ optional_params["p"] = value
+ if param == "frequency_penalty":
+ optional_params["frequency_penalty"] = value
+ if param == "presence_penalty":
+ optional_params["presence_penalty"] = value
+ if param == "stop":
+ optional_params["stop_sequences"] = value
+ if param == "tools":
+ optional_params["tools"] = value
+ if param == "seed":
+ optional_params["seed"] = value
+ return optional_params
+
+ def transform_request(
+ self,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ headers: dict,
+ ) -> dict:
+ ## Load Config
+ for k, v in litellm.CohereChatConfig.get_config().items():
+ if (
+ k not in optional_params
+ ): # completion(top_k=3) > cohere_config(top_k=3) <- allows for dynamic variables to be passed in
+ optional_params[k] = v
+
+ most_recent_message, chat_history = cohere_messages_pt_v2(
+ messages=messages, model=model, llm_provider="cohere_chat"
+ )
+
+ ## Handle Tool Calling
+ if "tools" in optional_params:
+ _is_function_call = True
+ cohere_tools = self._construct_cohere_tool(tools=optional_params["tools"])
+ optional_params["tools"] = cohere_tools
+ if isinstance(most_recent_message, dict):
+ optional_params["tool_results"] = [most_recent_message]
+ elif isinstance(most_recent_message, str):
+ optional_params["message"] = most_recent_message
+
+ ## check if chat history message is 'user' and 'tool_results' is given -> force_single_step=True, else cohere api fails
+ if len(chat_history) > 0 and chat_history[-1]["role"] == "USER":
+ optional_params["force_single_step"] = True
+
+ return optional_params
+
+ def transform_response(
+ self,
+ model: str,
+ raw_response: httpx.Response,
+ model_response: ModelResponse,
+ logging_obj: LiteLLMLoggingObj,
+ request_data: dict,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ encoding: Any,
+ api_key: Optional[str] = None,
+ json_mode: Optional[bool] = None,
+ ) -> ModelResponse:
+ try:
+ raw_response_json = raw_response.json()
+ except Exception:
+ raise CohereError(
+ message=raw_response.text, status_code=raw_response.status_code
+ )
+
+ try:
+ cohere_v2_chat_response = CohereV2ChatResponse(**raw_response_json) # type: ignore
+ except Exception:
+ raise CohereError(message=raw_response.text, status_code=422)
+
+ cohere_content = cohere_v2_chat_response["message"].get("content", None)
+ if cohere_content is not None:
+ model_response.choices[0].message.content = "".join( # type: ignore
+ [
+ content.get("text", "")
+ for content in cohere_content
+ if content is not None
+ ]
+ )
+
+ ## ADD CITATIONS
+ if "citations" in cohere_v2_chat_response:
+ setattr(model_response, "citations", cohere_v2_chat_response["citations"])
+
+ ## Tool calling response
+ cohere_tools_response = cohere_v2_chat_response["message"].get("tool_calls", [])
+ if cohere_tools_response is not None and cohere_tools_response != []:
+ # convert cohere_tools_response to OpenAI response format
+ tool_calls: List[ChatCompletionToolCallChunk] = []
+ for index, tool in enumerate(cohere_tools_response):
+ tool_call: ChatCompletionToolCallChunk = {
+ **tool, # type: ignore
+ "index": index,
+ }
+ tool_calls.append(tool_call)
+ _message = litellm.Message(
+ tool_calls=tool_calls,
+ content=None,
+ )
+ model_response.choices[0].message = _message # type: ignore
+
+ ## CALCULATING USAGE - use cohere `billed_units` for returning usage
+ token_usage = cohere_v2_chat_response["usage"].get("tokens", {})
+ prompt_tokens = token_usage.get("input_tokens", 0)
+ completion_tokens = token_usage.get("output_tokens", 0)
+
+ model_response.created = int(time.time())
+ model_response.model = model
+ usage = Usage(
+ prompt_tokens=prompt_tokens,
+ completion_tokens=completion_tokens,
+ total_tokens=prompt_tokens + completion_tokens,
+ )
+ setattr(model_response, "usage", usage)
+ return model_response
+
+ def _construct_cohere_tool(
+ self,
+ tools: Optional[list] = None,
+ ):
+ if tools is None:
+ tools = []
+ cohere_tools = []
+ for tool in tools:
+ cohere_tool = self._translate_openai_tool_to_cohere(tool)
+ cohere_tools.append(cohere_tool)
+ return cohere_tools
+
+ def _translate_openai_tool_to_cohere(
+ self,
+ openai_tool: dict,
+ ):
+ # cohere tools look like this
+ """
+ {
+ "name": "query_daily_sales_report",
+ "description": "Connects to a database to retrieve overall sales volumes and sales information for a given day.",
+ "parameter_definitions": {
+ "day": {
+ "description": "Retrieves sales data for this day, formatted as YYYY-MM-DD.",
+ "type": "str",
+ "required": True
+ }
+ }
+ }
+ """
+
+ # OpenAI tools look like this
+ """
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA",
+ },
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+ },
+ "required": ["location"],
+ },
+ },
+ }
+ """
+ cohere_tool = {
+ "name": openai_tool["function"]["name"],
+ "description": openai_tool["function"]["description"],
+ "parameter_definitions": {},
+ }
+
+ for param_name, param_def in openai_tool["function"]["parameters"][
+ "properties"
+ ].items():
+ required_params = (
+ openai_tool.get("function", {})
+ .get("parameters", {})
+ .get("required", [])
+ )
+ cohere_param_def = {
+ "description": param_def.get("description", ""),
+ "type": param_def.get("type", ""),
+ "required": param_name in required_params,
+ }
+ cohere_tool["parameter_definitions"][param_name] = cohere_param_def
+
+ return cohere_tool
+
+ def get_model_response_iterator(
+ self,
+ streaming_response: Union[Iterator[str], AsyncIterator[str], ModelResponse],
+ sync_stream: bool,
+ json_mode: Optional[bool] = False,
+ ):
+ return CohereModelResponseIterator(
+ streaming_response=streaming_response,
+ sync_stream=sync_stream,
+ json_mode=json_mode,
+ )
+
+ def get_error_class(
+ self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
+ ) -> BaseLLMException:
+ return CohereError(status_code=status_code, message=error_message)
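
Applying `_translate_openai_tool_to_cohere` to the OpenAI weather tool shown in the docstring above would produce roughly the following Cohere-style definition (a worked illustration of the mapping, not a captured API response):

```python
expected_cohere_tool = {
    "name": "get_current_weather",
    "description": "Get the current weather in a given location",
    "parameter_definitions": {
        "location": {
            "description": "The city and state, e.g. San Francisco, CA",
            "type": "string",
            "required": True,
        },
        # "unit" has no description in the OpenAI tool, so it falls back to "".
        "unit": {"description": "", "type": "string", "required": False},
    },
}
```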
diff --git a/litellm/llms/cohere/common_utils.py b/litellm/llms/cohere/common_utils.py
index 11ff73efc2..6dbe52d575 100644
--- a/litellm/llms/cohere/common_utils.py
+++ b/litellm/llms/cohere/common_utils.py
@@ -104,19 +104,28 @@ class ModelResponseIterator:
raise RuntimeError(f"Error receiving chunk from stream: {e}")
try:
- str_line = chunk
- if isinstance(chunk, bytes): # Handle binary data
- str_line = chunk.decode("utf-8") # Convert bytes to string
- index = str_line.find("data:")
- if index != -1:
- str_line = str_line[index:]
- data_json = json.loads(str_line)
- return self.chunk_parser(chunk=data_json)
+ return self.convert_str_chunk_to_generic_chunk(chunk=chunk)
except StopIteration:
raise StopIteration
except ValueError as e:
raise RuntimeError(f"Error parsing chunk: {e},\nReceived chunk: {chunk}")
+ def convert_str_chunk_to_generic_chunk(self, chunk: str) -> GenericStreamingChunk:
+ """
+ Convert a string chunk to a GenericStreamingChunk
+
+ Note: This is used for Cohere pass-through streaming logging.
+ """
+ str_line = chunk
+ if isinstance(chunk, bytes): # Handle binary data
+ str_line = chunk.decode("utf-8") # Convert bytes to string
+ index = str_line.find("data:")
+ if index != -1:
+ str_line = str_line[index:]
+
+ data_json = json.loads(str_line)
+ return self.chunk_parser(chunk=data_json)
+
# Async iterator
def __aiter__(self):
self.async_response_iterator = self.streaming_response.__aiter__()
@@ -131,15 +140,7 @@ class ModelResponseIterator:
raise RuntimeError(f"Error receiving chunk from stream: {e}")
try:
- str_line = chunk
- if isinstance(chunk, bytes): # Handle binary data
- str_line = chunk.decode("utf-8") # Convert bytes to string
- index = str_line.find("data:")
- if index != -1:
- str_line = str_line[index:]
-
- data_json = json.loads(str_line)
- return self.chunk_parser(chunk=data_json)
+ return self.convert_str_chunk_to_generic_chunk(chunk=chunk)
except StopAsyncIteration:
raise StopAsyncIteration
except ValueError as e:
diff --git a/litellm/llms/cohere/completion/transformation.py b/litellm/llms/cohere/completion/transformation.py
index bdfcda020e..f96ef89d3c 100644
--- a/litellm/llms/cohere/completion/transformation.py
+++ b/litellm/llms/cohere/completion/transformation.py
@@ -101,6 +101,7 @@ class CohereTextConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/cohere/embed/handler.py b/litellm/llms/cohere/embed/handler.py
index e7f22ea72a..7a25bf7e54 100644
--- a/litellm/llms/cohere/embed/handler.py
+++ b/litellm/llms/cohere/embed/handler.py
@@ -56,7 +56,6 @@ async def async_embedding(
encoding: Callable,
client: Optional[AsyncHTTPHandler] = None,
):
-
## LOGGING
logging_obj.pre_call(
input=input,
diff --git a/litellm/llms/cohere/embed/transformation.py b/litellm/llms/cohere/embed/transformation.py
index 22e157a0fd..837dd5e006 100644
--- a/litellm/llms/cohere/embed/transformation.py
+++ b/litellm/llms/cohere/embed/transformation.py
@@ -72,7 +72,6 @@ class CohereEmbeddingConfig:
return transformed_request
def _calculate_usage(self, input: List[str], encoding: Any, meta: dict) -> Usage:
-
input_tokens = 0
text_tokens: Optional[int] = meta.get("billed_units", {}).get("input_tokens")
@@ -111,7 +110,6 @@ class CohereEmbeddingConfig:
encoding: Any,
input: list,
) -> EmbeddingResponse:
-
response_json = response.json()
## LOGGING
logging_obj.post_call(
diff --git a/litellm/llms/cohere/rerank/transformation.py b/litellm/llms/cohere/rerank/transformation.py
index f3624d9216..22782c1300 100644
--- a/litellm/llms/cohere/rerank/transformation.py
+++ b/litellm/llms/cohere/rerank/transformation.py
@@ -148,4 +148,4 @@ class CohereRerankConfig(BaseRerankConfig):
def get_error_class(
self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
) -> BaseLLMException:
- return CohereError(message=error_message, status_code=status_code)
\ No newline at end of file
+ return CohereError(message=error_message, status_code=status_code)
diff --git a/litellm/llms/cohere/rerank_v2/transformation.py b/litellm/llms/cohere/rerank_v2/transformation.py
index a93cb982a7..74e760460d 100644
--- a/litellm/llms/cohere/rerank_v2/transformation.py
+++ b/litellm/llms/cohere/rerank_v2/transformation.py
@@ -3,6 +3,7 @@ from typing import Any, Dict, List, Optional, Union
from litellm.llms.cohere.rerank.transformation import CohereRerankConfig
from litellm.types.rerank import OptionalRerankParams, RerankRequest
+
class CohereRerankV2Config(CohereRerankConfig):
"""
Reference: https://docs.cohere.com/v2/reference/rerank
@@ -77,4 +78,4 @@ class CohereRerankV2Config(CohereRerankConfig):
return_documents=optional_rerank_params.get("return_documents", None),
max_tokens_per_doc=optional_rerank_params.get("max_tokens_per_doc", None),
)
- return rerank_request.model_dump(exclude_none=True)
\ No newline at end of file
+ return rerank_request.model_dump(exclude_none=True)
diff --git a/litellm/llms/custom_httpx/aiohttp_handler.py b/litellm/llms/custom_httpx/aiohttp_handler.py
index c865fee17e..13141fc19a 100644
--- a/litellm/llms/custom_httpx/aiohttp_handler.py
+++ b/litellm/llms/custom_httpx/aiohttp_handler.py
@@ -32,7 +32,6 @@ DEFAULT_TIMEOUT = 600
class BaseLLMAIOHTTPHandler:
-
def __init__(self):
self.client_session: Optional[aiohttp.ClientSession] = None
@@ -110,7 +109,6 @@ class BaseLLMAIOHTTPHandler:
content: Any = None,
params: Optional[dict] = None,
) -> httpx.Response:
-
max_retry_on_unprocessable_entity_error = (
provider_config.max_retry_on_unprocessable_entity_error
)
@@ -220,6 +218,10 @@ class BaseLLMAIOHTTPHandler:
provider_config = ProviderConfigManager.get_provider_chat_config(
model=model, provider=litellm.LlmProviders(custom_llm_provider)
)
+ if provider_config is None:
+ raise ValueError(
+ f"Provider config not found for model: {model} and provider: {custom_llm_provider}"
+ )
# get config from model, custom llm provider
headers = provider_config.validate_environment(
api_key=api_key,
@@ -227,11 +229,13 @@ class BaseLLMAIOHTTPHandler:
model=model,
messages=messages,
optional_params=optional_params,
+ litellm_params=litellm_params,
api_base=api_base,
)
api_base = provider_config.get_complete_url(
api_base=api_base,
+ api_key=api_key,
model=model,
optional_params=optional_params,
litellm_params=litellm_params,
@@ -482,6 +486,7 @@ class BaseLLMAIOHTTPHandler:
api_base = provider_config.get_complete_url(
api_base=api_base,
+ api_key=api_key,
model=model,
optional_params=optional_params,
litellm_params=litellm_params,
@@ -494,6 +499,7 @@ class BaseLLMAIOHTTPHandler:
model=model,
messages=[{"role": "user", "content": "test"}],
optional_params=optional_params,
+ litellm_params=litellm_params,
api_base=api_base,
)
@@ -521,7 +527,6 @@ class BaseLLMAIOHTTPHandler:
data=data,
headers=headers,
model_response=model_response,
- api_key=api_key,
logging_obj=logging_obj,
model=model,
timeout=timeout,
diff --git a/litellm/llms/custom_httpx/http_handler.py b/litellm/llms/custom_httpx/http_handler.py
index 736b85dc53..f99e04ab9d 100644
--- a/litellm/llms/custom_httpx/http_handler.py
+++ b/litellm/llms/custom_httpx/http_handler.py
@@ -1,5 +1,6 @@
import asyncio
import os
+import ssl
import time
from typing import TYPE_CHECKING, Any, Callable, List, Mapping, Optional, Union
@@ -7,6 +8,7 @@ import httpx
from httpx import USE_CLIENT_DEFAULT, AsyncHTTPTransport, HTTPTransport
import litellm
+from litellm.constants import _DEFAULT_TTL_FOR_HTTPX_CLIENTS
from litellm.litellm_core_utils.logging_utils import track_llm_api_timing
from litellm.types.llms.custom_http import *
@@ -30,7 +32,6 @@ headers = {
# https://www.python-httpx.org/advanced/timeouts
_DEFAULT_TIMEOUT = httpx.Timeout(timeout=5.0, connect=5.0)
-_DEFAULT_TTL_FOR_HTTPX_CLIENTS = 3600 # 1 hour, re-use the same httpx client for 1 hour
def mask_sensitive_info(error_message):
@@ -94,7 +95,7 @@ class AsyncHTTPHandler:
event_hooks: Optional[Mapping[str, List[Callable[..., Any]]]] = None,
concurrent_limit=1000,
client_alias: Optional[str] = None, # name for client in logs
- ssl_verify: Optional[Union[bool, str]] = None,
+ ssl_verify: Optional[VerifyTypes] = None,
):
self.timeout = timeout
self.event_hooks = event_hooks
@@ -111,13 +112,32 @@ class AsyncHTTPHandler:
timeout: Optional[Union[float, httpx.Timeout]],
concurrent_limit: int,
event_hooks: Optional[Mapping[str, List[Callable[..., Any]]]],
- ssl_verify: Optional[Union[bool, str]] = None,
+ ssl_verify: Optional[VerifyTypes] = None,
) -> httpx.AsyncClient:
-
# SSL certificates (a.k.a CA bundle) used to verify the identity of requested hosts.
# /path/to/certificate.pem
if ssl_verify is None:
ssl_verify = os.getenv("SSL_VERIFY", litellm.ssl_verify)
+
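+ # SSL_SECURITY_LEVEL is expected to hold an OpenSSL cipher/security-level string,
+ # e.g. "DEFAULT@SECLEVEL=1" (illustrative value, not a required setting).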
+ ssl_security_level = os.getenv("SSL_SECURITY_LEVEL")
+
+ # If ssl_verify is falsy (disabled/unset) and a custom security level is requested
+ if (
+ not ssl_verify
+ and ssl_security_level
+ and isinstance(ssl_security_level, str)
+ ):
+ # Create a custom SSL context with reduced security level
+ custom_ssl_context = ssl.create_default_context()
+ custom_ssl_context.set_ciphers(ssl_security_level)
+
+ # If ssl_verify is a path to a CA bundle, load it into our custom context
+ if isinstance(ssl_verify, str) and os.path.exists(ssl_verify):
+ custom_ssl_context.load_verify_locations(cafile=ssl_verify)
+
+ # Use our custom SSL context instead of the original ssl_verify value
+ ssl_verify = custom_ssl_context
+
# An SSL certificate used by the requested host to authenticate the client.
# /path/to/client.pem
cert = os.getenv("SSL_CERTIFICATE", litellm.ssl_certificate)
@@ -172,7 +192,7 @@ class AsyncHTTPHandler:
async def post(
self,
url: str,
- data: Optional[Union[dict, str]] = None, # type: ignore
+ data: Optional[Union[dict, str, bytes]] = None, # type: ignore
json: Optional[dict] = None,
params: Optional[dict] = None,
headers: Optional[dict] = None,
@@ -407,7 +427,7 @@ class AsyncHTTPHandler:
self,
url: str,
client: httpx.AsyncClient,
- data: Optional[Union[dict, str]] = None, # type: ignore
+ data: Optional[Union[dict, str, bytes]] = None, # type: ignore
json: Optional[dict] = None,
params: Optional[dict] = None,
headers: Optional[dict] = None,
@@ -507,7 +527,7 @@ class HTTPHandler:
def post(
self,
url: str,
- data: Optional[Union[dict, str]] = None,
+ data: Optional[Union[dict, str, bytes]] = None,
json: Optional[Union[dict, str, List]] = None,
params: Optional[dict] = None,
headers: Optional[dict] = None,
@@ -553,7 +573,6 @@ class HTTPHandler:
setattr(e, "text", error_text)
setattr(e, "status_code", e.response.status_code)
-
raise e
except Exception as e:
raise e
@@ -569,7 +588,6 @@ class HTTPHandler:
timeout: Optional[Union[float, httpx.Timeout]] = None,
):
try:
-
if timeout is not None:
req = self.client.build_request(
"PATCH", url, data=data, json=json, params=params, headers=headers, timeout=timeout # type: ignore
@@ -588,7 +606,6 @@ class HTTPHandler:
llm_provider="litellm-httpx-handler",
)
except httpx.HTTPStatusError as e:
-
if stream is True:
setattr(e, "message", mask_sensitive_info(e.response.read()))
setattr(e, "text", mask_sensitive_info(e.response.read()))
@@ -614,7 +631,6 @@ class HTTPHandler:
timeout: Optional[Union[float, httpx.Timeout]] = None,
):
try:
-
if timeout is not None:
req = self.client.build_request(
"PUT", url, data=data, json=json, params=params, headers=headers, timeout=timeout # type: ignore
@@ -634,6 +650,49 @@ class HTTPHandler:
except Exception as e:
raise e
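+ # DELETE helper mirroring post/put/patch above; it accepts an optional request
+ # body (data/json) so callers such as the responses-API delete handler can pass one.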
+ def delete(
+ self,
+ url: str,
+ data: Optional[Union[dict, str]] = None, # type: ignore
+ json: Optional[dict] = None,
+ params: Optional[dict] = None,
+ headers: Optional[dict] = None,
+ timeout: Optional[Union[float, httpx.Timeout]] = None,
+ stream: bool = False,
+ ):
+ try:
+ if timeout is not None:
+ req = self.client.build_request(
+ "DELETE", url, data=data, json=json, params=params, headers=headers, timeout=timeout # type: ignore
+ )
+ else:
+ req = self.client.build_request(
+ "DELETE", url, data=data, json=json, params=params, headers=headers # type: ignore
+ )
+ response = self.client.send(req, stream=stream)
+ response.raise_for_status()
+ return response
+ except httpx.TimeoutException:
+ raise litellm.Timeout(
+ message=f"Connection timed out after {timeout} seconds.",
+ model="default-model-name",
+ llm_provider="litellm-httpx-handler",
+ )
+ except httpx.HTTPStatusError as e:
+ if stream is True:
+ setattr(e, "message", mask_sensitive_info(e.response.read()))
+ setattr(e, "text", mask_sensitive_info(e.response.read()))
+ else:
+ error_text = mask_sensitive_info(e.response.text)
+ setattr(e, "message", error_text)
+ setattr(e, "text", error_text)
+
+ setattr(e, "status_code", e.response.status_code)
+
+ raise e
+ except Exception as e:
+ raise e
+
def __del__(self) -> None:
try:
self.close()
diff --git a/litellm/llms/custom_httpx/llm_http_handler.py b/litellm/llms/custom_httpx/llm_http_handler.py
index 01fe36acda..1958ef0b60 100644
--- a/litellm/llms/custom_httpx/llm_http_handler.py
+++ b/litellm/llms/custom_httpx/llm_http_handler.py
@@ -1,4 +1,3 @@
-import io
import json
from typing import TYPE_CHECKING, Any, Coroutine, Dict, Optional, Tuple, Union
@@ -8,8 +7,14 @@ import litellm
import litellm.litellm_core_utils
import litellm.types
import litellm.types.utils
+from litellm._logging import verbose_logger
+from litellm.llms.base_llm.audio_transcription.transformation import (
+ BaseAudioTranscriptionConfig,
+)
+from litellm.llms.base_llm.base_model_iterator import MockResponseIterator
from litellm.llms.base_llm.chat.transformation import BaseConfig
from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig
+from litellm.llms.base_llm.files.transformation import BaseFilesConfig
from litellm.llms.base_llm.rerank.transformation import BaseRerankConfig
from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from litellm.llms.custom_httpx.http_handler import (
@@ -20,11 +25,18 @@ from litellm.llms.custom_httpx.http_handler import (
)
from litellm.responses.streaming_iterator import (
BaseResponsesAPIStreamingIterator,
+ MockResponsesAPIStreamingIterator,
ResponsesAPIStreamingIterator,
SyncResponsesAPIStreamingIterator,
)
-from litellm.types.llms.openai import ResponseInputParam, ResponsesAPIResponse
+from litellm.types.llms.openai import (
+ CreateFileRequest,
+ OpenAIFileObject,
+ ResponseInputParam,
+ ResponsesAPIResponse,
+)
from litellm.types.rerank import OptionalRerankParams, RerankResponse
+from litellm.types.responses.main import DeleteResponseResult
from litellm.types.router import GenericLiteLLMParams
from litellm.types.utils import EmbeddingResponse, FileTypes, TranscriptionResponse
from litellm.utils import CustomStreamWrapper, ModelResponse, ProviderConfigManager
@@ -38,7 +50,6 @@ else:
class BaseLLMHTTPHandler:
-
async def _make_common_async_call(
self,
async_httpx_client: AsyncHTTPHandler,
@@ -106,7 +117,6 @@ class BaseLLMHTTPHandler:
logging_obj: LiteLLMLoggingObj,
stream: bool = False,
) -> httpx.Response:
-
max_retry_on_unprocessable_entity_error = (
provider_config.max_retry_on_unprocessable_entity_error
)
@@ -220,12 +230,22 @@ class BaseLLMHTTPHandler:
api_key: Optional[str] = None,
headers: Optional[dict] = {},
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+ provider_config: Optional[BaseConfig] = None,
):
json_mode: bool = optional_params.pop("json_mode", False)
+ extra_body: Optional[dict] = optional_params.pop("extra_body", None)
+ fake_stream = fake_stream or optional_params.pop("fake_stream", False)
- provider_config = ProviderConfigManager.get_provider_chat_config(
- model=model, provider=litellm.LlmProviders(custom_llm_provider)
+ provider_config = (
+ provider_config
+ or ProviderConfigManager.get_provider_chat_config(
+ model=model, provider=litellm.LlmProviders(custom_llm_provider)
+ )
)
+ if provider_config is None:
+ raise ValueError(
+ f"Provider config not found for model: {model} and provider: {custom_llm_provider}"
+ )
# get config from model, custom llm provider
headers = provider_config.validate_environment(
@@ -235,10 +255,12 @@ class BaseLLMHTTPHandler:
messages=messages,
optional_params=optional_params,
api_base=api_base,
+ litellm_params=litellm_params,
)
api_base = provider_config.get_complete_url(
api_base=api_base,
+ api_key=api_key,
model=model,
optional_params=optional_params,
stream=stream,
@@ -253,6 +275,9 @@ class BaseLLMHTTPHandler:
headers=headers,
)
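+ # Any caller-supplied `extra_body` entries are merged last, so they override
+ # keys produced by the provider transform.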
+ if extra_body is not None:
+ data = {**data, **extra_body}
+
headers = provider_config.sign_request(
headers=headers,
optional_params=optional_params,
@@ -299,6 +324,7 @@ class BaseLLMHTTPHandler:
),
litellm_params=litellm_params,
json_mode=json_mode,
+ optional_params=optional_params,
)
else:
@@ -359,6 +385,8 @@ class BaseLLMHTTPHandler:
else None
),
litellm_params=litellm_params,
+ json_mode=json_mode,
+ optional_params=optional_params,
)
return CustomStreamWrapper(
completion_stream=completion_stream,
@@ -407,10 +435,12 @@ class BaseLLMHTTPHandler:
model: str,
messages: list,
logging_obj,
+ optional_params: dict,
litellm_params: dict,
timeout: Union[float, httpx.Timeout],
fake_stream: bool = False,
client: Optional[HTTPHandler] = None,
+ json_mode: bool = False,
) -> Tuple[Any, dict]:
if client is None or not isinstance(client, HTTPHandler):
sync_httpx_client = _get_httpx_client(
@@ -437,12 +467,27 @@ class BaseLLMHTTPHandler:
)
if fake_stream is True:
- completion_stream = provider_config.get_model_response_iterator(
- streaming_response=response.json(), sync_stream=True
+ model_response: ModelResponse = provider_config.transform_response(
+ model=model,
+ raw_response=response,
+ model_response=litellm.ModelResponse(),
+ logging_obj=logging_obj,
+ request_data=data,
+ messages=messages,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
+ encoding=None,
+ json_mode=json_mode,
+ )
+
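+ # Fake streaming: the fully-transformed, non-streamed response above is wrapped
+ # in MockResponseIterator so callers can consume it like a stream.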
+ completion_stream: Any = MockResponseIterator(
+ model_response=model_response, json_mode=json_mode
)
else:
completion_stream = provider_config.get_model_response_iterator(
- streaming_response=response.iter_lines(), sync_stream=True
+ streaming_response=response.iter_lines(),
+ sync_stream=True,
+ json_mode=json_mode,
)
# LOGGING
@@ -467,6 +512,7 @@ class BaseLLMHTTPHandler:
logging_obj: LiteLLMLoggingObj,
data: dict,
litellm_params: dict,
+ optional_params: dict,
fake_stream: bool = False,
client: Optional[AsyncHTTPHandler] = None,
json_mode: Optional[bool] = None,
@@ -485,6 +531,7 @@ class BaseLLMHTTPHandler:
)
completion_stream, _response_headers = await self.make_async_call_stream_helper(
+ model=model,
custom_llm_provider=custom_llm_provider,
provider_config=provider_config,
api_base=api_base,
@@ -496,6 +543,8 @@ class BaseLLMHTTPHandler:
fake_stream=fake_stream,
client=client,
litellm_params=litellm_params,
+ optional_params=optional_params,
+ json_mode=json_mode,
)
streamwrapper = CustomStreamWrapper(
completion_stream=completion_stream,
@@ -507,6 +556,7 @@ class BaseLLMHTTPHandler:
async def make_async_call_stream_helper(
self,
+ model: str,
custom_llm_provider: str,
provider_config: BaseConfig,
api_base: str,
@@ -516,8 +566,10 @@ class BaseLLMHTTPHandler:
logging_obj: LiteLLMLoggingObj,
timeout: Union[float, httpx.Timeout],
litellm_params: dict,
+ optional_params: dict,
fake_stream: bool = False,
client: Optional[AsyncHTTPHandler] = None,
+ json_mode: Optional[bool] = None,
) -> Tuple[Any, httpx.Headers]:
"""
Helper function for making an async call with stream.
@@ -548,8 +600,21 @@ class BaseLLMHTTPHandler:
)
if fake_stream is True:
- completion_stream = provider_config.get_model_response_iterator(
- streaming_response=response.json(), sync_stream=False
+ model_response: ModelResponse = provider_config.transform_response(
+ model=model,
+ raw_response=response,
+ model_response=litellm.ModelResponse(),
+ logging_obj=logging_obj,
+ request_data=data,
+ messages=messages,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
+ encoding=None,
+ json_mode=json_mode,
+ )
+
+ completion_stream: Any = MockResponseIterator(
+ model_response=model_response, json_mode=json_mode
)
else:
completion_stream = provider_config.get_model_response_iterator(
@@ -574,8 +639,12 @@ class BaseLLMHTTPHandler:
"""
Some providers, like Bedrock invoke, do not support the stream parameter in the request body; we only pass `stream` in the request body if the provider supports it.
"""
+
if fake_stream is True:
- return data
+ # remove 'stream' from data
+ new_data = data.copy()
+ new_data.pop("stream", None)
+ return new_data
if provider_config.supports_stream_param_in_request_body is True:
data["stream"] = True
return data
@@ -596,7 +665,6 @@ class BaseLLMHTTPHandler:
aembedding: bool = False,
headers={},
) -> EmbeddingResponse:
-
provider_config = ProviderConfigManager.get_provider_embedding_config(
model=model, provider=litellm.LlmProviders(custom_llm_provider)
)
@@ -607,10 +675,12 @@ class BaseLLMHTTPHandler:
model=model,
messages=[],
optional_params=optional_params,
+ litellm_params=litellm_params,
)
api_base = provider_config.get_complete_url(
api_base=api_base,
+ api_key=api_key,
model=model,
optional_params=optional_params,
litellm_params=litellm_params,
@@ -739,7 +809,6 @@ class BaseLLMHTTPHandler:
api_base: Optional[str] = None,
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
) -> RerankResponse:
-
# get config from model, custom llm provider
headers = provider_config.validate_environment(
api_key=api_key,
@@ -825,7 +894,6 @@ class BaseLLMHTTPHandler:
timeout: Optional[Union[float, httpx.Timeout]] = None,
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
) -> RerankResponse:
-
if client is None or not isinstance(client, AsyncHTTPHandler):
async_httpx_client = get_async_httpx_client(
llm_provider=litellm.LlmProviders(custom_llm_provider)
@@ -851,54 +919,12 @@ class BaseLLMHTTPHandler:
request_data=request_data,
)
- def handle_audio_file(self, audio_file: FileTypes) -> bytes:
- """
- Processes the audio file input based on its type and returns the binary data.
-
- Args:
- audio_file: Can be a file path (str), a tuple (filename, file_content), or binary data (bytes).
-
- Returns:
- The binary data of the audio file.
- """
- binary_data: bytes # Explicitly declare the type
-
- # Handle the audio file based on type
- if isinstance(audio_file, str):
- # If it's a file path
- with open(audio_file, "rb") as f:
- binary_data = f.read() # `f.read()` always returns `bytes`
- elif isinstance(audio_file, tuple):
- # Handle tuple case
- _, file_content = audio_file[:2]
- if isinstance(file_content, str):
- with open(file_content, "rb") as f:
- binary_data = f.read() # `f.read()` always returns `bytes`
- elif isinstance(file_content, bytes):
- binary_data = file_content
- else:
- raise TypeError(
- f"Unexpected type in tuple: {type(file_content)}. Expected str or bytes."
- )
- elif isinstance(audio_file, bytes):
- # Assume it's already binary data
- binary_data = audio_file
- elif isinstance(audio_file, io.BufferedReader) or isinstance(
- audio_file, io.BytesIO
- ):
- # Handle file-like objects
- binary_data = audio_file.read()
-
- else:
- raise TypeError(f"Unsupported type for audio_file: {type(audio_file)}")
-
- return binary_data
-
def audio_transcriptions(
self,
model: str,
audio_file: FileTypes,
optional_params: dict,
+ litellm_params: dict,
model_response: TranscriptionResponse,
timeout: float,
max_retries: int,
@@ -909,11 +935,8 @@ class BaseLLMHTTPHandler:
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
atranscription: bool = False,
headers: dict = {},
- litellm_params: dict = {},
+ provider_config: Optional[BaseAudioTranscriptionConfig] = None,
) -> TranscriptionResponse:
- provider_config = ProviderConfigManager.get_provider_audio_transcription_config(
- model=model, provider=litellm.LlmProviders(custom_llm_provider)
- )
if provider_config is None:
raise ValueError(
f"No provider config found for model: {model} and provider: {custom_llm_provider}"
@@ -924,6 +947,7 @@ class BaseLLMHTTPHandler:
model=model,
messages=[],
optional_params=optional_params,
+ litellm_params=litellm_params,
)
if client is None or not isinstance(client, HTTPHandler):
@@ -931,13 +955,25 @@ class BaseLLMHTTPHandler:
complete_url = provider_config.get_complete_url(
api_base=api_base,
+ api_key=api_key,
model=model,
optional_params=optional_params,
litellm_params=litellm_params,
)
# Handle the audio file based on type
- binary_data = self.handle_audio_file(audio_file)
+ data = provider_config.transform_audio_transcription_request(
+ model=model,
+ audio_file=audio_file,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
+ )
+ binary_data: Optional[bytes] = None
+ json_data: Optional[dict] = None
+ if isinstance(data, bytes):
+ binary_data = data
+ else:
+ json_data = data
try:
# Make the POST request
@@ -945,6 +981,7 @@ class BaseLLMHTTPHandler:
url=complete_url,
headers=headers,
content=binary_data,
+ json=json_data,
timeout=timeout,
)
except Exception as e:
@@ -978,6 +1015,8 @@ class BaseLLMHTTPHandler:
timeout: Optional[Union[float, httpx.Timeout]] = None,
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
_is_async: bool = False,
+ fake_stream: bool = False,
+ litellm_metadata: Optional[Dict[str, Any]] = None,
) -> Union[
ResponsesAPIResponse,
BaseResponsesAPIStreamingIterator,
@@ -1003,6 +1042,8 @@ class BaseLLMHTTPHandler:
extra_body=extra_body,
timeout=timeout,
client=client if isinstance(client, AsyncHTTPHandler) else None,
+ fake_stream=fake_stream,
+ litellm_metadata=litellm_metadata,
)
if client is None or not isinstance(client, HTTPHandler):
@@ -1021,9 +1062,12 @@ class BaseLLMHTTPHandler:
if extra_headers:
headers.update(extra_headers)
+ # Check if streaming is requested
+ stream = response_api_optional_request_params.get("stream", False)
+
api_base = responses_api_provider_config.get_complete_url(
api_base=litellm_params.api_base,
- model=model,
+ litellm_params=dict(litellm_params),
)
data = responses_api_provider_config.transform_responses_api_request(
@@ -1045,26 +1089,40 @@ class BaseLLMHTTPHandler:
},
)
- # Check if streaming is requested
- stream = response_api_optional_request_params.get("stream", False)
-
try:
if stream:
# For streaming, use stream=True in the request
+ if fake_stream is True:
+ stream, data = self._prepare_fake_stream_request(
+ stream=stream,
+ data=data,
+ fake_stream=fake_stream,
+ )
response = sync_httpx_client.post(
url=api_base,
headers=headers,
data=json.dumps(data),
timeout=timeout
or response_api_optional_request_params.get("timeout"),
- stream=True,
+ stream=stream,
)
+ if fake_stream is True:
+ return MockResponsesAPIStreamingIterator(
+ response=response,
+ model=model,
+ logging_obj=logging_obj,
+ responses_api_provider_config=responses_api_provider_config,
+ litellm_metadata=litellm_metadata,
+ custom_llm_provider=custom_llm_provider,
+ )
return SyncResponsesAPIStreamingIterator(
response=response,
model=model,
logging_obj=logging_obj,
responses_api_provider_config=responses_api_provider_config,
+ litellm_metadata=litellm_metadata,
+ custom_llm_provider=custom_llm_provider,
)
else:
# For non-streaming requests
@@ -1100,6 +1158,8 @@ class BaseLLMHTTPHandler:
extra_body: Optional[Dict[str, Any]] = None,
timeout: Optional[Union[float, httpx.Timeout]] = None,
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+ fake_stream: bool = False,
+ litellm_metadata: Optional[Dict[str, Any]] = None,
) -> Union[ResponsesAPIResponse, BaseResponsesAPIStreamingIterator]:
"""
Async version of the responses API handler.
@@ -1122,9 +1182,12 @@ class BaseLLMHTTPHandler:
if extra_headers:
headers.update(extra_headers)
+ # Check if streaming is requested
+ stream = response_api_optional_request_params.get("stream", False)
+
api_base = responses_api_provider_config.get_complete_url(
api_base=litellm_params.api_base,
- model=model,
+ litellm_params=dict(litellm_params),
)
data = responses_api_provider_config.transform_responses_api_request(
@@ -1146,27 +1209,43 @@ class BaseLLMHTTPHandler:
},
)
- # Check if streaming is requested
- stream = response_api_optional_request_params.get("stream", False)
-
try:
if stream:
# For streaming, we need to use stream=True in the request
+ if fake_stream is True:
+ stream, data = self._prepare_fake_stream_request(
+ stream=stream,
+ data=data,
+ fake_stream=fake_stream,
+ )
+
response = await async_httpx_client.post(
url=api_base,
headers=headers,
data=json.dumps(data),
timeout=timeout
or response_api_optional_request_params.get("timeout"),
- stream=True,
+ stream=stream,
)
+ if fake_stream is True:
+ return MockResponsesAPIStreamingIterator(
+ response=response,
+ model=model,
+ logging_obj=logging_obj,
+ responses_api_provider_config=responses_api_provider_config,
+ litellm_metadata=litellm_metadata,
+ custom_llm_provider=custom_llm_provider,
+ )
+
# Return the streaming iterator
return ResponsesAPIStreamingIterator(
response=response,
model=model,
logging_obj=logging_obj,
responses_api_provider_config=responses_api_provider_config,
+ litellm_metadata=litellm_metadata,
+ custom_llm_provider=custom_llm_provider,
)
else:
# For non-streaming, proceed as before
@@ -1177,6 +1256,7 @@ class BaseLLMHTTPHandler:
timeout=timeout
or response_api_optional_request_params.get("timeout"),
)
+
except Exception as e:
raise self._handle_error(
e=e,
@@ -1189,6 +1269,382 @@ class BaseLLMHTTPHandler:
logging_obj=logging_obj,
)
+ async def async_delete_response_api_handler(
+ self,
+ response_id: str,
+ responses_api_provider_config: BaseResponsesAPIConfig,
+ litellm_params: GenericLiteLLMParams,
+ logging_obj: LiteLLMLoggingObj,
+ custom_llm_provider: Optional[str],
+ extra_headers: Optional[Dict[str, Any]] = None,
+ extra_body: Optional[Dict[str, Any]] = None,
+ timeout: Optional[Union[float, httpx.Timeout]] = None,
+ client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+ _is_async: bool = False,
+ ) -> DeleteResponseResult:
+ """
+ Async version of the delete response API handler.
+ Uses async HTTP client to make requests.
+ """
+ if client is None or not isinstance(client, AsyncHTTPHandler):
+ async_httpx_client = get_async_httpx_client(
+ llm_provider=litellm.LlmProviders(custom_llm_provider),
+ params={"ssl_verify": litellm_params.get("ssl_verify", None)},
+ )
+ else:
+ async_httpx_client = client
+
+ headers = responses_api_provider_config.validate_environment(
+ api_key=litellm_params.api_key,
+ headers=extra_headers or {},
+ model="None",
+ )
+
+ if extra_headers:
+ headers.update(extra_headers)
+
+ api_base = responses_api_provider_config.get_complete_url(
+ api_base=litellm_params.api_base,
+ litellm_params=dict(litellm_params),
+ )
+
+ url, data = responses_api_provider_config.transform_delete_response_api_request(
+ response_id=response_id,
+ api_base=api_base,
+ litellm_params=litellm_params,
+ headers=headers,
+ )
+
+ ## LOGGING
+ logging_obj.pre_call(
+ input=input,
+ api_key="",
+ additional_args={
+ "complete_input_dict": data,
+ "api_base": api_base,
+ "headers": headers,
+ },
+ )
+
+ try:
+ response = await async_httpx_client.delete(
+ url=url, headers=headers, data=json.dumps(data), timeout=timeout
+ )
+
+ except Exception as e:
+ raise self._handle_error(
+ e=e,
+ provider_config=responses_api_provider_config,
+ )
+
+ return responses_api_provider_config.transform_delete_response_api_response(
+ raw_response=response,
+ logging_obj=logging_obj,
+ )
+
+ def delete_response_api_handler(
+ self,
+ response_id: str,
+ responses_api_provider_config: BaseResponsesAPIConfig,
+ litellm_params: GenericLiteLLMParams,
+ logging_obj: LiteLLMLoggingObj,
+ custom_llm_provider: Optional[str],
+ extra_headers: Optional[Dict[str, Any]] = None,
+ extra_body: Optional[Dict[str, Any]] = None,
+ timeout: Optional[Union[float, httpx.Timeout]] = None,
+ client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+ _is_async: bool = False,
+ ) -> Union[DeleteResponseResult, Coroutine[Any, Any, DeleteResponseResult]]:
+ """
+ Sync version of the delete response API handler.
+ Dispatches to `async_delete_response_api_handler` when `_is_async` is True.
+ """
+ if _is_async:
+ return self.async_delete_response_api_handler(
+ response_id=response_id,
+ responses_api_provider_config=responses_api_provider_config,
+ litellm_params=litellm_params,
+ logging_obj=logging_obj,
+ custom_llm_provider=custom_llm_provider,
+ extra_headers=extra_headers,
+ extra_body=extra_body,
+ timeout=timeout,
+ client=client,
+ )
+ if client is None or not isinstance(client, HTTPHandler):
+ sync_httpx_client = _get_httpx_client(
+ params={"ssl_verify": litellm_params.get("ssl_verify", None)}
+ )
+ else:
+ sync_httpx_client = client
+
+ headers = responses_api_provider_config.validate_environment(
+ api_key=litellm_params.api_key,
+ headers=extra_headers or {},
+ model="None",
+ )
+
+ if extra_headers:
+ headers.update(extra_headers)
+
+ api_base = responses_api_provider_config.get_complete_url(
+ api_base=litellm_params.api_base,
+ litellm_params=dict(litellm_params),
+ )
+
+ url, data = responses_api_provider_config.transform_delete_response_api_request(
+ response_id=response_id,
+ api_base=api_base,
+ litellm_params=litellm_params,
+ headers=headers,
+ )
+
+ ## LOGGING
+ logging_obj.pre_call(
+ input=input,
+ api_key="",
+ additional_args={
+ "complete_input_dict": data,
+ "api_base": api_base,
+ "headers": headers,
+ },
+ )
+
+ try:
+ response = sync_httpx_client.delete(
+ url=url, headers=headers, data=json.dumps(data), timeout=timeout
+ )
+
+ except Exception as e:
+ raise self._handle_error(
+ e=e,
+ provider_config=responses_api_provider_config,
+ )
+
+ return responses_api_provider_config.transform_delete_response_api_response(
+ raw_response=response,
+ logging_obj=logging_obj,
+ )
+
+ def create_file(
+ self,
+ create_file_data: CreateFileRequest,
+ litellm_params: dict,
+ provider_config: BaseFilesConfig,
+ headers: dict,
+ api_base: Optional[str],
+ api_key: Optional[str],
+ logging_obj: LiteLLMLoggingObj,
+ _is_async: bool = False,
+ client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+ timeout: Optional[Union[float, httpx.Timeout]] = None,
+ ) -> Union[OpenAIFileObject, Coroutine[Any, Any, OpenAIFileObject]]:
+ """
+ Creates a file with the provider. Handles both direct uploads (str/bytes
+ payloads) and Gemini-style two-step uploads (dict payloads).
+ """
+ # get config from model, custom llm provider
+ headers = provider_config.validate_environment(
+ api_key=api_key,
+ headers=headers,
+ model="",
+ messages=[],
+ optional_params={},
+ litellm_params=litellm_params,
+ )
+
+ api_base = provider_config.get_complete_file_url(
+ api_base=api_base,
+ api_key=api_key,
+ model="",
+ optional_params={},
+ litellm_params=litellm_params,
+ data=create_file_data,
+ )
+ if api_base is None:
+ raise ValueError("api_base is required for create_file")
+
+ # Get the transformed request data for both steps
+ transformed_request = provider_config.transform_create_file_request(
+ model="",
+ create_file_data=create_file_data,
+ litellm_params=litellm_params,
+ optional_params={},
+ )
+
+ if _is_async:
+ return self.async_create_file(
+ transformed_request=transformed_request,
+ litellm_params=litellm_params,
+ provider_config=provider_config,
+ headers=headers,
+ api_base=api_base,
+ logging_obj=logging_obj,
+ client=client,
+ timeout=timeout,
+ )
+
+ if client is None or not isinstance(client, HTTPHandler):
+ sync_httpx_client = _get_httpx_client()
+ else:
+ sync_httpx_client = client
+
+ if isinstance(transformed_request, str) or isinstance(
+ transformed_request, bytes
+ ):
+ upload_response = sync_httpx_client.post(
+ url=api_base,
+ headers=headers,
+ data=transformed_request,
+ timeout=timeout,
+ )
+ else:
+ try:
+ # Step 1: Initial request to get upload URL
+ initial_response = sync_httpx_client.post(
+ url=api_base,
+ headers={
+ **headers,
+ **transformed_request["initial_request"]["headers"],
+ },
+ data=json.dumps(transformed_request["initial_request"]["data"]),
+ timeout=timeout,
+ )
+
+ # Extract upload URL from response headers
+ upload_url = initial_response.headers.get("X-Goog-Upload-URL")
+
+ if not upload_url:
+ raise ValueError("Failed to get upload URL from initial request")
+
+ # Step 2: Upload the actual file
+ upload_response = sync_httpx_client.post(
+ url=upload_url,
+ headers=transformed_request["upload_request"]["headers"],
+ data=transformed_request["upload_request"]["data"],
+ timeout=timeout,
+ )
+ except Exception as e:
+ raise self._handle_error(
+ e=e,
+ provider_config=provider_config,
+ )
+
+ return provider_config.transform_create_file_response(
+ model=None,
+ raw_response=upload_response,
+ logging_obj=logging_obj,
+ litellm_params=litellm_params,
+ )
+
+ async def async_create_file(
+ self,
+ transformed_request: Union[bytes, str, dict],
+ litellm_params: dict,
+ provider_config: BaseFilesConfig,
+ headers: dict,
+ api_base: str,
+ logging_obj: LiteLLMLoggingObj,
+ client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+ timeout: Optional[Union[float, httpx.Timeout]] = None,
+ ):
+ """
+ Async variant of create_file: handles both direct uploads and Gemini's
+ two-step upload process.
+ """
+ if client is None or not isinstance(client, AsyncHTTPHandler):
+ async_httpx_client = get_async_httpx_client(
+ llm_provider=provider_config.custom_llm_provider
+ )
+ else:
+ async_httpx_client = client
+
+ if isinstance(transformed_request, str) or isinstance(
+ transformed_request, bytes
+ ):
+ upload_response = await async_httpx_client.post(
+ url=api_base,
+ headers=headers,
+ data=transformed_request,
+ timeout=timeout,
+ )
+ else:
+ try:
+ # Step 1: Initial request to get upload URL
+ initial_response = await async_httpx_client.post(
+ url=api_base,
+ headers={
+ **headers,
+ **transformed_request["initial_request"]["headers"],
+ },
+ data=json.dumps(transformed_request["initial_request"]["data"]),
+ timeout=timeout,
+ )
+
+ # Extract upload URL from response headers
+ upload_url = initial_response.headers.get("X-Goog-Upload-URL")
+
+ if not upload_url:
+ raise ValueError("Failed to get upload URL from initial request")
+
+ # Step 2: Upload the actual file
+ upload_response = await async_httpx_client.post(
+ url=upload_url,
+ headers=transformed_request["upload_request"]["headers"],
+ data=transformed_request["upload_request"]["data"],
+ timeout=timeout,
+ )
+ except Exception as e:
+ verbose_logger.exception(f"Error creating file: {e}")
+ raise self._handle_error(
+ e=e,
+ provider_config=provider_config,
+ )
+
+ return provider_config.transform_create_file_response(
+ model=None,
+ raw_response=upload_response,
+ logging_obj=logging_obj,
+ litellm_params=litellm_params,
+ )
+
+ def list_files(self):
+ """
+ Lists all files
+ """
+ pass
+
+ def delete_file(self):
+ """
+ Deletes a file
+ """
+ pass
+
+ def retrieve_file(self):
+ """
+ Returns the metadata of the file
+ """
+ pass
+
+ def retrieve_file_content(self):
+ """
+ Returns the content of the file
+ """
+ pass
+
+ def _prepare_fake_stream_request(
+ self,
+ stream: bool,
+ data: dict,
+ fake_stream: bool,
+ ) -> Tuple[bool, dict]:
+ """
+ Handles preparing a request when `fake_stream` is True.
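+ Forces `stream=False` and drops the `stream` key from the request payload so
+ the call is sent as a regular, non-streaming request.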
+ """
+ if fake_stream is True:
+ stream = False
+ data.pop("stream", None)
+ return stream, data
+ return stream, data
+
def _handle_error(
self,
e: Exception,
diff --git a/litellm/llms/databricks/chat/handler.py b/litellm/llms/databricks/chat/handler.py
deleted file mode 100644
index abb714746c..0000000000
--- a/litellm/llms/databricks/chat/handler.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""
-Handles the chat completion request for Databricks
-"""
-
-from typing import Callable, List, Optional, Union, cast
-
-from httpx._config import Timeout
-
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
-from litellm.types.llms.openai import AllMessageValues
-from litellm.types.utils import CustomStreamingDecoder
-from litellm.utils import ModelResponse
-
-from ...openai_like.chat.handler import OpenAILikeChatHandler
-from ..common_utils import DatabricksBase
-from .transformation import DatabricksConfig
-
-
-class DatabricksChatCompletion(OpenAILikeChatHandler, DatabricksBase):
- def __init__(self, **kwargs):
- super().__init__(**kwargs)
-
- def completion(
- self,
- *,
- model: str,
- messages: list,
- api_base: str,
- custom_llm_provider: str,
- custom_prompt_dict: dict,
- model_response: ModelResponse,
- print_verbose: Callable,
- encoding,
- api_key: Optional[str],
- logging_obj,
- optional_params: dict,
- acompletion=None,
- litellm_params=None,
- logger_fn=None,
- headers: Optional[dict] = None,
- timeout: Optional[Union[float, Timeout]] = None,
- client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
- custom_endpoint: Optional[bool] = None,
- streaming_decoder: Optional[CustomStreamingDecoder] = None,
- fake_stream: bool = False,
- ):
- messages = DatabricksConfig()._transform_messages(
- messages=cast(List[AllMessageValues], messages), model=model
- )
- api_base, headers = self.databricks_validate_environment(
- api_base=api_base,
- api_key=api_key,
- endpoint_type="chat_completions",
- custom_endpoint=custom_endpoint,
- headers=headers,
- )
-
- if optional_params.get("stream") is True:
- fake_stream = DatabricksConfig()._should_fake_stream(optional_params)
- else:
- fake_stream = False
-
- return super().completion(
- model=model,
- messages=messages,
- api_base=api_base,
- custom_llm_provider=custom_llm_provider,
- custom_prompt_dict=custom_prompt_dict,
- model_response=model_response,
- print_verbose=print_verbose,
- encoding=encoding,
- api_key=api_key,
- logging_obj=logging_obj,
- optional_params=optional_params,
- acompletion=acompletion,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- headers=headers,
- timeout=timeout,
- client=client,
- custom_endpoint=True,
- streaming_decoder=streaming_decoder,
- fake_stream=fake_stream,
- )
diff --git a/litellm/llms/databricks/chat/transformation.py b/litellm/llms/databricks/chat/transformation.py
index 94e0203459..7eb3d82963 100644
--- a/litellm/llms/databricks/chat/transformation.py
+++ b/litellm/llms/databricks/chat/transformation.py
@@ -2,21 +2,69 @@
Translates from OpenAI's `/v1/chat/completions` to Databricks' `/chat/completions`
"""
-from typing import List, Optional, Union
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ AsyncIterator,
+ Iterator,
+ List,
+ Optional,
+ Tuple,
+ Union,
+ cast,
+)
+import httpx
from pydantic import BaseModel
+from litellm.constants import RESPONSE_FORMAT_TOOL_NAME
+from litellm.litellm_core_utils.llm_response_utils.convert_dict_to_response import (
+ _handle_invalid_parallel_tool_calls,
+ _should_convert_tool_call_to_json_mode,
+)
from litellm.litellm_core_utils.prompt_templates.common_utils import (
handle_messages_with_content_list_to_str_conversion,
strip_name_from_messages,
)
-from litellm.types.llms.openai import AllMessageValues
-from litellm.types.utils import ProviderField
+from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator
+from litellm.types.llms.anthropic import AllAnthropicToolsValues
+from litellm.types.llms.databricks import (
+ AllDatabricksContentValues,
+ DatabricksChoice,
+ DatabricksFunction,
+ DatabricksResponse,
+ DatabricksTool,
+)
+from litellm.types.llms.openai import (
+ AllMessageValues,
+ ChatCompletionRedactedThinkingBlock,
+ ChatCompletionThinkingBlock,
+ ChatCompletionToolChoiceFunctionParam,
+ ChatCompletionToolChoiceObjectParam,
+)
+from litellm.types.utils import (
+ ChatCompletionMessageToolCall,
+ Choices,
+ Message,
+ ModelResponse,
+ ModelResponseStream,
+ ProviderField,
+ Usage,
+)
+from ...anthropic.chat.transformation import AnthropicConfig
from ...openai_like.chat.transformation import OpenAILikeChatConfig
+from ..common_utils import DatabricksBase, DatabricksException
+
+if TYPE_CHECKING:
+ from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+ LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+ LiteLLMLoggingObj = Any
-class DatabricksConfig(OpenAILikeChatConfig):
+class DatabricksConfig(DatabricksBase, OpenAILikeChatConfig, AnthropicConfig):
"""
Reference: https://docs.databricks.com/en/machine-learning/foundation-models/api-reference.html#chat-request
"""
@@ -63,6 +111,40 @@ class DatabricksConfig(OpenAILikeChatConfig):
),
]
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ api_base, headers = self.databricks_validate_environment(
+ api_base=api_base,
+ api_key=api_key,
+ endpoint_type="chat_completions",
+ custom_endpoint=False,
+ headers=headers,
+ )
+ # Ensure Content-Type header is set
+ headers["Content-Type"] = "application/json"
+ return headers
+
+ def get_complete_url(
+ self,
+ api_base: Optional[str],
+ api_key: Optional[str],
+ model: str,
+ optional_params: dict,
+ litellm_params: dict,
+ stream: Optional[bool] = None,
+ ) -> str:
+ api_base = self._get_api_base(api_base)
+ complete_url = f"{api_base}/chat/completions"
+ return complete_url
+
def get_supported_openai_params(self, model: Optional[str] = None) -> list:
return [
"stream",
@@ -75,8 +157,116 @@ class DatabricksConfig(OpenAILikeChatConfig):
"response_format",
"tools",
"tool_choice",
+ "reasoning_effort",
+ "thinking",
]
+ def convert_anthropic_tool_to_databricks_tool(
+ self, tool: Optional[AllAnthropicToolsValues]
+ ) -> Optional[DatabricksTool]:
+ if tool is None:
+ return None
+
+ return DatabricksTool(
+ type="function",
+ function=DatabricksFunction(
+ name=tool["name"],
+ parameters=cast(dict, tool.get("input_schema") or {}),
+ ),
+ )
+
+ def _map_openai_to_dbrx_tool(self, model: str, tools: List) -> List[DatabricksTool]:
+ # if not claude, send as is
+ if "claude" not in model:
+ return tools
+
+ # if claude, convert to anthropic tool and then to databricks tool
+ anthropic_tools = self._map_tools(tools=tools)
+ databricks_tools = [
+ cast(DatabricksTool, self.convert_anthropic_tool_to_databricks_tool(tool))
+ for tool in anthropic_tools
+ ]
+ return databricks_tools
+
+ def map_response_format_to_databricks_tool(
+ self,
+ model: str,
+ value: Optional[dict],
+ optional_params: dict,
+ is_thinking_enabled: bool,
+ ) -> Optional[DatabricksTool]:
+ if value is None:
+ return None
+
+ tool = self.map_response_format_to_anthropic_tool(
+ value, optional_params, is_thinking_enabled
+ )
+
+ databricks_tool = self.convert_anthropic_tool_to_databricks_tool(tool)
+ return databricks_tool
+
+ def map_openai_params(
+ self,
+ non_default_params: dict,
+ optional_params: dict,
+ model: str,
+ drop_params: bool,
+ replace_max_completion_tokens_with_max_tokens: bool = True,
+ ) -> dict:
+ is_thinking_enabled = self.is_thinking_enabled(non_default_params)
+ mapped_params = super().map_openai_params(
+ non_default_params, optional_params, model, drop_params
+ )
+ if "tools" in mapped_params:
+ mapped_params["tools"] = self._map_openai_to_dbrx_tool(
+ model=model, tools=mapped_params["tools"]
+ )
+ if (
+ "max_completion_tokens" in non_default_params
+ and replace_max_completion_tokens_with_max_tokens
+ ):
+ mapped_params["max_tokens"] = non_default_params[
+ "max_completion_tokens"
+ ] # most openai-compatible providers support 'max_tokens' not 'max_completion_tokens'
+ mapped_params.pop("max_completion_tokens", None)
+
+ if "response_format" in non_default_params and "claude" in model:
+ _tool = self.map_response_format_to_databricks_tool(
+ model,
+ non_default_params["response_format"],
+ mapped_params,
+ is_thinking_enabled,
+ )
+
+ if _tool is not None:
+ self._add_tools_to_optional_params(
+ optional_params=optional_params, tools=[_tool]
+ )
+ optional_params["json_mode"] = True
+ if not is_thinking_enabled:
+ _tool_choice = ChatCompletionToolChoiceObjectParam(
+ type="function",
+ function=ChatCompletionToolChoiceFunctionParam(
+ name=RESPONSE_FORMAT_TOOL_NAME
+ ),
+ )
+ optional_params["tool_choice"] = _tool_choice
+ optional_params.pop(
+ "response_format", None
+ ) # unsupported for claude models - if json_schema -> convert to tool call
+
+ if "reasoning_effort" in non_default_params and "claude" in model:
+ optional_params["thinking"] = AnthropicConfig._map_reasoning_effort(
+ non_default_params.get("reasoning_effort")
+ )
+ optional_params.pop("reasoning_effort", None)
+ ## handle thinking tokens
+ self.update_optional_params_with_thinking_tokens(
+ non_default_params=non_default_params, optional_params=mapped_params
+ )
+
+ return mapped_params
+
def _should_fake_stream(self, optional_params: dict) -> bool:
"""
Databricks doesn't support 'response_format' while streaming
@@ -104,3 +294,273 @@ class DatabricksConfig(OpenAILikeChatConfig):
new_messages = handle_messages_with_content_list_to_str_conversion(new_messages)
new_messages = strip_name_from_messages(new_messages)
return super()._transform_messages(messages=new_messages, model=model)
+
+ @staticmethod
+ def extract_content_str(
+ content: Optional[AllDatabricksContentValues],
+ ) -> Optional[str]:
+ if content is None:
+ return None
+ if isinstance(content, str):
+ return content
+ elif isinstance(content, list):
+ content_str = ""
+ for item in content:
+ if item["type"] == "text":
+ content_str += item["text"]
+ return content_str
+ else:
+ raise Exception(f"Unsupported content type: {type(content)}")
+
+ @staticmethod
+ def extract_reasoning_content(
+ content: Optional[AllDatabricksContentValues],
+ ) -> Tuple[
+ Optional[str],
+ Optional[
+ List[
+ Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]
+ ]
+ ],
+ ]:
+ """
+ Extract and return the reasoning content and thinking blocks
+ """
+ if content is None:
+ return None, None
+ thinking_blocks: Optional[
+ List[
+ Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]
+ ]
+ ] = None
+ reasoning_content: Optional[str] = None
+ if isinstance(content, list):
+ for item in content:
+ if item["type"] == "reasoning":
+ for summary_block in item["summary"]:
+ if reasoning_content is None:
+ reasoning_content = ""
+ reasoning_content += summary_block["text"]
+ thinking_block = ChatCompletionThinkingBlock(
+ type="thinking",
+ thinking=summary_block["text"],
+ signature=summary_block["signature"],
+ )
+ if thinking_blocks is None:
+ thinking_blocks = []
+ thinking_blocks.append(thinking_block)
+ return reasoning_content, thinking_blocks
+
+ def _transform_choices(
+ self, choices: List[DatabricksChoice], json_mode: Optional[bool] = None
+ ) -> List[Choices]:
+ transformed_choices = []
+
+ for choice in choices:
+ ## HANDLE JSON MODE - anthropic returns a single function call
+ tool_calls = choice["message"].get("tool_calls", None)
+ if tool_calls is not None:
+ _openai_tool_calls = []
+ for _tc in tool_calls:
+ _openai_tc = ChatCompletionMessageToolCall(**_tc) # type: ignore
+ _openai_tool_calls.append(_openai_tc)
+ fixed_tool_calls = _handle_invalid_parallel_tool_calls(
+ _openai_tool_calls
+ )
+
+ if fixed_tool_calls is not None:
+ tool_calls = fixed_tool_calls
+
+ translated_message: Optional[Message] = None
+ finish_reason: Optional[str] = None
+ if tool_calls and _should_convert_tool_call_to_json_mode(
+ tool_calls=tool_calls,
+ convert_tool_call_to_json_mode=json_mode,
+ ):
+ # to support response_format on claude models
+ json_mode_content_str: Optional[str] = (
+ str(tool_calls[0]["function"].get("arguments", "")) or None
+ )
+ if json_mode_content_str is not None:
+ translated_message = Message(content=json_mode_content_str)
+ finish_reason = "stop"
+
+ if translated_message is None:
+ ## get the content str
+ content_str = DatabricksConfig.extract_content_str(
+ choice["message"]["content"]
+ )
+
+ ## get the reasoning content
+ (
+ reasoning_content,
+ thinking_blocks,
+ ) = DatabricksConfig.extract_reasoning_content(
+ choice["message"].get("content")
+ )
+
+ translated_message = Message(
+ role="assistant",
+ content=content_str,
+ reasoning_content=reasoning_content,
+ thinking_blocks=thinking_blocks,
+ tool_calls=choice["message"].get("tool_calls"),
+ )
+
+ if finish_reason is None:
+ finish_reason = choice["finish_reason"]
+
+ translated_choice = Choices(
+ finish_reason=finish_reason,
+ index=choice["index"],
+ message=translated_message,
+ logprobs=None,
+ enhancements=None,
+ )
+
+ transformed_choices.append(translated_choice)
+
+ return transformed_choices
+
+ def transform_response(
+ self,
+ model: str,
+ raw_response: httpx.Response,
+ model_response: ModelResponse,
+ logging_obj: LiteLLMLoggingObj,
+ request_data: dict,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ encoding: Any,
+ api_key: Optional[str] = None,
+ json_mode: Optional[bool] = None,
+ ) -> ModelResponse:
+ ## LOGGING
+ logging_obj.post_call(
+ input=messages,
+ api_key=api_key,
+ original_response=raw_response.text,
+ additional_args={"complete_input_dict": request_data},
+ )
+
+ ## RESPONSE OBJECT
+ try:
+ completion_response = DatabricksResponse(**raw_response.json()) # type: ignore
+ except Exception as e:
+ response_headers = getattr(raw_response, "headers", None)
+ raise DatabricksException(
+ message="Unable to get json response - {}, Original Response: {}".format(
+ str(e), raw_response.text
+ ),
+ status_code=raw_response.status_code,
+ headers=response_headers,
+ )
+
+ model_response.model = completion_response["model"]
+ model_response.id = completion_response["id"]
+ model_response.created = completion_response["created"]
+ setattr(model_response, "usage", Usage(**completion_response["usage"]))
+
+ model_response.choices = self._transform_choices( # type: ignore
+ choices=completion_response["choices"],
+ json_mode=json_mode,
+ )
+
+ return model_response
+
+ def get_model_response_iterator(
+ self,
+ streaming_response: Union[Iterator[str], AsyncIterator[str], ModelResponse],
+ sync_stream: bool,
+ json_mode: Optional[bool] = False,
+ ):
+ return DatabricksChatResponseIterator(
+ streaming_response=streaming_response,
+ sync_stream=sync_stream,
+ json_mode=json_mode,
+ )
+
+
+class DatabricksChatResponseIterator(BaseModelResponseIterator):
+ def __init__(
+ self,
+ streaming_response: Union[Iterator[str], AsyncIterator[str], ModelResponse],
+ sync_stream: bool,
+ json_mode: Optional[bool] = False,
+ ):
+ super().__init__(streaming_response, sync_stream)
+
+ self.json_mode = json_mode
+ self._last_function_name = None # Track the last seen function name
+
+ def chunk_parser(self, chunk: dict) -> ModelResponseStream:
+ try:
+ translated_choices = []
+ for choice in chunk["choices"]:
+ tool_calls = choice["delta"].get("tool_calls")
+ if tool_calls and self.json_mode:
+ # 1. Check if the function name is set and == RESPONSE_FORMAT_TOOL_NAME
+ # 2. If no function name, just args -> check last function name (saved via state variable)
+ # 3. Convert args to json
+ # 4. Convert json to message
+ # 5. Set content to message.content
+ # 6. Set tool_calls to None
+ from litellm.constants import RESPONSE_FORMAT_TOOL_NAME
+ from litellm.llms.base_llm.base_utils import (
+ _convert_tool_response_to_message,
+ )
+
+ # Check if this chunk has a function name
+ function_name = tool_calls[0].get("function", {}).get("name")
+ if function_name is not None:
+ self._last_function_name = function_name
+
+ # If we have a saved function name that matches RESPONSE_FORMAT_TOOL_NAME
+ # or this chunk has the matching function name
+ if (
+ self._last_function_name == RESPONSE_FORMAT_TOOL_NAME
+ or function_name == RESPONSE_FORMAT_TOOL_NAME
+ ):
+ # Convert tool calls to message format
+ message = _convert_tool_response_to_message(tool_calls)
+ if message is not None:
+ if message.content == "{}": # empty json
+ message.content = ""
+ choice["delta"]["content"] = message.content
+ choice["delta"]["tool_calls"] = None
+ elif tool_calls:
+ for _tc in tool_calls:
+ if _tc.get("function", {}).get("arguments") == "{}":
+ _tc["function"]["arguments"] = "" # avoid invalid json
+ # extract the content str
+ content_str = DatabricksConfig.extract_content_str(
+ choice["delta"].get("content")
+ )
+
+ # extract the reasoning content
+ (
+ reasoning_content,
+ thinking_blocks,
+ ) = DatabricksConfig.extract_reasoning_content(
+ choice["delta"]["content"]
+ )
+
+ choice["delta"]["content"] = content_str
+ choice["delta"]["reasoning_content"] = reasoning_content
+ choice["delta"]["thinking_blocks"] = thinking_blocks
+ translated_choices.append(choice)
+ return ModelResponseStream(
+ id=chunk["id"],
+ object="chat.completion.chunk",
+ created=chunk["created"],
+ model=chunk["model"],
+ choices=translated_choices,
+ )
+ except KeyError as e:
+ raise DatabricksException(
+ message=f"KeyError: {e}, Got unexpected response from Databricks: {chunk}",
+ status_code=400,
+ )
+ except Exception as e:
+ raise e
diff --git a/litellm/llms/databricks/common_utils.py b/litellm/llms/databricks/common_utils.py
index e8481e25b2..1353b5b13f 100644
--- a/litellm/llms/databricks/common_utils.py
+++ b/litellm/llms/databricks/common_utils.py
@@ -1,9 +1,35 @@
from typing import Literal, Optional, Tuple
-from .exceptions import DatabricksError
+from litellm.llms.base_llm.chat.transformation import BaseLLMException
+
+
+class DatabricksException(BaseLLMException):
+ pass
class DatabricksBase:
+ def _get_api_base(self, api_base: Optional[str]) -> str:
+ if api_base is None:
+ try:
+ from databricks.sdk import WorkspaceClient
+
+ databricks_client = WorkspaceClient()
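+ # WorkspaceClient() resolves the host and credentials via the standard
+ # Databricks config chain (env vars, ~/.databrickscfg profiles, etc.).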
+
+ api_base = (
+ api_base or f"{databricks_client.config.host}/serving-endpoints"
+ )
+
+ return api_base
+ except ImportError:
+ raise DatabricksException(
+ status_code=400,
+ message=(
+ "Either set the DATABRICKS_API_BASE and DATABRICKS_API_KEY environment variables, "
+ "or install the databricks-sdk Python library."
+ ),
+ )
+ return api_base
+
def _get_databricks_credentials(
self, api_key: Optional[str], api_base: Optional[str], headers: Optional[dict]
) -> Tuple[str, dict]:
@@ -16,14 +42,14 @@ class DatabricksBase:
api_base = api_base or f"{databricks_client.config.host}/serving-endpoints"
if api_key is None:
- databricks_auth_headers: dict[str, str] = (
- databricks_client.config.authenticate()
- )
+ databricks_auth_headers: dict[
+ str, str
+ ] = databricks_client.config.authenticate()
headers = {**databricks_auth_headers, **headers}
return api_base, headers
except ImportError:
- raise DatabricksError(
+ raise DatabricksException(
status_code=400,
message=(
"If the Databricks base URL and API key are not set, the databricks-sdk "
@@ -41,9 +67,9 @@ class DatabricksBase:
custom_endpoint: Optional[bool],
headers: Optional[dict],
) -> Tuple[str, dict]:
- if api_key is None and headers is None:
- if custom_endpoint is not None:
- raise DatabricksError(
+ if api_key is None and not headers: # handle empty headers
+ if custom_endpoint is True:
+ raise DatabricksException(
status_code=400,
message="Missing API Key - A call is being made to LLM Provider but no key is set either in the environment variables ({LLM_PROVIDER}_API_KEY) or via params",
)
@@ -54,7 +80,7 @@ class DatabricksBase:
if api_base is None:
if custom_endpoint:
- raise DatabricksError(
+ raise DatabricksException(
status_code=400,
message="Missing API Base - A call is being made to LLM Provider but no api base is set either in the environment variables ({LLM_PROVIDER}_API_KEY) or via params",
)
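A small sketch of the fallback order `_get_api_base` relies on, shown as a standalone helper: an explicit argument wins, otherwise an environment variable (the `DATABRICKS_API_BASE` step is an assumption about how callers usually supply it), and only then the optional databricks-sdk workspace client.

import os
from typing import Optional

def resolve_databricks_api_base(api_base: Optional[str] = None) -> str:
    """Explicit argument -> environment variable -> databricks-sdk workspace host."""
    api_base = api_base or os.getenv("DATABRICKS_API_BASE")
    if api_base:
        return api_base
    try:
        from databricks.sdk import WorkspaceClient  # optional dependency
    except ImportError as e:
        raise RuntimeError(
            "Set DATABRICKS_API_BASE / DATABRICKS_API_KEY or install the databricks-sdk package"
        ) from e
    return f"{WorkspaceClient().config.host}/serving-endpoints"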
diff --git a/litellm/llms/databricks/embed/transformation.py b/litellm/llms/databricks/embed/transformation.py
index 53e3b30dd2..a113a349cc 100644
--- a/litellm/llms/databricks/embed/transformation.py
+++ b/litellm/llms/databricks/embed/transformation.py
@@ -11,9 +11,9 @@ class DatabricksEmbeddingConfig:
Reference: https://learn.microsoft.com/en-us/azure/databricks/machine-learning/foundation-models/api-reference#--embedding-task
"""
- instruction: Optional[str] = (
- None # An optional instruction to pass to the embedding model. BGE Authors recommend 'Represent this sentence for searching relevant passages:' for retrieval queries
- )
+ instruction: Optional[
+ str
+ ] = None # An optional instruction to pass to the embedding model. BGE Authors recommend 'Represent this sentence for searching relevant passages:' for retrieval queries
def __init__(self, instruction: Optional[str] = None) -> None:
locals_ = locals().copy()
diff --git a/litellm/llms/databricks/exceptions.py b/litellm/llms/databricks/exceptions.py
deleted file mode 100644
index 8bb3d435d0..0000000000
--- a/litellm/llms/databricks/exceptions.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import httpx
-
-
-class DatabricksError(Exception):
- def __init__(self, status_code, message):
- self.status_code = status_code
- self.message = message
- self.request = httpx.Request(method="POST", url="https://docs.databricks.com/")
- self.response = httpx.Response(status_code=status_code, request=self.request)
- super().__init__(
- self.message
- ) # Call the base class constructor with the parameters it needs
diff --git a/litellm/llms/databricks/streaming_utils.py b/litellm/llms/databricks/streaming_utils.py
index 2db53df908..eebe318288 100644
--- a/litellm/llms/databricks/streaming_utils.py
+++ b/litellm/llms/databricks/streaming_utils.py
@@ -55,7 +55,6 @@ class ModelResponseIterator:
usage_chunk: Optional[Usage] = getattr(processed_chunk, "usage", None)
if usage_chunk is not None:
-
usage = ChatCompletionUsageBlock(
prompt_tokens=usage_chunk.prompt_tokens,
completion_tokens=usage_chunk.completion_tokens,
diff --git a/litellm/llms/deepgram/audio_transcription/transformation.py b/litellm/llms/deepgram/audio_transcription/transformation.py
index 06296736ea..f1b18808f7 100644
--- a/litellm/llms/deepgram/audio_transcription/transformation.py
+++ b/litellm/llms/deepgram/audio_transcription/transformation.py
@@ -2,6 +2,7 @@
Translates from OpenAI's `/v1/audio/transcriptions` to Deepgram's `/v1/listen`
"""
+import io
from typing import List, Optional, Union
from httpx import Headers, Response
@@ -12,7 +13,7 @@ from litellm.types.llms.openai import (
AllMessageValues,
OpenAIAudioTranscriptionOptionalParams,
)
-from litellm.types.utils import TranscriptionResponse
+from litellm.types.utils import FileTypes, TranscriptionResponse
from ...base_llm.audio_transcription.transformation import (
BaseAudioTranscriptionConfig,
@@ -47,6 +48,55 @@ class DeepgramAudioTranscriptionConfig(BaseAudioTranscriptionConfig):
message=error_message, status_code=status_code, headers=headers
)
+ def transform_audio_transcription_request(
+ self,
+ model: str,
+ audio_file: FileTypes,
+ optional_params: dict,
+ litellm_params: dict,
+ ) -> Union[dict, bytes]:
+ """
+ Processes the audio file input based on its type and returns the binary data.
+
+ Args:
+ audio_file: Can be a file path (str), a tuple (filename, file_content), binary data (bytes), or a file-like object (io.BufferedReader / io.BytesIO).
+
+ Returns:
+ The binary data of the audio file.
+ """
+ binary_data: bytes # Explicitly declare the type
+
+ # Handle the audio file based on type
+ if isinstance(audio_file, str):
+ # If it's a file path
+ with open(audio_file, "rb") as f:
+ binary_data = f.read() # `f.read()` always returns `bytes`
+ elif isinstance(audio_file, tuple):
+ # Handle tuple case
+ _, file_content = audio_file[:2]
+ if isinstance(file_content, str):
+ with open(file_content, "rb") as f:
+ binary_data = f.read() # `f.read()` always returns `bytes`
+ elif isinstance(file_content, bytes):
+ binary_data = file_content
+ else:
+ raise TypeError(
+ f"Unexpected type in tuple: {type(file_content)}. Expected str or bytes."
+ )
+ elif isinstance(audio_file, bytes):
+ # Assume it's already binary data
+ binary_data = audio_file
+ elif isinstance(audio_file, io.BufferedReader) or isinstance(
+ audio_file, io.BytesIO
+ ):
+ # Handle file-like objects
+ binary_data = audio_file.read()
+
+ else:
+ raise TypeError(f"Unsupported type for audio_file: {type(audio_file)}")
+
+ return binary_data
+
def transform_audio_transcription_response(
self,
model: str,
@@ -76,9 +126,9 @@ class DeepgramAudioTranscriptionConfig(BaseAudioTranscriptionConfig):
# Add additional metadata matching OpenAI format
response["task"] = "transcribe"
- response["language"] = (
- "english" # Deepgram auto-detects but doesn't return language
- )
+ response[
+ "language"
+ ] = "english" # Deepgram auto-detects but doesn't return language
response["duration"] = response_json["metadata"]["duration"]
# Transform words to match OpenAI format
@@ -101,6 +151,7 @@ class DeepgramAudioTranscriptionConfig(BaseAudioTranscriptionConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
@@ -120,6 +171,7 @@ class DeepgramAudioTranscriptionConfig(BaseAudioTranscriptionConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
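The new `transform_audio_transcription_request` accepts several shapes of audio input; the sketch below mirrors that branching as a standalone helper so the accepted types are easy to see at a glance (names are illustrative, not part of the diff).

import io
from typing import Tuple, Union

AudioInput = Union[str, bytes, Tuple, io.BufferedReader, io.BytesIO]

def read_audio_bytes(audio_file: AudioInput) -> bytes:
    """Normalize any supported audio input into raw bytes."""
    if isinstance(audio_file, str):  # path on disk
        with open(audio_file, "rb") as f:
            return f.read()
    if isinstance(audio_file, tuple):  # (filename, content) pair
        _, content = audio_file[:2]
        if isinstance(content, str):
            with open(content, "rb") as f:
                return f.read()
        if isinstance(content, bytes):
            return content
        raise TypeError(f"Unexpected type in tuple: {type(content)}")
    if isinstance(audio_file, bytes):  # already raw bytes
        return audio_file
    if isinstance(audio_file, (io.BufferedReader, io.BytesIO)):  # file-like object
        return audio_file.read()
    raise TypeError(f"Unsupported type for audio_file: {type(audio_file)}")

# e.g. read_audio_bytes("sample.wav") or read_audio_bytes(io.BytesIO(b"RIFF..."))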
diff --git a/litellm/llms/deepinfra/chat/transformation.py b/litellm/llms/deepinfra/chat/transformation.py
index 429759fad1..0d446d39b9 100644
--- a/litellm/llms/deepinfra/chat/transformation.py
+++ b/litellm/llms/deepinfra/chat/transformation.py
@@ -1,6 +1,7 @@
from typing import Optional, Tuple, Union
import litellm
+from litellm.constants import MIN_NON_ZERO_TEMPERATURE
from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig
from litellm.secret_managers.main import get_secret_str
@@ -84,7 +85,7 @@ class DeepInfraConfig(OpenAIGPTConfig):
and value == 0
and model == "mistralai/Mistral-7B-Instruct-v0.1"
 ): # this model does not support temperature == 0
- value = 0.0001 # close to 0
+ value = MIN_NON_ZERO_TEMPERATURE # close to 0
if param == "tool_choice":
if (
value != "auto" and value != "none"
diff --git a/litellm/llms/deepseek/chat/transformation.py b/litellm/llms/deepseek/chat/transformation.py
index 180cf7dc69..f429f46331 100644
--- a/litellm/llms/deepseek/chat/transformation.py
+++ b/litellm/llms/deepseek/chat/transformation.py
@@ -14,7 +14,6 @@ from ...openai.chat.gpt_transformation import OpenAIGPTConfig
class DeepSeekChatConfig(OpenAIGPTConfig):
-
def _transform_messages(
self, messages: List[AllMessageValues], model: str
) -> List[AllMessageValues]:
@@ -38,6 +37,7 @@ class DeepSeekChatConfig(OpenAIGPTConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
diff --git a/litellm/llms/deprecated_providers/aleph_alpha.py b/litellm/llms/deprecated_providers/aleph_alpha.py
index 81ad134641..4cfede2a1b 100644
--- a/litellm/llms/deprecated_providers/aleph_alpha.py
+++ b/litellm/llms/deprecated_providers/aleph_alpha.py
@@ -77,9 +77,9 @@ class AlephAlphaConfig:
- `control_log_additive` (boolean; default value: true): Method of applying control to attention scores.
"""
- maximum_tokens: Optional[int] = (
- litellm.max_tokens
- ) # aleph alpha requires max tokens
+ maximum_tokens: Optional[
+ int
+ ] = litellm.max_tokens # aleph alpha requires max tokens
minimum_tokens: Optional[int] = None
echo: Optional[bool] = None
temperature: Optional[int] = None
diff --git a/litellm/llms/fireworks_ai/audio_transcription/transformation.py b/litellm/llms/fireworks_ai/audio_transcription/transformation.py
index 8f35705299..00bb5f2679 100644
--- a/litellm/llms/fireworks_ai/audio_transcription/transformation.py
+++ b/litellm/llms/fireworks_ai/audio_transcription/transformation.py
@@ -2,27 +2,16 @@ from typing import List
from litellm.types.llms.openai import OpenAIAudioTranscriptionOptionalParams
-from ...base_llm.audio_transcription.transformation import BaseAudioTranscriptionConfig
+from ...openai.transcriptions.whisper_transformation import (
+ OpenAIWhisperAudioTranscriptionConfig,
+)
from ..common_utils import FireworksAIMixin
class FireworksAIAudioTranscriptionConfig(
- FireworksAIMixin, BaseAudioTranscriptionConfig
+ FireworksAIMixin, OpenAIWhisperAudioTranscriptionConfig
):
def get_supported_openai_params(
self, model: str
) -> List[OpenAIAudioTranscriptionOptionalParams]:
return ["language", "prompt", "response_format", "timestamp_granularities"]
-
- def map_openai_params(
- self,
- non_default_params: dict,
- optional_params: dict,
- model: str,
- drop_params: bool,
- ) -> dict:
- supported_params = self.get_supported_openai_params(model)
- for k, v in non_default_params.items():
- if k in supported_params:
- optional_params[k] = v
- return optional_params
diff --git a/litellm/llms/fireworks_ai/chat/transformation.py b/litellm/llms/fireworks_ai/chat/transformation.py
index 1c82f24ac0..2a795bdf2f 100644
--- a/litellm/llms/fireworks_ai/chat/transformation.py
+++ b/litellm/llms/fireworks_ai/chat/transformation.py
@@ -1,11 +1,33 @@
-from typing import List, Literal, Optional, Tuple, Union, cast
+import json
+import uuid
+from typing import Any, List, Literal, Optional, Tuple, Union, cast
+
+import httpx
import litellm
+from litellm.constants import RESPONSE_FORMAT_TOOL_NAME
+from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+from litellm.litellm_core_utils.llm_response_utils.get_headers import (
+ get_response_headers,
+)
from litellm.secret_managers.main import get_secret_str
-from litellm.types.llms.openai import AllMessageValues, ChatCompletionImageObject
-from litellm.types.utils import ProviderSpecificModelInfo
+from litellm.types.llms.openai import (
+ AllMessageValues,
+ ChatCompletionImageObject,
+ ChatCompletionToolParam,
+ OpenAIChatCompletionToolParam,
+)
+from litellm.types.utils import (
+ ChatCompletionMessageToolCall,
+ Choices,
+ Function,
+ Message,
+ ModelResponse,
+ ProviderSpecificModelInfo,
+)
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
+from ..common_utils import FireworksAIException
class FireworksAIConfig(OpenAIGPTConfig):
@@ -88,7 +110,6 @@ class FireworksAIConfig(OpenAIGPTConfig):
model: str,
drop_params: bool,
) -> dict:
-
supported_openai_params = self.get_supported_openai_params(model=model)
is_tools_set = any(
param == "tools" and value is not None
@@ -104,7 +125,6 @@ class FireworksAIConfig(OpenAIGPTConfig):
# pass through the value of tool choice
optional_params["tool_choice"] = value
elif param == "response_format":
-
if (
is_tools_set
): # fireworks ai doesn't support tools and response_format together
@@ -152,6 +172,14 @@ class FireworksAIConfig(OpenAIGPTConfig):
] = f"{content['image_url']['url']}#transform=inline"
return content
+ def _transform_tools(
+ self, tools: List[OpenAIChatCompletionToolParam]
+ ) -> List[OpenAIChatCompletionToolParam]:
+ for tool in tools:
+ if tool.get("type") == "function":
+ tool["function"].pop("strict", None)
+ return tools
+
def _transform_messages_helper(
self, messages: List[AllMessageValues], model: str, litellm_params: dict
) -> List[AllMessageValues]:
@@ -198,6 +226,9 @@ class FireworksAIConfig(OpenAIGPTConfig):
messages = self._transform_messages_helper(
messages=messages, model=model, litellm_params=litellm_params
)
+ if "tools" in optional_params and optional_params["tools"] is not None:
+ tools = self._transform_tools(tools=optional_params["tools"])
+ optional_params["tools"] = tools
return super().transform_request(
model=model,
messages=messages,
@@ -206,6 +237,94 @@ class FireworksAIConfig(OpenAIGPTConfig):
headers=headers,
)
+ def _handle_message_content_with_tool_calls(
+ self,
+ message: Message,
+ tool_calls: Optional[List[ChatCompletionToolParam]],
+ ) -> Message:
+ """
+ Fireworks AI sends tool calls in the content field instead of tool_calls
+
+ Relevant Issue: https://github.com/BerriAI/litellm/issues/7209#issuecomment-2813208780
+ """
+ if (
+ tool_calls is not None
+ and message.content is not None
+ and message.tool_calls is None
+ ):
+ try:
+ function = Function(**json.loads(message.content))
+ if function.name != RESPONSE_FORMAT_TOOL_NAME and function.name in [
+ tool["function"]["name"] for tool in tool_calls
+ ]:
+ tool_call = ChatCompletionMessageToolCall(
+ function=function, id=str(uuid.uuid4()), type="function"
+ )
+ message.tool_calls = [tool_call]
+
+ message.content = None
+ except Exception:
+ pass
+
+ return message
+
+ def transform_response(
+ self,
+ model: str,
+ raw_response: httpx.Response,
+ model_response: ModelResponse,
+ logging_obj: LiteLLMLoggingObj,
+ request_data: dict,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ encoding: Any,
+ api_key: Optional[str] = None,
+ json_mode: Optional[bool] = None,
+ ) -> ModelResponse:
+ ## LOGGING
+ logging_obj.post_call(
+ input=messages,
+ api_key=api_key,
+ original_response=raw_response.text,
+ additional_args={"complete_input_dict": request_data},
+ )
+
+ ## RESPONSE OBJECT
+ try:
+ completion_response = raw_response.json()
+ except Exception as e:
+ response_headers = getattr(raw_response, "headers", None)
+ raise FireworksAIException(
+ message="Unable to get json response - {}, Original Response: {}".format(
+ str(e), raw_response.text
+ ),
+ status_code=raw_response.status_code,
+ headers=response_headers,
+ )
+
+ raw_response_headers = dict(raw_response.headers)
+
+ additional_headers = get_response_headers(raw_response_headers)
+
+ response = ModelResponse(**completion_response)
+
+ if response.model is not None:
+ response.model = "fireworks_ai/" + response.model
+
+ ## FIREWORKS AI sends tool calls in the content field instead of tool_calls
+ for choice in response.choices:
+ cast(
+ Choices, choice
+ ).message = self._handle_message_content_with_tool_calls(
+ message=cast(Choices, choice).message,
+ tool_calls=optional_params.get("tools", None),
+ )
+
+ response._hidden_params = {"additional_headers": additional_headers}
+
+ return response
+
def _get_openai_compatible_provider_info(
self, api_base: Optional[str], api_key: Optional[str]
) -> Tuple[Optional[str], Optional[str]]:
@@ -223,7 +342,6 @@ class FireworksAIConfig(OpenAIGPTConfig):
return api_base, dynamic_api_key
def get_models(self, api_key: Optional[str] = None, api_base: Optional[str] = None):
-
api_base, api_key = self._get_openai_compatible_provider_info(
api_base=api_base, api_key=api_key
)
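For the `_handle_message_content_with_tool_calls` hook above: a standalone sketch of the recovery step using plain dicts, since the behaviour is easier to see outside litellm's `Message`/`Function` types. The shape of the parsed content (a `name`/`arguments` object) follows the issue linked in the docstring; everything else here is illustrative.

import json
import uuid
from typing import List, Optional

def recover_tool_call_from_content(
    content: Optional[str], requested_tools: List[dict]
) -> Optional[dict]:
    """If the assistant `content` is really a serialized function call for one of
    the requested tools, repackage it as an OpenAI-style tool_call entry."""
    if content is None:
        return None
    try:
        parsed = json.loads(content)
    except (TypeError, ValueError):
        return None  # genuine text content, leave it alone
    requested_names = {t["function"]["name"] for t in requested_tools}
    if not isinstance(parsed, dict) or parsed.get("name") not in requested_names:
        return None
    arguments = parsed.get("arguments", {})
    if not isinstance(arguments, str):
        arguments = json.dumps(arguments)
    return {
        "id": str(uuid.uuid4()),
        "type": "function",
        "function": {"name": parsed["name"], "arguments": arguments},
    }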
diff --git a/litellm/llms/fireworks_ai/common_utils.py b/litellm/llms/fireworks_ai/common_utils.py
index 293403b133..17aa67b525 100644
--- a/litellm/llms/fireworks_ai/common_utils.py
+++ b/litellm/llms/fireworks_ai/common_utils.py
@@ -41,6 +41,7 @@ class FireworksAIMixin:
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/fireworks_ai/cost_calculator.py b/litellm/llms/fireworks_ai/cost_calculator.py
index f53aba4a47..31414625ab 100644
--- a/litellm/llms/fireworks_ai/cost_calculator.py
+++ b/litellm/llms/fireworks_ai/cost_calculator.py
@@ -4,6 +4,12 @@ For calculating cost of fireworks ai serverless inference models.
from typing import Tuple
+from litellm.constants import (
+ FIREWORKS_AI_16_B,
+ FIREWORKS_AI_56_B_MOE,
+ FIREWORKS_AI_80_B,
+ FIREWORKS_AI_176_B_MOE,
+)
from litellm.types.utils import Usage
from litellm.utils import get_model_info
@@ -25,9 +31,9 @@ def get_base_model_for_pricing(model_name: str) -> str:
moe_match = re.search(r"(\d+)x(\d+)b", model_name)
if moe_match:
total_billion = int(moe_match.group(1)) * int(moe_match.group(2))
- if total_billion <= 56:
+ if total_billion <= FIREWORKS_AI_56_B_MOE:
return "fireworks-ai-moe-up-to-56b"
- elif total_billion <= 176:
+ elif total_billion <= FIREWORKS_AI_176_B_MOE:
return "fireworks-ai-56b-to-176b"
# Check for standard models in the form b
@@ -37,9 +43,9 @@ def get_base_model_for_pricing(model_name: str) -> str:
params_billion = float(params_match)
# Determine the category based on the number of parameters
- if params_billion <= 16.0:
+ if params_billion <= FIREWORKS_AI_16_B:
return "fireworks-ai-up-to-16b"
- elif params_billion <= 80.0:
+ elif params_billion <= FIREWORKS_AI_80_B:
return "fireworks-ai-16b-80b"
# If no matches, return the original model_name
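The cost calculator now buckets serverless models by parameter count through named constants. The sketch below restates that bucketing with inline threshold values (16 / 56 / 80 / 176, inferred from the constant names) so the pricing tiers are visible without opening litellm/constants.py.

import re

# Assumed threshold values, matching the constant names referenced above.
FIREWORKS_AI_16_B = 16.0
FIREWORKS_AI_56_B_MOE = 56.0
FIREWORKS_AI_80_B = 80.0
FIREWORKS_AI_176_B_MOE = 176.0

def bucket_for_pricing(model_name: str) -> str:
    """Classify a serverless model into a Fireworks pricing tier by its size suffix."""
    moe = re.search(r"(\d+)x(\d+)b", model_name)  # mixture-of-experts, e.g. "8x7b"
    if moe:
        total_b = int(moe.group(1)) * int(moe.group(2))
        if total_b <= FIREWORKS_AI_56_B_MOE:
            return "fireworks-ai-moe-up-to-56b"
        if total_b <= FIREWORKS_AI_176_B_MOE:
            return "fireworks-ai-56b-to-176b"
    dense = re.search(r"(\d+(?:\.\d+)?)b", model_name)  # dense models, e.g. "70b"
    if dense:
        params_b = float(dense.group(1))
        if params_b <= FIREWORKS_AI_16_B:
            return "fireworks-ai-up-to-16b"
        if params_b <= FIREWORKS_AI_80_B:
            return "fireworks-ai-16b-80b"
    return model_name  # unknown size: fall back to the literal model name

# e.g. bucket_for_pricing("accounts/fireworks/models/mixtral-8x7b-instruct")
# -> "fireworks-ai-moe-up-to-56b" (8 * 7 = 56B total parameters)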
diff --git a/litellm/llms/gemini/chat/transformation.py b/litellm/llms/gemini/chat/transformation.py
index fbc1916dcc..dc65c46455 100644
--- a/litellm/llms/gemini/chat/transformation.py
+++ b/litellm/llms/gemini/chat/transformation.py
@@ -7,6 +7,7 @@ from litellm.litellm_core_utils.prompt_templates.factory import (
)
from litellm.types.llms.openai import AllMessageValues
from litellm.types.llms.vertex_ai import ContentType, PartType
+from litellm.utils import supports_reasoning
from ...vertex_ai.gemini.transformation import _gemini_convert_messages_with_history
from ...vertex_ai.gemini.vertex_and_google_ai_studio_gemini import VertexGeminiConfig
@@ -67,7 +68,7 @@ class GoogleAIStudioGeminiConfig(VertexGeminiConfig):
return super().get_config()
def get_supported_openai_params(self, model: str) -> List[str]:
- return [
+ supported_params = [
"temperature",
"top_p",
"max_tokens",
@@ -81,7 +82,12 @@ class GoogleAIStudioGeminiConfig(VertexGeminiConfig):
"stop",
"logprobs",
"frequency_penalty",
+ "modalities",
]
+ if supports_reasoning(model):
+ supported_params.append("reasoning_effort")
+ supported_params.append("thinking")
+ return supported_params
def map_openai_params(
self,
@@ -90,7 +96,6 @@ class GoogleAIStudioGeminiConfig(VertexGeminiConfig):
model: str,
drop_params: bool,
) -> Dict:
-
if litellm.vertex_ai_safety_settings is not None:
optional_params["safety_settings"] = litellm.vertex_ai_safety_settings
return super().map_openai_params(
diff --git a/litellm/llms/gemini/common_utils.py b/litellm/llms/gemini/common_utils.py
new file mode 100644
index 0000000000..fef41f7d58
--- /dev/null
+++ b/litellm/llms/gemini/common_utils.py
@@ -0,0 +1,84 @@
+from typing import List, Optional, Union
+
+import httpx
+
+import litellm
+from litellm.llms.base_llm.base_utils import BaseLLMModelInfo
+from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import AllMessageValues
+
+
+class GeminiError(BaseLLMException):
+ pass
+
+
+class GeminiModelInfo(BaseLLMModelInfo):
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ """Google AI Studio sends api key in query params"""
+ return headers
+
+ @property
+ def api_version(self) -> str:
+ return "v1beta"
+
+ @staticmethod
+ def get_api_base(api_base: Optional[str] = None) -> Optional[str]:
+ return (
+ api_base
+ or get_secret_str("GEMINI_API_BASE")
+ or "https://generativelanguage.googleapis.com"
+ )
+
+ @staticmethod
+ def get_api_key(api_key: Optional[str] = None) -> Optional[str]:
+ return api_key or (get_secret_str("GEMINI_API_KEY"))
+
+ @staticmethod
+ def get_base_model(model: str) -> Optional[str]:
+ return model.replace("gemini/", "")
+
+ def get_models(
+ self, api_key: Optional[str] = None, api_base: Optional[str] = None
+ ) -> List[str]:
+ api_base = GeminiModelInfo.get_api_base(api_base)
+ api_key = GeminiModelInfo.get_api_key(api_key)
+ endpoint = f"/{self.api_version}/models"
+ if api_base is None or api_key is None:
+ raise ValueError(
+ "GEMINI_API_BASE or GEMINI_API_KEY is not set. Please set the environment variable, to query Gemini's `/models` endpoint."
+ )
+
+ response = litellm.module_level_client.get(
+ url=f"{api_base}{endpoint}?key={api_key}",
+ )
+
+ if response.status_code != 200:
+ raise ValueError(
+ f"Failed to fetch models from Gemini. Status code: {response.status_code}, Response: {response.json()}"
+ )
+
+ models = response.json()["models"]
+
+ litellm_model_names = []
+ for model in models:
+ stripped_model_name = model["name"].replace("models/", "", 1)  # str.strip would also eat trailing characters
+ litellm_model_name = "gemini/" + stripped_model_name
+ litellm_model_names.append(litellm_model_name)
+ return litellm_model_names
+
+ def get_error_class(
+ self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
+ ) -> BaseLLMException:
+ return GeminiError(
+ status_code=status_code, message=error_message, headers=headers
+ )
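`GeminiModelInfo.get_models` above hits Google AI Studio's `/v1beta/models` endpoint through litellm's module-level client. The sketch below does the same lookup with httpx directly, which can be handy for checking which model ids the route will expose; the response shape (`{"models": [{"name": "models/..."}]}`) is taken from the code above, everything else is illustrative.

import os
from typing import List

import httpx

def list_gemini_models_for_litellm() -> List[str]:
    """Return Gemini model ids in the "gemini/<model>" form litellm expects."""
    api_base = os.getenv("GEMINI_API_BASE", "https://generativelanguage.googleapis.com")
    api_key = os.environ["GEMINI_API_KEY"]  # a key is required, as the guard above enforces
    resp = httpx.get(f"{api_base}/v1beta/models", params={"key": api_key})
    resp.raise_for_status()
    litellm_model_names = []
    for model in resp.json()["models"]:
        # model["name"] looks like "models/gemini-1.5-pro"; keep only the id part
        model_id = model["name"].split("/", 1)[-1]
        litellm_model_names.append(f"gemini/{model_id}")
    return litellm_model_names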
diff --git a/litellm/llms/gemini/files/transformation.py b/litellm/llms/gemini/files/transformation.py
new file mode 100644
index 0000000000..e98e76dabc
--- /dev/null
+++ b/litellm/llms/gemini/files/transformation.py
@@ -0,0 +1,173 @@
+"""
+Supports writing files to Google AI Studio Files API.
+
+For vertex ai, check out the vertex_ai/files/handler.py file.
+"""
+import time
+from typing import List, Optional
+
+import httpx
+
+from litellm._logging import verbose_logger
+from litellm.litellm_core_utils.prompt_templates.common_utils import extract_file_data
+from litellm.llms.base_llm.files.transformation import (
+ BaseFilesConfig,
+ LiteLLMLoggingObj,
+)
+from litellm.types.llms.gemini import GeminiCreateFilesResponseObject
+from litellm.types.llms.openai import (
+ CreateFileRequest,
+ OpenAICreateFileRequestOptionalParams,
+ OpenAIFileObject,
+)
+from litellm.types.utils import LlmProviders
+
+from ..common_utils import GeminiModelInfo
+
+
+class GoogleAIStudioFilesHandler(GeminiModelInfo, BaseFilesConfig):
+ def __init__(self):
+ pass
+
+ @property
+ def custom_llm_provider(self) -> LlmProviders:
+ return LlmProviders.GEMINI
+
+ def get_complete_url(
+ self,
+ api_base: Optional[str],
+ api_key: Optional[str],
+ model: str,
+ optional_params: dict,
+ litellm_params: dict,
+ stream: Optional[bool] = None,
+ ) -> str:
+ """
+ OPTIONAL
+
+ Get the complete url for the request
+
+ Some providers need `model` in `api_base`
+ """
+ endpoint = "upload/v1beta/files"
+ api_base = self.get_api_base(api_base)
+ if not api_base:
+ raise ValueError("api_base is required")
+
+ if not api_key:
+ raise ValueError("api_key is required")
+
+ url = "{}/{}?key={}".format(api_base, endpoint, api_key)
+ return url
+
+ def get_supported_openai_params(
+ self, model: str
+ ) -> List[OpenAICreateFileRequestOptionalParams]:
+ return []
+
+ def map_openai_params(
+ self,
+ non_default_params: dict,
+ optional_params: dict,
+ model: str,
+ drop_params: bool,
+ ) -> dict:
+ return optional_params
+
+ def transform_create_file_request(
+ self,
+ model: str,
+ create_file_data: CreateFileRequest,
+ optional_params: dict,
+ litellm_params: dict,
+ ) -> dict:
+ """
+ Transform the OpenAI-style file creation request into Gemini's format
+
+ Returns:
+ dict: Contains both request data and headers for the two-step upload
+ """
+ # Extract the file information
+ file_data = create_file_data.get("file")
+ if file_data is None:
+ raise ValueError("File data is required")
+
+ # Use the common utility function to extract file data
+ extracted_data = extract_file_data(file_data)
+
+ # Get file size
+ file_size = len(extracted_data["content"])
+
+ # Step 1: Initial resumable upload request
+ headers = {
+ "X-Goog-Upload-Protocol": "resumable",
+ "X-Goog-Upload-Command": "start",
+ "X-Goog-Upload-Header-Content-Length": str(file_size),
+ "X-Goog-Upload-Header-Content-Type": extracted_data["content_type"],
+ "Content-Type": "application/json",
+ }
+ headers.update(extracted_data["headers"]) # Add any custom headers
+
+ # Initial metadata request body
+ initial_data = {
+ "file": {
+ "display_name": extracted_data["filename"] or str(int(time.time()))
+ }
+ }
+
+ # Step 2: Actual file upload data
+ upload_headers = {
+ "Content-Length": str(file_size),
+ "X-Goog-Upload-Offset": "0",
+ "X-Goog-Upload-Command": "upload, finalize",
+ }
+
+ return {
+ "initial_request": {"headers": headers, "data": initial_data},
+ "upload_request": {
+ "headers": upload_headers,
+ "data": extracted_data["content"],
+ },
+ }
+
+ def transform_create_file_response(
+ self,
+ model: Optional[str],
+ raw_response: httpx.Response,
+ logging_obj: LiteLLMLoggingObj,
+ litellm_params: dict,
+ ) -> OpenAIFileObject:
+ """
+ Transform Gemini's file upload response into OpenAI-style FileObject
+ """
+ try:
+ response_json = raw_response.json()
+
+ response_object = GeminiCreateFilesResponseObject(
+ **response_json.get("file", {}) # type: ignore
+ )
+
+ # Extract file information from Gemini response
+
+ return OpenAIFileObject(
+ id=response_object["uri"], # Gemini uses URI as identifier
+ bytes=int(
+ response_object["sizeBytes"]
+ ), # Gemini returns sizeBytes as a string, so cast to int
+ created_at=int(
+ time.mktime(
+ time.strptime(
+ response_object["createTime"].replace("Z", "+00:00"),
+ "%Y-%m-%dT%H:%M:%S.%f%z",
+ )
+ )
+ ),
+ filename=response_object["displayName"],
+ object="file",
+ purpose="user_data", # Default to assistants as that's the main use case
+ status="uploaded",
+ status_details=None,
+ )
+ except Exception as e:
+ verbose_logger.exception(f"Error parsing file upload response: {str(e)}")
+ raise ValueError(f"Error parsing file upload response: {str(e)}")
diff --git a/litellm/llms/groq/chat/transformation.py b/litellm/llms/groq/chat/transformation.py
index 5b24f7d112..4befdc504e 100644
--- a/litellm/llms/groq/chat/transformation.py
+++ b/litellm/llms/groq/chat/transformation.py
@@ -14,11 +14,10 @@ from litellm.types.llms.openai import (
ChatCompletionToolParamFunctionChunk,
)
-from ...openai.chat.gpt_transformation import OpenAIGPTConfig
+from ...openai_like.chat.transformation import OpenAILikeChatConfig
-class GroqChatConfig(OpenAIGPTConfig):
-
+class GroqChatConfig(OpenAILikeChatConfig):
frequency_penalty: Optional[int] = None
function_call: Optional[Union[str, dict]] = None
functions: Optional[list] = None
@@ -58,6 +57,14 @@ class GroqChatConfig(OpenAIGPTConfig):
def get_config(cls):
return super().get_config()
+ def get_supported_openai_params(self, model: str) -> list:
+ base_params = super().get_supported_openai_params(model)
+ try:
+ base_params.remove("max_retries")
+ except ValueError:
+ pass
+ return base_params
+
def _transform_messages(self, messages: List[AllMessageValues], model: str) -> List:
for idx, message in enumerate(messages):
"""
@@ -125,8 +132,11 @@ class GroqChatConfig(OpenAIGPTConfig):
optional_params: dict,
model: str,
drop_params: bool = False,
+ replace_max_completion_tokens_with_max_tokens: bool = False, # groq supports max_completion_tokens
) -> dict:
_response_format = non_default_params.get("response_format")
+ if self._should_fake_stream(non_default_params):
+ optional_params["fake_stream"] = True
if _response_format is not None and isinstance(_response_format, dict):
json_schema: Optional[dict] = None
if "response_schema" in _response_format:
@@ -153,6 +163,8 @@ class GroqChatConfig(OpenAIGPTConfig):
non_default_params.pop(
"response_format", None
) # only remove if it's a json_schema - handled via using groq's tool calling params.
- return super().map_openai_params(
+ optional_params = super().map_openai_params(
non_default_params, optional_params, model, drop_params
)
+
+ return optional_params
diff --git a/litellm/llms/groq/stt/transformation.py b/litellm/llms/groq/stt/transformation.py
index c4dbd8d0ca..b467fab14f 100644
--- a/litellm/llms/groq/stt/transformation.py
+++ b/litellm/llms/groq/stt/transformation.py
@@ -9,7 +9,6 @@ import litellm
class GroqSTTConfig:
-
frequency_penalty: Optional[int] = None
function_call: Optional[Union[str, dict]] = None
functions: Optional[list] = None
diff --git a/litellm/llms/hosted_vllm/chat/transformation.py b/litellm/llms/hosted_vllm/chat/transformation.py
index 9332e98789..e328bf2881 100644
--- a/litellm/llms/hosted_vllm/chat/transformation.py
+++ b/litellm/llms/hosted_vllm/chat/transformation.py
@@ -2,9 +2,19 @@
Translate from OpenAI's `/v1/chat/completions` to VLLM's `/v1/chat/completions`
"""
-from typing import Optional, Tuple
+from typing import List, Optional, Tuple, cast
+from litellm.litellm_core_utils.prompt_templates.common_utils import (
+ _get_image_mime_type_from_url,
+)
+from litellm.litellm_core_utils.prompt_templates.factory import _parse_mime_type
from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import (
+ AllMessageValues,
+ ChatCompletionFileObject,
+ ChatCompletionVideoObject,
+ ChatCompletionVideoUrlObject,
+)
from ....utils import _remove_additional_properties, _remove_strict_from_schema
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
@@ -38,3 +48,71 @@ class HostedVLLMChatConfig(OpenAIGPTConfig):
api_key or get_secret_str("HOSTED_VLLM_API_KEY") or "fake-api-key"
) # vllm does not require an api key
return api_base, dynamic_api_key
+
+ def _is_video_file(self, content_item: ChatCompletionFileObject) -> bool:
+ """
+ Check if the file is a video
+
+ - format: explicit mime type starting with "video/"
+ - file_data: base64-encoded data whose embedded mime type is "video/..."
+ - file_id: mime type inferred from the URL / file extension
+ """
+ file = content_item.get("file", {})
+ format = file.get("format")
+ file_data = file.get("file_data")
+ file_id = file.get("file_id")
+ if content_item.get("type") != "file":
+ return False
+ if format and format.startswith("video/"):
+ return True
+ elif file_data:
+ mime_type = _parse_mime_type(file_data)
+ if mime_type and mime_type.startswith("video/"):
+ return True
+ elif file_id:
+ mime_type = _get_image_mime_type_from_url(file_id)
+ if mime_type and mime_type.startswith("video/"):
+ return True
+ return False
+
+ def _convert_file_to_video_url(
+ self, content_item: ChatCompletionFileObject
+ ) -> ChatCompletionVideoObject:
+ file = content_item.get("file", {})
+ file_id = file.get("file_id")
+ file_data = file.get("file_data")
+
+ if file_id:
+ return ChatCompletionVideoObject(
+ type="video_url", video_url=ChatCompletionVideoUrlObject(url=file_id)
+ )
+ elif file_data:
+ return ChatCompletionVideoObject(
+ type="video_url", video_url=ChatCompletionVideoUrlObject(url=file_data)
+ )
+ raise ValueError("file_id or file_data is required")
+
+ def _transform_messages(
+ self, messages: List[AllMessageValues], model: str
+ ) -> List[AllMessageValues]:
+ """
+ Support translating video files from file_id or file_data to video_url
+ """
+ for message in messages:
+ if message["role"] == "user":
+ message_content = message.get("content")
+ if message_content and isinstance(message_content, list):
+ replaced_content_items: List[
+ Tuple[int, ChatCompletionFileObject]
+ ] = []
+ for idx, content_item in enumerate(message_content):
+ if content_item.get("type") == "file":
+ content_item = cast(ChatCompletionFileObject, content_item)
+ if self._is_video_file(content_item):
+ replaced_content_items.append((idx, content_item))
+ for idx, content_item in replaced_content_items:
+ message_content[idx] = self._convert_file_to_video_url(
+ content_item
+ )
+ transformed_messages = super()._transform_messages(messages, model)
+ return transformed_messages
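To make the hosted_vllm change above concrete: an OpenAI-style `file` content part that carries a video is rewritten into the `video_url` part vLLM understands. The sketch below shows that conversion on plain dicts, with a made-up example URL.

from typing import Dict

def file_part_to_video_url(part: Dict) -> Dict:
    """Rewrite an OpenAI-style `file` content part into a vLLM `video_url` part (sketch)."""
    file_obj = part.get("file", {})
    url = file_obj.get("file_id") or file_obj.get("file_data")
    if not url:
        raise ValueError("file_id or file_data is required")
    return {"type": "video_url", "video_url": {"url": url}}

# Example: a user message carrying a video by URL
message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "Describe this clip"},
        {"type": "file", "file": {"file_id": "https://example.com/clip.mp4", "format": "video/mp4"}},
    ],
}
message["content"] = [
    file_part_to_video_url(p) if p.get("type") == "file" else p
    for p in message["content"]
]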
diff --git a/litellm/llms/huggingface/chat/handler.py b/litellm/llms/huggingface/chat/handler.py
deleted file mode 100644
index 2b65e5b7da..0000000000
--- a/litellm/llms/huggingface/chat/handler.py
+++ /dev/null
@@ -1,769 +0,0 @@
-## Uses the huggingface text generation inference API
-import json
-import os
-from typing import (
- Any,
- Callable,
- Dict,
- List,
- Literal,
- Optional,
- Tuple,
- Union,
- cast,
- get_args,
-)
-
-import httpx
-
-import litellm
-from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
-from litellm.litellm_core_utils.streaming_handler import CustomStreamWrapper
-from litellm.llms.custom_httpx.http_handler import (
- AsyncHTTPHandler,
- HTTPHandler,
- _get_httpx_client,
- get_async_httpx_client,
-)
-from litellm.llms.huggingface.chat.transformation import (
- HuggingfaceChatConfig as HuggingfaceConfig,
-)
-from litellm.types.llms.openai import AllMessageValues
-from litellm.types.utils import EmbeddingResponse
-from litellm.types.utils import Logprobs as TextCompletionLogprobs
-from litellm.types.utils import ModelResponse
-
-from ...base import BaseLLM
-from ..common_utils import HuggingfaceError
-
-hf_chat_config = HuggingfaceConfig()
-
-
-hf_tasks_embeddings = Literal[ # pipeline tags + hf tei endpoints - https://huggingface.github.io/text-embeddings-inference/#/
- "sentence-similarity", "feature-extraction", "rerank", "embed", "similarity"
-]
-
-
-def get_hf_task_embedding_for_model(
- model: str, task_type: Optional[str], api_base: str
-) -> Optional[str]:
- if task_type is not None:
- if task_type in get_args(hf_tasks_embeddings):
- return task_type
- else:
- raise Exception(
- "Invalid task_type={}. Expected one of={}".format(
- task_type, hf_tasks_embeddings
- )
- )
- http_client = HTTPHandler(concurrent_limit=1)
-
- model_info = http_client.get(url=api_base)
-
- model_info_dict = model_info.json()
-
- pipeline_tag: Optional[str] = model_info_dict.get("pipeline_tag", None)
-
- return pipeline_tag
-
-
-async def async_get_hf_task_embedding_for_model(
- model: str, task_type: Optional[str], api_base: str
-) -> Optional[str]:
- if task_type is not None:
- if task_type in get_args(hf_tasks_embeddings):
- return task_type
- else:
- raise Exception(
- "Invalid task_type={}. Expected one of={}".format(
- task_type, hf_tasks_embeddings
- )
- )
- http_client = get_async_httpx_client(
- llm_provider=litellm.LlmProviders.HUGGINGFACE,
- )
-
- model_info = await http_client.get(url=api_base)
-
- model_info_dict = model_info.json()
-
- pipeline_tag: Optional[str] = model_info_dict.get("pipeline_tag", None)
-
- return pipeline_tag
-
-
-async def make_call(
- client: Optional[AsyncHTTPHandler],
- api_base: str,
- headers: dict,
- data: str,
- model: str,
- messages: list,
- logging_obj,
- timeout: Optional[Union[float, httpx.Timeout]],
- json_mode: bool,
-) -> Tuple[Any, httpx.Headers]:
- if client is None:
- client = litellm.module_level_aclient
-
- try:
- response = await client.post(
- api_base, headers=headers, data=data, stream=True, timeout=timeout
- )
- except httpx.HTTPStatusError as e:
- error_headers = getattr(e, "headers", None)
- error_response = getattr(e, "response", None)
- if error_headers is None and error_response:
- error_headers = getattr(error_response, "headers", None)
- raise HuggingfaceError(
- status_code=e.response.status_code,
- message=str(await e.response.aread()),
- headers=cast(dict, error_headers) if error_headers else None,
- )
- except Exception as e:
- for exception in litellm.LITELLM_EXCEPTION_TYPES:
- if isinstance(e, exception):
- raise e
- raise HuggingfaceError(status_code=500, message=str(e))
-
- # LOGGING
- logging_obj.post_call(
- input=messages,
- api_key="",
- original_response=response, # Pass the completion stream for logging
- additional_args={"complete_input_dict": data},
- )
-
- return response.aiter_lines(), response.headers
-
-
-class Huggingface(BaseLLM):
- _client_session: Optional[httpx.Client] = None
- _aclient_session: Optional[httpx.AsyncClient] = None
-
- def __init__(self) -> None:
- super().__init__()
-
- def completion( # noqa: PLR0915
- self,
- model: str,
- messages: list,
- api_base: Optional[str],
- model_response: ModelResponse,
- print_verbose: Callable,
- timeout: float,
- encoding,
- api_key,
- logging_obj,
- optional_params: dict,
- litellm_params: dict,
- custom_prompt_dict={},
- acompletion: bool = False,
- logger_fn=None,
- client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
- headers: dict = {},
- ):
- super().completion()
- exception_mapping_worked = False
- try:
- task, model = hf_chat_config.get_hf_task_for_model(model)
- litellm_params["task"] = task
- headers = hf_chat_config.validate_environment(
- api_key=api_key,
- headers=headers,
- model=model,
- messages=messages,
- optional_params=optional_params,
- )
- completion_url = hf_chat_config.get_api_base(api_base=api_base, model=model)
- data = hf_chat_config.transform_request(
- model=model,
- messages=messages,
- optional_params=optional_params,
- litellm_params=litellm_params,
- headers=headers,
- )
-
- ## LOGGING
- logging_obj.pre_call(
- input=data,
- api_key=api_key,
- additional_args={
- "complete_input_dict": data,
- "headers": headers,
- "api_base": completion_url,
- "acompletion": acompletion,
- },
- )
- ## COMPLETION CALL
-
- if acompletion is True:
- ### ASYNC STREAMING
- if optional_params.get("stream", False):
- return self.async_streaming(logging_obj=logging_obj, api_base=completion_url, data=data, headers=headers, model_response=model_response, model=model, timeout=timeout, messages=messages) # type: ignore
- else:
- ### ASYNC COMPLETION
- return self.acompletion(
- api_base=completion_url,
- data=data,
- headers=headers,
- model_response=model_response,
- encoding=encoding,
- model=model,
- optional_params=optional_params,
- timeout=timeout,
- litellm_params=litellm_params,
- logging_obj=logging_obj,
- api_key=api_key,
- messages=messages,
- client=(
- client
- if client is not None
- and isinstance(client, AsyncHTTPHandler)
- else None
- ),
- )
- if client is None or not isinstance(client, HTTPHandler):
- client = _get_httpx_client()
- ### SYNC STREAMING
- if "stream" in optional_params and optional_params["stream"] is True:
- response = client.post(
- url=completion_url,
- headers=headers,
- data=json.dumps(data),
- stream=optional_params["stream"],
- )
- return response.iter_lines()
- ### SYNC COMPLETION
- else:
- response = client.post(
- url=completion_url,
- headers=headers,
- data=json.dumps(data),
- )
-
- return hf_chat_config.transform_response(
- model=model,
- raw_response=response,
- model_response=model_response,
- logging_obj=logging_obj,
- api_key=api_key,
- request_data=data,
- messages=messages,
- optional_params=optional_params,
- encoding=encoding,
- json_mode=None,
- litellm_params=litellm_params,
- )
- except httpx.HTTPStatusError as e:
- raise HuggingfaceError(
- status_code=e.response.status_code,
- message=e.response.text,
- headers=e.response.headers,
- )
- except HuggingfaceError as e:
- exception_mapping_worked = True
- raise e
- except Exception as e:
- if exception_mapping_worked:
- raise e
- else:
- import traceback
-
- raise HuggingfaceError(status_code=500, message=traceback.format_exc())
-
- async def acompletion(
- self,
- api_base: str,
- data: dict,
- headers: dict,
- model_response: ModelResponse,
- encoding: Any,
- model: str,
- optional_params: dict,
- litellm_params: dict,
- timeout: float,
- logging_obj: LiteLLMLoggingObj,
- api_key: str,
- messages: List[AllMessageValues],
- client: Optional[AsyncHTTPHandler] = None,
- ):
- response: Optional[httpx.Response] = None
- try:
- if client is None:
- client = get_async_httpx_client(
- llm_provider=litellm.LlmProviders.HUGGINGFACE
- )
- ### ASYNC COMPLETION
- http_response = await client.post(
- url=api_base, headers=headers, data=json.dumps(data), timeout=timeout
- )
-
- response = http_response
-
- return hf_chat_config.transform_response(
- model=model,
- raw_response=http_response,
- model_response=model_response,
- logging_obj=logging_obj,
- api_key=api_key,
- request_data=data,
- messages=messages,
- optional_params=optional_params,
- encoding=encoding,
- json_mode=None,
- litellm_params=litellm_params,
- )
- except Exception as e:
- if isinstance(e, httpx.TimeoutException):
- raise HuggingfaceError(status_code=500, message="Request Timeout Error")
- elif isinstance(e, HuggingfaceError):
- raise e
- elif response is not None and hasattr(response, "text"):
- raise HuggingfaceError(
- status_code=500,
- message=f"{str(e)}\n\nOriginal Response: {response.text}",
- headers=response.headers,
- )
- else:
- raise HuggingfaceError(status_code=500, message=f"{str(e)}")
-
- async def async_streaming(
- self,
- logging_obj,
- api_base: str,
- data: dict,
- headers: dict,
- model_response: ModelResponse,
- messages: List[AllMessageValues],
- model: str,
- timeout: float,
- client: Optional[AsyncHTTPHandler] = None,
- ):
- completion_stream, _ = await make_call(
- client=client,
- api_base=api_base,
- headers=headers,
- data=json.dumps(data),
- model=model,
- messages=messages,
- logging_obj=logging_obj,
- timeout=timeout,
- json_mode=False,
- )
- streamwrapper = CustomStreamWrapper(
- completion_stream=completion_stream,
- model=model,
- custom_llm_provider="huggingface",
- logging_obj=logging_obj,
- )
- return streamwrapper
-
- def _transform_input_on_pipeline_tag(
- self, input: List, pipeline_tag: Optional[str]
- ) -> dict:
- if pipeline_tag is None:
- return {"inputs": input}
- if pipeline_tag == "sentence-similarity" or pipeline_tag == "similarity":
- if len(input) < 2:
- raise HuggingfaceError(
- status_code=400,
- message="sentence-similarity requires 2+ sentences",
- )
- return {"inputs": {"source_sentence": input[0], "sentences": input[1:]}}
- elif pipeline_tag == "rerank":
- if len(input) < 2:
- raise HuggingfaceError(
- status_code=400,
- message="reranker requires 2+ sentences",
- )
- return {"inputs": {"query": input[0], "texts": input[1:]}}
- return {"inputs": input} # default to feature-extraction pipeline tag
-
- async def _async_transform_input(
- self,
- model: str,
- task_type: Optional[str],
- embed_url: str,
- input: List,
- optional_params: dict,
- ) -> dict:
- hf_task = await async_get_hf_task_embedding_for_model(
- model=model, task_type=task_type, api_base=embed_url
- )
-
- data = self._transform_input_on_pipeline_tag(input=input, pipeline_tag=hf_task)
-
- if len(optional_params.keys()) > 0:
- data["options"] = optional_params
-
- return data
-
- def _process_optional_params(self, data: dict, optional_params: dict) -> dict:
- special_options_keys = HuggingfaceConfig().get_special_options_params()
- special_parameters_keys = [
- "min_length",
- "max_length",
- "top_k",
- "top_p",
- "temperature",
- "repetition_penalty",
- "max_time",
- ]
-
- for k, v in optional_params.items():
- if k in special_options_keys:
- data.setdefault("options", {})
- data["options"][k] = v
- elif k in special_parameters_keys:
- data.setdefault("parameters", {})
- data["parameters"][k] = v
- else:
- data[k] = v
-
- return data
-
- def _transform_input(
- self,
- input: List,
- model: str,
- call_type: Literal["sync", "async"],
- optional_params: dict,
- embed_url: str,
- ) -> dict:
- data: Dict = {}
-
- ## TRANSFORMATION ##
- if "sentence-transformers" in model:
- if len(input) == 0:
- raise HuggingfaceError(
- status_code=400,
- message="sentence transformers requires 2+ sentences",
- )
- data = {"inputs": {"source_sentence": input[0], "sentences": input[1:]}}
- else:
- data = {"inputs": input}
-
- task_type = optional_params.pop("input_type", None)
-
- if call_type == "sync":
- hf_task = get_hf_task_embedding_for_model(
- model=model, task_type=task_type, api_base=embed_url
- )
- elif call_type == "async":
- return self._async_transform_input(
- model=model, task_type=task_type, embed_url=embed_url, input=input
- ) # type: ignore
-
- data = self._transform_input_on_pipeline_tag(
- input=input, pipeline_tag=hf_task
- )
-
- if len(optional_params.keys()) > 0:
- data = self._process_optional_params(
- data=data, optional_params=optional_params
- )
-
- return data
-
- def _process_embedding_response(
- self,
- embeddings: dict,
- model_response: EmbeddingResponse,
- model: str,
- input: List,
- encoding: Any,
- ) -> EmbeddingResponse:
- output_data = []
- if "similarities" in embeddings:
- for idx, embedding in embeddings["similarities"]:
- output_data.append(
- {
- "object": "embedding",
- "index": idx,
- "embedding": embedding, # flatten list returned from hf
- }
- )
- else:
- for idx, embedding in enumerate(embeddings):
- if isinstance(embedding, float):
- output_data.append(
- {
- "object": "embedding",
- "index": idx,
- "embedding": embedding, # flatten list returned from hf
- }
- )
- elif isinstance(embedding, list) and isinstance(embedding[0], float):
- output_data.append(
- {
- "object": "embedding",
- "index": idx,
- "embedding": embedding, # flatten list returned from hf
- }
- )
- else:
- output_data.append(
- {
- "object": "embedding",
- "index": idx,
- "embedding": embedding[0][
- 0
- ], # flatten list returned from hf
- }
- )
- model_response.object = "list"
- model_response.data = output_data
- model_response.model = model
- input_tokens = 0
- for text in input:
- input_tokens += len(encoding.encode(text))
-
- setattr(
- model_response,
- "usage",
- litellm.Usage(
- prompt_tokens=input_tokens,
- completion_tokens=input_tokens,
- total_tokens=input_tokens,
- prompt_tokens_details=None,
- completion_tokens_details=None,
- ),
- )
- return model_response
-
- async def aembedding(
- self,
- model: str,
- input: list,
- model_response: litellm.utils.EmbeddingResponse,
- timeout: Union[float, httpx.Timeout],
- logging_obj: LiteLLMLoggingObj,
- optional_params: dict,
- api_base: str,
- api_key: Optional[str],
- headers: dict,
- encoding: Callable,
- client: Optional[AsyncHTTPHandler] = None,
- ):
- ## TRANSFORMATION ##
- data = self._transform_input(
- input=input,
- model=model,
- call_type="sync",
- optional_params=optional_params,
- embed_url=api_base,
- )
-
- ## LOGGING
- logging_obj.pre_call(
- input=input,
- api_key=api_key,
- additional_args={
- "complete_input_dict": data,
- "headers": headers,
- "api_base": api_base,
- },
- )
- ## COMPLETION CALL
- if client is None:
- client = get_async_httpx_client(
- llm_provider=litellm.LlmProviders.HUGGINGFACE,
- )
-
- response = await client.post(api_base, headers=headers, data=json.dumps(data))
-
- ## LOGGING
- logging_obj.post_call(
- input=input,
- api_key=api_key,
- additional_args={"complete_input_dict": data},
- original_response=response,
- )
-
- embeddings = response.json()
-
- if "error" in embeddings:
- raise HuggingfaceError(status_code=500, message=embeddings["error"])
-
- ## PROCESS RESPONSE ##
- return self._process_embedding_response(
- embeddings=embeddings,
- model_response=model_response,
- model=model,
- input=input,
- encoding=encoding,
- )
-
- def embedding(
- self,
- model: str,
- input: list,
- model_response: EmbeddingResponse,
- optional_params: dict,
- logging_obj: LiteLLMLoggingObj,
- encoding: Callable,
- api_key: Optional[str] = None,
- api_base: Optional[str] = None,
- timeout: Union[float, httpx.Timeout] = httpx.Timeout(None),
- aembedding: Optional[bool] = None,
- client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
- headers={},
- ) -> EmbeddingResponse:
- super().embedding()
- headers = hf_chat_config.validate_environment(
- api_key=api_key,
- headers=headers,
- model=model,
- optional_params=optional_params,
- messages=[],
- )
- # print_verbose(f"{model}, {task}")
- embed_url = ""
- if "https" in model:
- embed_url = model
- elif api_base:
- embed_url = api_base
- elif "HF_API_BASE" in os.environ:
- embed_url = os.getenv("HF_API_BASE", "")
- elif "HUGGINGFACE_API_BASE" in os.environ:
- embed_url = os.getenv("HUGGINGFACE_API_BASE", "")
- else:
- embed_url = f"https://api-inference.huggingface.co/models/{model}"
-
- ## ROUTING ##
- if aembedding is True:
- return self.aembedding(
- input=input,
- model_response=model_response,
- timeout=timeout,
- logging_obj=logging_obj,
- headers=headers,
- api_base=embed_url, # type: ignore
- api_key=api_key,
- client=client if isinstance(client, AsyncHTTPHandler) else None,
- model=model,
- optional_params=optional_params,
- encoding=encoding,
- )
-
- ## TRANSFORMATION ##
-
- data = self._transform_input(
- input=input,
- model=model,
- call_type="sync",
- optional_params=optional_params,
- embed_url=embed_url,
- )
-
- ## LOGGING
- logging_obj.pre_call(
- input=input,
- api_key=api_key,
- additional_args={
- "complete_input_dict": data,
- "headers": headers,
- "api_base": embed_url,
- },
- )
- ## COMPLETION CALL
- if client is None or not isinstance(client, HTTPHandler):
- client = HTTPHandler(concurrent_limit=1)
- response = client.post(embed_url, headers=headers, data=json.dumps(data))
-
- ## LOGGING
- logging_obj.post_call(
- input=input,
- api_key=api_key,
- additional_args={"complete_input_dict": data},
- original_response=response,
- )
-
- embeddings = response.json()
-
- if "error" in embeddings:
- raise HuggingfaceError(status_code=500, message=embeddings["error"])
-
- ## PROCESS RESPONSE ##
- return self._process_embedding_response(
- embeddings=embeddings,
- model_response=model_response,
- model=model,
- input=input,
- encoding=encoding,
- )
-
- def _transform_logprobs(
- self, hf_response: Optional[List]
- ) -> Optional[TextCompletionLogprobs]:
- """
- Transform Hugging Face logprobs to OpenAI.Completion() format
- """
- if hf_response is None:
- return None
-
- # Initialize an empty list for the transformed logprobs
- _logprob: TextCompletionLogprobs = TextCompletionLogprobs(
- text_offset=[],
- token_logprobs=[],
- tokens=[],
- top_logprobs=[],
- )
-
- # For each Hugging Face response, transform the logprobs
- for response in hf_response:
- # Extract the relevant information from the response
- response_details = response["details"]
- top_tokens = response_details.get("top_tokens", {})
-
- for i, token in enumerate(response_details["prefill"]):
- # Extract the text of the token
- token_text = token["text"]
-
- # Extract the logprob of the token
- token_logprob = token["logprob"]
-
- # Add the token information to the 'token_info' list
- cast(List[str], _logprob.tokens).append(token_text)
- cast(List[float], _logprob.token_logprobs).append(token_logprob)
-
- # stub this to work with llm eval harness
- top_alt_tokens = {"": -1.0, "": -2.0, "": -3.0} # noqa: F601
- cast(List[Dict[str, float]], _logprob.top_logprobs).append(
- top_alt_tokens
- )
-
- # For each element in the 'tokens' list, extract the relevant information
- for i, token in enumerate(response_details["tokens"]):
- # Extract the text of the token
- token_text = token["text"]
-
- # Extract the logprob of the token
- token_logprob = token["logprob"]
-
- top_alt_tokens = {}
- temp_top_logprobs = []
- if top_tokens != {}:
- temp_top_logprobs = top_tokens[i]
-
- # top_alt_tokens should look like this: { "alternative_1": -1, "alternative_2": -2, "alternative_3": -3 }
- for elem in temp_top_logprobs:
- text = elem["text"]
- logprob = elem["logprob"]
- top_alt_tokens[text] = logprob
-
- # Add the token information to the 'token_info' list
- cast(List[str], _logprob.tokens).append(token_text)
- cast(List[float], _logprob.token_logprobs).append(token_logprob)
- cast(List[Dict[str, float]], _logprob.top_logprobs).append(
- top_alt_tokens
- )
-
- # Add the text offset of the token
- # This is computed as the sum of the lengths of all previous tokens
- cast(List[int], _logprob.text_offset).append(
- sum(len(t["text"]) for t in response_details["tokens"][:i])
- )
-
- return _logprob
diff --git a/litellm/llms/huggingface/chat/transformation.py b/litellm/llms/huggingface/chat/transformation.py
index 858fda473e..0ad93be763 100644
--- a/litellm/llms/huggingface/chat/transformation.py
+++ b/litellm/llms/huggingface/chat/transformation.py
@@ -1,27 +1,10 @@
-import json
+import logging
import os
-import time
-from copy import deepcopy
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
import httpx
-import litellm
-from litellm.litellm_core_utils.prompt_templates.common_utils import (
- convert_content_list_to_str,
-)
-from litellm.litellm_core_utils.prompt_templates.factory import (
- custom_prompt,
- prompt_factory,
-)
-from litellm.litellm_core_utils.streaming_handler import CustomStreamWrapper
-from litellm.llms.base_llm.chat.transformation import BaseConfig, BaseLLMException
-from litellm.secret_managers.main import get_secret_str
-from litellm.types.llms.openai import AllMessageValues
-from litellm.types.utils import Choices, Message, ModelResponse, Usage
-from litellm.utils import token_counter
-
-from ..common_utils import HuggingfaceError, hf_task_list, hf_tasks, output_parser
+from litellm.types.llms.openai import AllMessageValues, ChatCompletionRequest
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
@@ -30,176 +13,102 @@ if TYPE_CHECKING:
else:
LoggingClass = Any
+from litellm.llms.base_llm.chat.transformation import BaseLLMException
-tgi_models_cache = None
-conv_models_cache = None
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
+from ..common_utils import HuggingFaceError, _fetch_inference_provider_mapping
+
+logger = logging.getLogger(__name__)
+
+BASE_URL = "https://router.huggingface.co"
-class HuggingfaceChatConfig(BaseConfig):
+class HuggingFaceChatConfig(OpenAIGPTConfig):
"""
- Reference: https://huggingface.github.io/text-generation-inference/#/Text%20Generation%20Inference/compat_generate
+ Reference: https://huggingface.co/docs/huggingface_hub/guides/inference
"""
- hf_task: Optional[hf_tasks] = (
- None # litellm-specific param, used to know the api spec to use when calling huggingface api
- )
- best_of: Optional[int] = None
- decoder_input_details: Optional[bool] = None
- details: Optional[bool] = True # enables returning logprobs + best of
- max_new_tokens: Optional[int] = None
- repetition_penalty: Optional[float] = None
- return_full_text: Optional[bool] = (
- False # by default don't return the input as part of the output
- )
- seed: Optional[int] = None
- temperature: Optional[float] = None
- top_k: Optional[int] = None
- top_n_tokens: Optional[int] = None
- top_p: Optional[int] = None
- truncate: Optional[int] = None
- typical_p: Optional[float] = None
- watermark: Optional[bool] = None
-
- def __init__(
+ def validate_environment(
self,
- best_of: Optional[int] = None,
- decoder_input_details: Optional[bool] = None,
- details: Optional[bool] = None,
- max_new_tokens: Optional[int] = None,
- repetition_penalty: Optional[float] = None,
- return_full_text: Optional[bool] = None,
- seed: Optional[int] = None,
- temperature: Optional[float] = None,
- top_k: Optional[int] = None,
- top_n_tokens: Optional[int] = None,
- top_p: Optional[int] = None,
- truncate: Optional[int] = None,
- typical_p: Optional[float] = None,
- watermark: Optional[bool] = None,
- ) -> None:
- locals_ = locals().copy()
- for key, value in locals_.items():
- if key != "self" and value is not None:
- setattr(self.__class__, key, value)
-
- @classmethod
- def get_config(cls):
- return super().get_config()
-
- def get_special_options_params(self):
- return ["use_cache", "wait_for_model"]
-
- def get_supported_openai_params(self, model: str):
- return [
- "stream",
- "temperature",
- "max_tokens",
- "max_completion_tokens",
- "top_p",
- "stop",
- "n",
- "echo",
- ]
-
- def map_openai_params(
- self,
- non_default_params: Dict,
- optional_params: Dict,
+ headers: dict,
model: str,
- drop_params: bool,
- ) -> Dict:
- for param, value in non_default_params.items():
- # temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None
- if param == "temperature":
- if value == 0.0 or value == 0:
- # hugging face exception raised when temp==0
- # Failed: Error occurred: HuggingfaceException - Input validation error: `temperature` must be strictly positive
- value = 0.01
- optional_params["temperature"] = value
- if param == "top_p":
- optional_params["top_p"] = value
- if param == "n":
- optional_params["best_of"] = value
- optional_params["do_sample"] = (
- True # Need to sample if you want best of for hf inference endpoints
- )
- if param == "stream":
- optional_params["stream"] = value
- if param == "stop":
- optional_params["stop"] = value
- if param == "max_tokens" or param == "max_completion_tokens":
- # HF TGI raises the following exception when max_new_tokens==0
- # Failed: Error occurred: HuggingfaceException - Input validation error: `max_new_tokens` must be strictly positive
- if value == 0:
- value = 1
- optional_params["max_new_tokens"] = value
- if param == "echo":
- # https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation.decoder_input_details
- # Return the decoder input token logprobs and ids. You must set details=True as well for it to be taken into account. Defaults to False
- optional_params["decoder_input_details"] = True
+ messages: List[AllMessageValues],
+ optional_params: Dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ default_headers = {
+ "content-type": "application/json",
+ }
+ if api_key is not None:
+ default_headers["Authorization"] = f"Bearer {api_key}"
- return optional_params
+ headers = {**headers, **default_headers}
- def get_hf_api_key(self) -> Optional[str]:
- return get_secret_str("HUGGINGFACE_API_KEY")
+ return headers
- def read_tgi_conv_models(self):
- try:
- global tgi_models_cache, conv_models_cache
- # Check if the cache is already populated
- # so we don't keep on reading txt file if there are 1k requests
- if (tgi_models_cache is not None) and (conv_models_cache is not None):
- return tgi_models_cache, conv_models_cache
- # If not, read the file and populate the cache
- tgi_models = set()
- script_directory = os.path.dirname(os.path.abspath(__file__))
- script_directory = os.path.dirname(script_directory)
- # Construct the file path relative to the script's directory
- file_path = os.path.join(
- script_directory,
- "huggingface_llms_metadata",
- "hf_text_generation_models.txt",
+ def get_error_class(
+ self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
+ ) -> BaseLLMException:
+ return HuggingFaceError(
+ status_code=status_code, message=error_message, headers=headers
+ )
+
+ def get_base_url(self, model: str, base_url: Optional[str]) -> Optional[str]:
+ """
+ Get the API base for the Huggingface API.
+
+ Do not add the chat/embedding/rerank extension here. Let the handler do this.
+ """
+ if model.startswith(("http://", "https://")):
+ base_url = model
+ elif base_url is None:
+ base_url = os.getenv("HF_API_BASE") or os.getenv("HUGGINGFACE_API_BASE", "")
+ return base_url
+
+ def get_complete_url(
+ self,
+ api_base: Optional[str],
+ api_key: Optional[str],
+ model: str,
+ optional_params: dict,
+ litellm_params: dict,
+ stream: Optional[bool] = None,
+ ) -> str:
+ """
+ Get the complete URL for the API call.
+        Handles provider-specific routing through the Hugging Face inference router.
+ """
+ # 1. Check if api_base is provided
+ if api_base is not None:
+ complete_url = api_base
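+        # 2. Check the Hugging Face environment variables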
+ elif os.getenv("HF_API_BASE") or os.getenv("HUGGINGFACE_API_BASE"):
+            complete_url = os.getenv("HF_API_BASE") or str(
+                os.getenv("HUGGINGFACE_API_BASE")
)
-
- with open(file_path, "r") as file:
- for line in file:
- tgi_models.add(line.strip())
-
- # Cache the set for future use
- tgi_models_cache = tgi_models
-
- # If not, read the file and populate the cache
- file_path = os.path.join(
- script_directory,
- "huggingface_llms_metadata",
- "hf_conversational_models.txt",
- )
- conv_models = set()
- with open(file_path, "r") as file:
- for line in file:
- conv_models.add(line.strip())
- # Cache the set for future use
- conv_models_cache = conv_models
- return tgi_models, conv_models
- except Exception:
- return set(), set()
-
- def get_hf_task_for_model(self, model: str) -> Tuple[hf_tasks, str]:
- # read text file, cast it to set
- # read the file called "huggingface_llms_metadata/hf_text_generation_models.txt"
- if model.split("/")[0] in hf_task_list:
- split_model = model.split("/", 1)
- return split_model[0], split_model[1] # type: ignore
- tgi_models, conversational_models = self.read_tgi_conv_models()
-
- if model in tgi_models:
- return "text-generation-inference", model
- elif model in conversational_models:
- return "conversational", model
- elif "roneneldan/TinyStories" in model:
- return "text-generation", model
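+        # 3. Check if the model itself is a full URL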
+ elif model.startswith(("http://", "https://")):
+ complete_url = model
+ # 4. Default construction with provider
else:
- return "text-generation-inference", model # default to tgi
+ # Parse provider and model
+ first_part, remaining = model.split("/", 1)
+ if "/" in remaining:
+ provider = first_part
+ else:
+ provider = "hf-inference"
+
+ if provider == "hf-inference":
+ route = f"{provider}/models/{model}/v1/chat/completions"
+ elif provider == "novita":
+ route = f"{provider}/chat/completions"
+ else:
+ route = f"{provider}/v1/chat/completions"
+ complete_url = f"{BASE_URL}/{route}"
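+            # Illustrative routing examples (model ids are hypothetical):
+            #   "together/meta-llama/Llama-3.1-8B-Instruct" -> provider "together"
+            #       -> https://router.huggingface.co/together/v1/chat/completions
+            #   "meta-llama/Llama-3.1-8B-Instruct" (no provider prefix) -> "hf-inference"
+            #       -> https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions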
+
+ # Ensure URL doesn't end with a slash
+ complete_url = complete_url.rstrip("/")
+ return complete_url
def transform_request(
self,
@@ -209,381 +118,32 @@ class HuggingfaceChatConfig(BaseConfig):
litellm_params: dict,
headers: dict,
) -> dict:
- task = litellm_params.get("task", None)
- ## VALIDATE API FORMAT
- if task is None or not isinstance(task, str) or task not in hf_task_list:
- raise Exception(
- "Invalid hf task - {}. Valid formats - {}.".format(task, hf_tasks)
- )
-
- ## Load Config
- config = litellm.HuggingfaceConfig.get_config()
- for k, v in config.items():
- if (
- k not in optional_params
- ): # completion(top_k=3) > huggingfaceConfig(top_k=3) <- allows for dynamic variables to be passed in
- optional_params[k] = v
-
- ### MAP INPUT PARAMS
- #### HANDLE SPECIAL PARAMS
- special_params = self.get_special_options_params()
- special_params_dict = {}
- # Create a list of keys to pop after iteration
- keys_to_pop = []
-
- for k, v in optional_params.items():
- if k in special_params:
- special_params_dict[k] = v
- keys_to_pop.append(k)
-
- # Pop the keys from the dictionary after iteration
- for k in keys_to_pop:
- optional_params.pop(k)
- if task == "conversational":
- inference_params = deepcopy(optional_params)
- inference_params.pop("details")
- inference_params.pop("return_full_text")
- past_user_inputs = []
- generated_responses = []
- text = ""
- for message in messages:
- if message["role"] == "user":
- if text != "":
- past_user_inputs.append(text)
- text = convert_content_list_to_str(message)
- elif message["role"] == "assistant" or message["role"] == "system":
- generated_responses.append(convert_content_list_to_str(message))
- data = {
- "inputs": {
- "text": text,
- "past_user_inputs": past_user_inputs,
- "generated_responses": generated_responses,
- },
- "parameters": inference_params,
- }
-
- elif task == "text-generation-inference":
- # always send "details" and "return_full_text" as params
- if model in litellm.custom_prompt_dict:
- # check if the model has a registered custom prompt
- model_prompt_details = litellm.custom_prompt_dict[model]
- prompt = custom_prompt(
- role_dict=model_prompt_details.get("roles", None),
- initial_prompt_value=model_prompt_details.get(
- "initial_prompt_value", ""
- ),
- final_prompt_value=model_prompt_details.get(
- "final_prompt_value", ""
- ),
- messages=messages,
- )
- else:
- prompt = prompt_factory(model=model, messages=messages)
- data = {
- "inputs": prompt, # type: ignore
- "parameters": optional_params,
- "stream": ( # type: ignore
- True
- if "stream" in optional_params
- and isinstance(optional_params["stream"], bool)
- and optional_params["stream"] is True # type: ignore
- else False
- ),
- }
+ if "max_retries" in optional_params:
+ logger.warning("`max_retries` is not supported. It will be ignored.")
+ optional_params.pop("max_retries", None)
+ first_part, remaining = model.split("/", 1)
+ if "/" in remaining:
+ provider = first_part
+ model_id = remaining
else:
- # Non TGI and Conversational llms
- # We need this branch, it removes 'details' and 'return_full_text' from params
- if model in litellm.custom_prompt_dict:
- # check if the model has a registered custom prompt
- model_prompt_details = litellm.custom_prompt_dict[model]
- prompt = custom_prompt(
- role_dict=model_prompt_details.get("roles", {}),
- initial_prompt_value=model_prompt_details.get(
- "initial_prompt_value", ""
- ),
- final_prompt_value=model_prompt_details.get(
- "final_prompt_value", ""
- ),
- bos_token=model_prompt_details.get("bos_token", ""),
- eos_token=model_prompt_details.get("eos_token", ""),
- messages=messages,
- )
- else:
- prompt = prompt_factory(model=model, messages=messages)
- inference_params = deepcopy(optional_params)
- inference_params.pop("details")
- inference_params.pop("return_full_text")
- data = {
- "inputs": prompt, # type: ignore
- }
- if task == "text-generation-inference":
- data["parameters"] = inference_params
- data["stream"] = ( # type: ignore
- True # type: ignore
- if "stream" in optional_params and optional_params["stream"] is True
- else False
- )
-
- ### RE-ADD SPECIAL PARAMS
- if len(special_params_dict.keys()) > 0:
- data.update({"options": special_params_dict})
-
- return data
-
- def get_api_base(self, api_base: Optional[str], model: str) -> str:
- """
- Get the API base for the Huggingface API.
-
- Do not add the chat/embedding/rerank extension here. Let the handler do this.
- """
- if "https" in model:
- completion_url = model
- elif api_base is not None:
- completion_url = api_base
- elif "HF_API_BASE" in os.environ:
- completion_url = os.getenv("HF_API_BASE", "")
- elif "HUGGINGFACE_API_BASE" in os.environ:
- completion_url = os.getenv("HUGGINGFACE_API_BASE", "")
- else:
- completion_url = f"https://api-inference.huggingface.co/models/{model}"
-
- return completion_url
-
- def validate_environment(
- self,
- headers: Dict,
- model: str,
- messages: List[AllMessageValues],
- optional_params: Dict,
- api_key: Optional[str] = None,
- api_base: Optional[str] = None,
- ) -> Dict:
- default_headers = {
- "content-type": "application/json",
- }
- if api_key is not None:
- default_headers["Authorization"] = (
- f"Bearer {api_key}" # Huggingface Inference Endpoint default is to accept bearer tokens
+ provider = "hf-inference"
+ model_id = model
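+        # Look up which inference providers serve this model on the Hugging Face Hub
+        # (cached via lru_cache in common_utils) before mapping to the provider's model id.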
+ provider_mapping = _fetch_inference_provider_mapping(model_id)
+ if provider not in provider_mapping:
+ raise HuggingFaceError(
+ message=f"Model {model_id} is not supported for provider {provider}",
+ status_code=404,
+ headers={},
)
-
- headers = {**headers, **default_headers}
- return headers
-
- def get_error_class(
- self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
- ) -> BaseLLMException:
- return HuggingfaceError(
- status_code=status_code, message=error_message, headers=headers
- )
-
- def _convert_streamed_response_to_complete_response(
- self,
- response: httpx.Response,
- logging_obj: LoggingClass,
- model: str,
- data: dict,
- api_key: Optional[str] = None,
- ) -> List[Dict[str, Any]]:
- streamed_response = CustomStreamWrapper(
- completion_stream=response.iter_lines(),
- model=model,
- custom_llm_provider="huggingface",
- logging_obj=logging_obj,
- )
- content = ""
- for chunk in streamed_response:
- content += chunk["choices"][0]["delta"]["content"]
- completion_response: List[Dict[str, Any]] = [{"generated_text": content}]
- ## LOGGING
- logging_obj.post_call(
- input=data,
- api_key=api_key,
- original_response=completion_response,
- additional_args={"complete_input_dict": data},
- )
- return completion_response
-
- def convert_to_model_response_object( # noqa: PLR0915
- self,
- completion_response: Union[List[Dict[str, Any]], Dict[str, Any]],
- model_response: ModelResponse,
- task: Optional[hf_tasks],
- optional_params: dict,
- encoding: Any,
- messages: List[AllMessageValues],
- model: str,
- ):
- if task is None:
- task = "text-generation-inference" # default to tgi
-
- if task == "conversational":
- if len(completion_response["generated_text"]) > 0: # type: ignore
- model_response.choices[0].message.content = completion_response[ # type: ignore
- "generated_text"
- ]
- elif task == "text-generation-inference":
- if (
- not isinstance(completion_response, list)
- or not isinstance(completion_response[0], dict)
- or "generated_text" not in completion_response[0]
- ):
- raise HuggingfaceError(
- status_code=422,
- message=f"response is not in expected format - {completion_response}",
- headers=None,
- )
-
- if len(completion_response[0]["generated_text"]) > 0:
- model_response.choices[0].message.content = output_parser( # type: ignore
- completion_response[0]["generated_text"]
- )
- ## GETTING LOGPROBS + FINISH REASON
- if (
- "details" in completion_response[0]
- and "tokens" in completion_response[0]["details"]
- ):
- model_response.choices[0].finish_reason = completion_response[0][
- "details"
- ]["finish_reason"]
- sum_logprob = 0
- for token in completion_response[0]["details"]["tokens"]:
- if token["logprob"] is not None:
- sum_logprob += token["logprob"]
- setattr(model_response.choices[0].message, "_logprob", sum_logprob) # type: ignore
- if "best_of" in optional_params and optional_params["best_of"] > 1:
- if (
- "details" in completion_response[0]
- and "best_of_sequences" in completion_response[0]["details"]
- ):
- choices_list = []
- for idx, item in enumerate(
- completion_response[0]["details"]["best_of_sequences"]
- ):
- sum_logprob = 0
- for token in item["tokens"]:
- if token["logprob"] is not None:
- sum_logprob += token["logprob"]
- if len(item["generated_text"]) > 0:
- message_obj = Message(
- content=output_parser(item["generated_text"]),
- logprobs=sum_logprob,
- )
- else:
- message_obj = Message(content=None)
- choice_obj = Choices(
- finish_reason=item["finish_reason"],
- index=idx + 1,
- message=message_obj,
- )
- choices_list.append(choice_obj)
- model_response.choices.extend(choices_list)
- elif task == "text-classification":
- model_response.choices[0].message.content = json.dumps( # type: ignore
- completion_response
+ provider_mapping = provider_mapping[provider]
+ if provider_mapping["status"] == "staging":
+ logger.warning(
+ f"Model {model_id} is in staging mode for provider {provider}. Meant for test purposes only."
+ )
+ mapped_model = provider_mapping["providerId"]
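+        # `providerId` is the model id the selected provider expects, so the outgoing
+        # request is built against it rather than the Hub model id.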
+ messages = self._transform_messages(messages=messages, model=mapped_model)
+ return dict(
+ ChatCompletionRequest(
+ model=mapped_model, messages=messages, **optional_params
)
- else:
- if (
- isinstance(completion_response, list)
- and len(completion_response[0]["generated_text"]) > 0
- ):
- model_response.choices[0].message.content = output_parser( # type: ignore
- completion_response[0]["generated_text"]
- )
- ## CALCULATING USAGE
- prompt_tokens = 0
- try:
- prompt_tokens = token_counter(model=model, messages=messages)
- except Exception:
- # this should remain non blocking we should not block a response returning if calculating usage fails
- pass
- output_text = model_response["choices"][0]["message"].get("content", "")
- if output_text is not None and len(output_text) > 0:
- completion_tokens = 0
- try:
- completion_tokens = len(
- encoding.encode(
- model_response["choices"][0]["message"].get("content", "")
- )
- ) ##[TODO] use the llama2 tokenizer here
- except Exception:
- # this should remain non blocking we should not block a response returning if calculating usage fails
- pass
- else:
- completion_tokens = 0
-
- model_response.created = int(time.time())
- model_response.model = model
- usage = Usage(
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
- total_tokens=prompt_tokens + completion_tokens,
- )
- setattr(model_response, "usage", usage)
- model_response._hidden_params["original_response"] = completion_response
- return model_response
-
- def transform_response(
- self,
- model: str,
- raw_response: httpx.Response,
- model_response: ModelResponse,
- logging_obj: LoggingClass,
- request_data: Dict,
- messages: List[AllMessageValues],
- optional_params: Dict,
- litellm_params: Dict,
- encoding: Any,
- api_key: Optional[str] = None,
- json_mode: Optional[bool] = None,
- ) -> ModelResponse:
- ## Some servers might return streaming responses even though stream was not set to true. (e.g. Baseten)
- task = litellm_params.get("task", None)
- is_streamed = False
- if (
- raw_response.__dict__["headers"].get("Content-Type", "")
- == "text/event-stream"
- ):
- is_streamed = True
-
- # iterate over the complete streamed response, and return the final answer
- if is_streamed:
- completion_response = self._convert_streamed_response_to_complete_response(
- response=raw_response,
- logging_obj=logging_obj,
- model=model,
- data=request_data,
- api_key=api_key,
- )
- else:
- ## LOGGING
- logging_obj.post_call(
- input=request_data,
- api_key=api_key,
- original_response=raw_response.text,
- additional_args={"complete_input_dict": request_data},
- )
- ## RESPONSE OBJECT
- try:
- completion_response = raw_response.json()
- if isinstance(completion_response, dict):
- completion_response = [completion_response]
- except Exception:
- raise HuggingfaceError(
- message=f"Original Response received: {raw_response.text}",
- status_code=raw_response.status_code,
- )
-
- if isinstance(completion_response, dict) and "error" in completion_response:
- raise HuggingfaceError(
- message=completion_response["error"], # type: ignore
- status_code=raw_response.status_code,
- )
- return self.convert_to_model_response_object(
- completion_response=completion_response,
- model_response=model_response,
- task=task if task is not None and task in hf_task_list else None,
- optional_params=optional_params,
- encoding=encoding,
- messages=messages,
- model=model,
)
diff --git a/litellm/llms/huggingface/common_utils.py b/litellm/llms/huggingface/common_utils.py
index d793b29874..9ab4367c9b 100644
--- a/litellm/llms/huggingface/common_utils.py
+++ b/litellm/llms/huggingface/common_utils.py
@@ -1,18 +1,30 @@
+import os
+from functools import lru_cache
from typing import Literal, Optional, Union
import httpx
from litellm.llms.base_llm.chat.transformation import BaseLLMException
+HF_HUB_URL = "https://huggingface.co"
-class HuggingfaceError(BaseLLMException):
+
+class HuggingFaceError(BaseLLMException):
def __init__(
self,
- status_code: int,
- message: str,
- headers: Optional[Union[dict, httpx.Headers]] = None,
+        status_code: int,
+        message: str,
+ request: Optional[httpx.Request] = None,
+ response: Optional[httpx.Response] = None,
+ headers: Optional[Union[httpx.Headers, dict]] = None,
):
- super().__init__(status_code=status_code, message=message, headers=headers)
+ super().__init__(
+ status_code=status_code,
+ message=message,
+ request=request,
+ response=response,
+ headers=headers,
+ )
hf_tasks = Literal[
@@ -43,3 +55,48 @@ def output_parser(generated_text: str):
if generated_text.endswith(token):
generated_text = generated_text[::-1].replace(token[::-1], "", 1)[::-1]
return generated_text
+
+
+@lru_cache(maxsize=128)
+def _fetch_inference_provider_mapping(model: str) -> dict:
+ """
+ Fetch provider mappings for a model from the Hugging Face Hub.
+
+ Args:
+ model: The model identifier (e.g., 'meta-llama/Llama-2-7b')
+
+ Returns:
+ dict: The inference provider mapping for the model
+
+ Raises:
+ ValueError: If no provider mapping is found
+ HuggingFaceError: If the API request fails
+ """
+ headers = {"Accept": "application/json"}
+ if os.getenv("HUGGINGFACE_API_KEY"):
+ headers["Authorization"] = f"Bearer {os.getenv('HUGGINGFACE_API_KEY')}"
+
+ path = f"{HF_HUB_URL}/api/models/{model}"
+ params = {"expand": ["inferenceProviderMapping"]}
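+    # Expected mapping shape (inferred from how callers consume it; values are illustrative), e.g.
+    #   {"<provider>": {"status": "live", "providerId": "<provider-specific model id>"}}
+    # after the "inferenceProviderMapping" key is unwrapped below.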
+
+ try:
+ response = httpx.get(path, headers=headers, params=params)
+ response.raise_for_status()
+ provider_mapping = response.json().get("inferenceProviderMapping")
+
+ if provider_mapping is None:
+ raise ValueError(f"No provider mapping found for model {model}")
+
+ return provider_mapping
+ except httpx.HTTPError as e:
+ if hasattr(e, "response"):
+ status_code = getattr(e.response, "status_code", 500)
+ headers = getattr(e.response, "headers", {})
+ else:
+ status_code = 500
+ headers = {}
+ raise HuggingFaceError(
+ message=f"Failed to fetch provider mapping: {str(e)}",
+ status_code=status_code,
+ headers=headers,
+ )
diff --git a/litellm/llms/huggingface/embedding/handler.py b/litellm/llms/huggingface/embedding/handler.py
new file mode 100644
index 0000000000..bfd73c1346
--- /dev/null
+++ b/litellm/llms/huggingface/embedding/handler.py
@@ -0,0 +1,425 @@
+import json
+import os
+from typing import Any, Callable, Dict, List, Literal, Optional, Union, get_args
+
+import httpx
+
+import litellm
+from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+from litellm.llms.custom_httpx.http_handler import (
+ AsyncHTTPHandler,
+ HTTPHandler,
+ get_async_httpx_client,
+)
+from litellm.types.utils import EmbeddingResponse
+
+from ...base import BaseLLM
+from ..common_utils import HuggingFaceError
+from .transformation import HuggingFaceEmbeddingConfig
+
+config = HuggingFaceEmbeddingConfig()
+
+HF_HUB_URL = "https://huggingface.co"
+
+hf_tasks_embeddings = Literal[ # pipeline tags + hf tei endpoints - https://huggingface.github.io/text-embeddings-inference/#/
+ "sentence-similarity", "feature-extraction", "rerank", "embed", "similarity"
+]
+
+
+def get_hf_task_embedding_for_model(
+ model: str, task_type: Optional[str], api_base: str
+) -> Optional[str]:
+ if task_type is not None:
+ if task_type in get_args(hf_tasks_embeddings):
+ return task_type
+ else:
+ raise Exception(
+ "Invalid task_type={}. Expected one of={}".format(
+ task_type, hf_tasks_embeddings
+ )
+ )
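+    # No explicit task_type given - infer it from the model's `pipeline_tag` on the
+    # Hugging Face Hub (callers pass api_base=HF_HUB_URL).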
+ http_client = HTTPHandler(concurrent_limit=1)
+
+ model_info = http_client.get(url=f"{api_base}/api/models/{model}")
+
+ model_info_dict = model_info.json()
+
+ pipeline_tag: Optional[str] = model_info_dict.get("pipeline_tag", None)
+
+ return pipeline_tag
+
+
+async def async_get_hf_task_embedding_for_model(
+ model: str, task_type: Optional[str], api_base: str
+) -> Optional[str]:
+ if task_type is not None:
+ if task_type in get_args(hf_tasks_embeddings):
+ return task_type
+ else:
+ raise Exception(
+ "Invalid task_type={}. Expected one of={}".format(
+ task_type, hf_tasks_embeddings
+ )
+ )
+ http_client = get_async_httpx_client(
+ llm_provider=litellm.LlmProviders.HUGGINGFACE,
+ )
+
+ model_info = await http_client.get(url=f"{api_base}/api/models/{model}")
+
+ model_info_dict = model_info.json()
+
+ pipeline_tag: Optional[str] = model_info_dict.get("pipeline_tag", None)
+
+ return pipeline_tag
+
+
+class HuggingFaceEmbedding(BaseLLM):
+ _client_session: Optional[httpx.Client] = None
+ _aclient_session: Optional[httpx.AsyncClient] = None
+
+ def __init__(self) -> None:
+ super().__init__()
+
+ def _transform_input_on_pipeline_tag(
+ self, input: List, pipeline_tag: Optional[str]
+ ) -> dict:
+ if pipeline_tag is None:
+ return {"inputs": input}
+ if pipeline_tag == "sentence-similarity" or pipeline_tag == "similarity":
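+            # Pair the first input (source_sentence) against the remaining inputs, e.g.
+            # ["query", "doc a", "doc b"] -> {"source_sentence": "query", "sentences": ["doc a", "doc b"]}
+            # (example strings are illustrative).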
+ if len(input) < 2:
+ raise HuggingFaceError(
+ status_code=400,
+ message="sentence-similarity requires 2+ sentences",
+ )
+ return {"inputs": {"source_sentence": input[0], "sentences": input[1:]}}
+ elif pipeline_tag == "rerank":
+ if len(input) < 2:
+ raise HuggingFaceError(
+ status_code=400,
+ message="reranker requires 2+ sentences",
+ )
+ return {"inputs": {"query": input[0], "texts": input[1:]}}
+ return {"inputs": input} # default to feature-extraction pipeline tag
+
+ async def _async_transform_input(
+ self,
+ model: str,
+ task_type: Optional[str],
+ embed_url: str,
+ input: List,
+ optional_params: dict,
+ ) -> dict:
+ hf_task = await async_get_hf_task_embedding_for_model(
+ model=model, task_type=task_type, api_base=HF_HUB_URL
+ )
+
+ data = self._transform_input_on_pipeline_tag(input=input, pipeline_tag=hf_task)
+
+ if len(optional_params.keys()) > 0:
+ data["options"] = optional_params
+
+ return data
+
+ def _process_optional_params(self, data: dict, optional_params: dict) -> dict:
+ special_options_keys = config.get_special_options_params()
+ special_parameters_keys = [
+ "min_length",
+ "max_length",
+ "top_k",
+ "top_p",
+ "temperature",
+ "repetition_penalty",
+ "max_time",
+ ]
+
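+        # Route known keys into the HF `options` / `parameters` sub-objects;
+        # anything unrecognised is passed through at the top level of the payload.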
+ for k, v in optional_params.items():
+ if k in special_options_keys:
+ data.setdefault("options", {})
+ data["options"][k] = v
+ elif k in special_parameters_keys:
+ data.setdefault("parameters", {})
+ data["parameters"][k] = v
+ else:
+ data[k] = v
+
+ return data
+
+ def _transform_input(
+ self,
+ input: List,
+ model: str,
+ call_type: Literal["sync", "async"],
+ optional_params: dict,
+ embed_url: str,
+ ) -> dict:
+ data: Dict = {}
+
+ ## TRANSFORMATION ##
+ if "sentence-transformers" in model:
+            if len(input) < 2:
+ raise HuggingFaceError(
+ status_code=400,
+ message="sentence transformers requires 2+ sentences",
+ )
+ data = {"inputs": {"source_sentence": input[0], "sentences": input[1:]}}
+ else:
+ data = {"inputs": input}
+
+ task_type = optional_params.pop("input_type", None)
+
+ if call_type == "sync":
+ hf_task = get_hf_task_embedding_for_model(
+ model=model, task_type=task_type, api_base=HF_HUB_URL
+ )
+ elif call_type == "async":
+ return self._async_transform_input(
+ model=model, task_type=task_type, embed_url=embed_url, input=input
+ ) # type: ignore
+
+ data = self._transform_input_on_pipeline_tag(
+ input=input, pipeline_tag=hf_task
+ )
+
+ if len(optional_params.keys()) > 0:
+ data = self._process_optional_params(
+ data=data, optional_params=optional_params
+ )
+
+ return data
+
+ def _process_embedding_response(
+ self,
+ embeddings: dict,
+ model_response: EmbeddingResponse,
+ model: str,
+ input: List,
+ encoding: Any,
+ ) -> EmbeddingResponse:
+ output_data = []
+ if "similarities" in embeddings:
+ for idx, embedding in embeddings["similarities"]:
+ output_data.append(
+ {
+ "object": "embedding",
+ "index": idx,
+ "embedding": embedding, # flatten list returned from hf
+ }
+ )
+ else:
+ for idx, embedding in enumerate(embeddings):
+ if isinstance(embedding, float):
+ output_data.append(
+ {
+ "object": "embedding",
+ "index": idx,
+ "embedding": embedding, # flatten list returned from hf
+ }
+ )
+ elif isinstance(embedding, list) and isinstance(embedding[0], float):
+ output_data.append(
+ {
+ "object": "embedding",
+ "index": idx,
+ "embedding": embedding, # flatten list returned from hf
+ }
+ )
+ else:
+ output_data.append(
+ {
+ "object": "embedding",
+ "index": idx,
+ "embedding": embedding[0][
+ 0
+ ], # flatten list returned from hf
+ }
+ )
+ model_response.object = "list"
+ model_response.data = output_data
+ model_response.model = model
+ input_tokens = 0
+ for text in input:
+ input_tokens += len(encoding.encode(text))
+
+ setattr(
+ model_response,
+ "usage",
+ litellm.Usage(
+ prompt_tokens=input_tokens,
+ completion_tokens=input_tokens,
+ total_tokens=input_tokens,
+ prompt_tokens_details=None,
+ completion_tokens_details=None,
+ ),
+ )
+ return model_response
+
+ async def aembedding(
+ self,
+ model: str,
+ input: list,
+ model_response: litellm.utils.EmbeddingResponse,
+ timeout: Union[float, httpx.Timeout],
+ logging_obj: LiteLLMLoggingObj,
+ optional_params: dict,
+ api_base: str,
+ api_key: Optional[str],
+ headers: dict,
+ encoding: Callable,
+ client: Optional[AsyncHTTPHandler] = None,
+ ):
+ ## TRANSFORMATION ##
+ data = self._transform_input(
+ input=input,
+ model=model,
+ call_type="sync",
+ optional_params=optional_params,
+ embed_url=api_base,
+ )
+
+ ## LOGGING
+ logging_obj.pre_call(
+ input=input,
+ api_key=api_key,
+ additional_args={
+ "complete_input_dict": data,
+ "headers": headers,
+ "api_base": api_base,
+ },
+ )
+ ## COMPLETION CALL
+ if client is None:
+ client = get_async_httpx_client(
+ llm_provider=litellm.LlmProviders.HUGGINGFACE,
+ )
+
+ response = await client.post(api_base, headers=headers, data=json.dumps(data))
+
+ ## LOGGING
+ logging_obj.post_call(
+ input=input,
+ api_key=api_key,
+ additional_args={"complete_input_dict": data},
+ original_response=response,
+ )
+
+ embeddings = response.json()
+
+ if "error" in embeddings:
+ raise HuggingFaceError(status_code=500, message=embeddings["error"])
+
+ ## PROCESS RESPONSE ##
+ return self._process_embedding_response(
+ embeddings=embeddings,
+ model_response=model_response,
+ model=model,
+ input=input,
+ encoding=encoding,
+ )
+
+ def embedding(
+ self,
+ model: str,
+ input: list,
+ model_response: EmbeddingResponse,
+ optional_params: dict,
+ litellm_params: dict,
+ logging_obj: LiteLLMLoggingObj,
+ encoding: Callable,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ timeout: Union[float, httpx.Timeout] = httpx.Timeout(None),
+ aembedding: Optional[bool] = None,
+ client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+ headers={},
+ ) -> EmbeddingResponse:
+ super().embedding()
+ headers = config.validate_environment(
+ api_key=api_key,
+ headers=headers,
+ model=model,
+ optional_params=optional_params,
+ messages=[],
+ litellm_params=litellm_params,
+ )
+ task_type = optional_params.pop("input_type", None)
+ task = get_hf_task_embedding_for_model(
+ model=model, task_type=task_type, api_base=HF_HUB_URL
+ )
+ # print_verbose(f"{model}, {task}")
+ embed_url = ""
+ if "https" in model:
+ embed_url = model
+ elif api_base:
+ embed_url = api_base
+ elif "HF_API_BASE" in os.environ:
+ embed_url = os.getenv("HF_API_BASE", "")
+ elif "HUGGINGFACE_API_BASE" in os.environ:
+ embed_url = os.getenv("HUGGINGFACE_API_BASE", "")
+ else:
+ embed_url = (
+ f"https://router.huggingface.co/hf-inference/pipeline/{task}/{model}"
+ )
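+            # e.g. task "feature-extraction", model "BAAI/bge-small-en-v1.5" (illustrative) ->
+            #   https://router.huggingface.co/hf-inference/pipeline/feature-extraction/BAAI/bge-small-en-v1.5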
+
+ ## ROUTING ##
+ if aembedding is True:
+ return self.aembedding(
+ input=input,
+ model_response=model_response,
+ timeout=timeout,
+ logging_obj=logging_obj,
+ headers=headers,
+ api_base=embed_url, # type: ignore
+ api_key=api_key,
+ client=client if isinstance(client, AsyncHTTPHandler) else None,
+ model=model,
+ optional_params=optional_params,
+ encoding=encoding,
+ )
+
+ ## TRANSFORMATION ##
+
+ data = self._transform_input(
+ input=input,
+ model=model,
+ call_type="sync",
+ optional_params=optional_params,
+ embed_url=embed_url,
+ )
+
+ ## LOGGING
+ logging_obj.pre_call(
+ input=input,
+ api_key=api_key,
+ additional_args={
+ "complete_input_dict": data,
+ "headers": headers,
+ "api_base": embed_url,
+ },
+ )
+ ## COMPLETION CALL
+ if client is None or not isinstance(client, HTTPHandler):
+ client = HTTPHandler(concurrent_limit=1)
+ response = client.post(embed_url, headers=headers, data=json.dumps(data))
+
+ ## LOGGING
+ logging_obj.post_call(
+ input=input,
+ api_key=api_key,
+ additional_args={"complete_input_dict": data},
+ original_response=response,
+ )
+
+ embeddings = response.json()
+
+ if "error" in embeddings:
+ raise HuggingFaceError(status_code=500, message=embeddings["error"])
+
+ ## PROCESS RESPONSE ##
+ return self._process_embedding_response(
+ embeddings=embeddings,
+ model_response=model_response,
+ model=model,
+ input=input,
+ encoding=encoding,
+ )
diff --git a/litellm/llms/huggingface/embedding/transformation.py b/litellm/llms/huggingface/embedding/transformation.py
new file mode 100644
index 0000000000..60bd5dcd61
--- /dev/null
+++ b/litellm/llms/huggingface/embedding/transformation.py
@@ -0,0 +1,590 @@
+import json
+import os
+import time
+from copy import deepcopy
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
+
+import httpx
+
+import litellm
+from litellm.litellm_core_utils.prompt_templates.common_utils import (
+ convert_content_list_to_str,
+)
+from litellm.litellm_core_utils.prompt_templates.factory import (
+ custom_prompt,
+ prompt_factory,
+)
+from litellm.litellm_core_utils.streaming_handler import CustomStreamWrapper
+from litellm.llms.base_llm.chat.transformation import BaseConfig, BaseLLMException
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import AllMessageValues
+from litellm.types.utils import Choices, Message, ModelResponse, Usage
+from litellm.utils import token_counter
+
+from ..common_utils import HuggingFaceError, hf_task_list, hf_tasks, output_parser
+
+if TYPE_CHECKING:
+ from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+
+ LoggingClass = LiteLLMLoggingObj
+else:
+ LoggingClass = Any
+
+
+tgi_models_cache = None
+conv_models_cache = None
+
+
+class HuggingFaceEmbeddingConfig(BaseConfig):
+ """
+ Reference: https://huggingface.github.io/text-generation-inference/#/Text%20Generation%20Inference/compat_generate
+ """
+
+ hf_task: Optional[
+ hf_tasks
+ ] = None # litellm-specific param, used to know the api spec to use when calling huggingface api
+ best_of: Optional[int] = None
+ decoder_input_details: Optional[bool] = None
+ details: Optional[bool] = True # enables returning logprobs + best of
+ max_new_tokens: Optional[int] = None
+ repetition_penalty: Optional[float] = None
+ return_full_text: Optional[
+ bool
+ ] = False # by default don't return the input as part of the output
+ seed: Optional[int] = None
+ temperature: Optional[float] = None
+ top_k: Optional[int] = None
+ top_n_tokens: Optional[int] = None
+ top_p: Optional[int] = None
+ truncate: Optional[int] = None
+ typical_p: Optional[float] = None
+ watermark: Optional[bool] = None
+
+ def __init__(
+ self,
+ best_of: Optional[int] = None,
+ decoder_input_details: Optional[bool] = None,
+ details: Optional[bool] = None,
+ max_new_tokens: Optional[int] = None,
+ repetition_penalty: Optional[float] = None,
+ return_full_text: Optional[bool] = None,
+ seed: Optional[int] = None,
+ temperature: Optional[float] = None,
+ top_k: Optional[int] = None,
+ top_n_tokens: Optional[int] = None,
+ top_p: Optional[int] = None,
+ truncate: Optional[int] = None,
+ typical_p: Optional[float] = None,
+ watermark: Optional[bool] = None,
+ ) -> None:
+ locals_ = locals().copy()
+ for key, value in locals_.items():
+ if key != "self" and value is not None:
+ setattr(self.__class__, key, value)
+
+ @classmethod
+ def get_config(cls):
+ return super().get_config()
+
+ def get_special_options_params(self):
+ return ["use_cache", "wait_for_model"]
+
+ def get_supported_openai_params(self, model: str):
+ return [
+ "stream",
+ "temperature",
+ "max_tokens",
+ "max_completion_tokens",
+ "top_p",
+ "stop",
+ "n",
+ "echo",
+ ]
+
+ def map_openai_params(
+ self,
+ non_default_params: Dict,
+ optional_params: Dict,
+ model: str,
+ drop_params: bool,
+ ) -> Dict:
+ for param, value in non_default_params.items():
+ # temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None
+ if param == "temperature":
+ if value == 0.0 or value == 0:
+ # hugging face exception raised when temp==0
+ # Failed: Error occurred: HuggingfaceException - Input validation error: `temperature` must be strictly positive
+ value = 0.01
+ optional_params["temperature"] = value
+ if param == "top_p":
+ optional_params["top_p"] = value
+ if param == "n":
+ optional_params["best_of"] = value
+ optional_params[
+ "do_sample"
+ ] = True # Need to sample if you want best of for hf inference endpoints
+ if param == "stream":
+ optional_params["stream"] = value
+ if param == "stop":
+ optional_params["stop"] = value
+ if param == "max_tokens" or param == "max_completion_tokens":
+ # HF TGI raises the following exception when max_new_tokens==0
+ # Failed: Error occurred: HuggingfaceException - Input validation error: `max_new_tokens` must be strictly positive
+ if value == 0:
+ value = 1
+ optional_params["max_new_tokens"] = value
+ if param == "echo":
+ # https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation.decoder_input_details
+ # Return the decoder input token logprobs and ids. You must set details=True as well for it to be taken into account. Defaults to False
+ optional_params["decoder_input_details"] = True
+
+ return optional_params
+
+ def get_hf_api_key(self) -> Optional[str]:
+ return get_secret_str("HUGGINGFACE_API_KEY")
+
+ def read_tgi_conv_models(self):
+ try:
+ global tgi_models_cache, conv_models_cache
+ # Check if the cache is already populated
+ # so we don't keep on reading txt file if there are 1k requests
+ if (tgi_models_cache is not None) and (conv_models_cache is not None):
+ return tgi_models_cache, conv_models_cache
+ # If not, read the file and populate the cache
+ tgi_models = set()
+ script_directory = os.path.dirname(os.path.abspath(__file__))
+ script_directory = os.path.dirname(script_directory)
+ # Construct the file path relative to the script's directory
+ file_path = os.path.join(
+ script_directory,
+ "huggingface_llms_metadata",
+ "hf_text_generation_models.txt",
+ )
+
+ with open(file_path, "r") as file:
+ for line in file:
+ tgi_models.add(line.strip())
+
+ # Cache the set for future use
+ tgi_models_cache = tgi_models
+
+ # If not, read the file and populate the cache
+ file_path = os.path.join(
+ script_directory,
+ "huggingface_llms_metadata",
+ "hf_conversational_models.txt",
+ )
+ conv_models = set()
+ with open(file_path, "r") as file:
+ for line in file:
+ conv_models.add(line.strip())
+ # Cache the set for future use
+ conv_models_cache = conv_models
+ return tgi_models, conv_models
+ except Exception:
+ return set(), set()
+
+ def get_hf_task_for_model(self, model: str) -> Tuple[hf_tasks, str]:
+ # read text file, cast it to set
+ # read the file called "huggingface_llms_metadata/hf_text_generation_models.txt"
+ if model.split("/")[0] in hf_task_list:
+ split_model = model.split("/", 1)
+ return split_model[0], split_model[1] # type: ignore
+ tgi_models, conversational_models = self.read_tgi_conv_models()
+
+ if model in tgi_models:
+ return "text-generation-inference", model
+ elif model in conversational_models:
+ return "conversational", model
+ elif "roneneldan/TinyStories" in model:
+ return "text-generation", model
+ else:
+ return "text-generation-inference", model # default to tgi
+
+ def transform_request(
+ self,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ headers: dict,
+ ) -> dict:
+ task = litellm_params.get("task", None)
+ ## VALIDATE API FORMAT
+ if task is None or not isinstance(task, str) or task not in hf_task_list:
+ raise Exception(
+ "Invalid hf task - {}. Valid formats - {}.".format(task, hf_tasks)
+ )
+
+ ## Load Config
+ config = litellm.HuggingFaceEmbeddingConfig.get_config()
+ for k, v in config.items():
+ if (
+ k not in optional_params
+ ): # completion(top_k=3) > huggingfaceConfig(top_k=3) <- allows for dynamic variables to be passed in
+ optional_params[k] = v
+
+ ### MAP INPUT PARAMS
+ #### HANDLE SPECIAL PARAMS
+ special_params = self.get_special_options_params()
+ special_params_dict = {}
+ # Create a list of keys to pop after iteration
+ keys_to_pop = []
+
+ for k, v in optional_params.items():
+ if k in special_params:
+ special_params_dict[k] = v
+ keys_to_pop.append(k)
+
+ # Pop the keys from the dictionary after iteration
+ for k in keys_to_pop:
+ optional_params.pop(k)
+ if task == "conversational":
+ inference_params = deepcopy(optional_params)
+ inference_params.pop("details")
+ inference_params.pop("return_full_text")
+ past_user_inputs = []
+ generated_responses = []
+ text = ""
+ for message in messages:
+ if message["role"] == "user":
+ if text != "":
+ past_user_inputs.append(text)
+ text = convert_content_list_to_str(message)
+ elif message["role"] == "assistant" or message["role"] == "system":
+ generated_responses.append(convert_content_list_to_str(message))
+ data = {
+ "inputs": {
+ "text": text,
+ "past_user_inputs": past_user_inputs,
+ "generated_responses": generated_responses,
+ },
+ "parameters": inference_params,
+ }
+
+ elif task == "text-generation-inference":
+ # always send "details" and "return_full_text" as params
+ if model in litellm.custom_prompt_dict:
+ # check if the model has a registered custom prompt
+ model_prompt_details = litellm.custom_prompt_dict[model]
+ prompt = custom_prompt(
+ role_dict=model_prompt_details.get("roles", None),
+ initial_prompt_value=model_prompt_details.get(
+ "initial_prompt_value", ""
+ ),
+ final_prompt_value=model_prompt_details.get(
+ "final_prompt_value", ""
+ ),
+ messages=messages,
+ )
+ else:
+ prompt = prompt_factory(model=model, messages=messages)
+ data = {
+ "inputs": prompt, # type: ignore
+ "parameters": optional_params,
+ "stream": ( # type: ignore
+ True
+ if "stream" in optional_params
+ and isinstance(optional_params["stream"], bool)
+ and optional_params["stream"] is True # type: ignore
+ else False
+ ),
+ }
+ else:
+ # Non TGI and Conversational llms
+ # We need this branch, it removes 'details' and 'return_full_text' from params
+ if model in litellm.custom_prompt_dict:
+ # check if the model has a registered custom prompt
+ model_prompt_details = litellm.custom_prompt_dict[model]
+ prompt = custom_prompt(
+ role_dict=model_prompt_details.get("roles", {}),
+ initial_prompt_value=model_prompt_details.get(
+ "initial_prompt_value", ""
+ ),
+ final_prompt_value=model_prompt_details.get(
+ "final_prompt_value", ""
+ ),
+ bos_token=model_prompt_details.get("bos_token", ""),
+ eos_token=model_prompt_details.get("eos_token", ""),
+ messages=messages,
+ )
+ else:
+ prompt = prompt_factory(model=model, messages=messages)
+ inference_params = deepcopy(optional_params)
+ inference_params.pop("details")
+ inference_params.pop("return_full_text")
+ data = {
+ "inputs": prompt, # type: ignore
+ }
+ if task == "text-generation-inference":
+ data["parameters"] = inference_params
+ data["stream"] = ( # type: ignore
+ True # type: ignore
+ if "stream" in optional_params and optional_params["stream"] is True
+ else False
+ )
+
+ ### RE-ADD SPECIAL PARAMS
+ if len(special_params_dict.keys()) > 0:
+ data.update({"options": special_params_dict})
+
+ return data
+
+ def get_api_base(self, api_base: Optional[str], model: str) -> str:
+ """
+ Get the API base for the Huggingface API.
+
+ Do not add the chat/embedding/rerank extension here. Let the handler do this.
+ """
+ if "https" in model:
+ completion_url = model
+ elif api_base is not None:
+ completion_url = api_base
+ elif "HF_API_BASE" in os.environ:
+ completion_url = os.getenv("HF_API_BASE", "")
+ elif "HUGGINGFACE_API_BASE" in os.environ:
+ completion_url = os.getenv("HUGGINGFACE_API_BASE", "")
+ else:
+ completion_url = f"https://api-inference.huggingface.co/models/{model}"
+
+ return completion_url
+
+ def validate_environment(
+ self,
+ headers: Dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: Dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> Dict:
+ default_headers = {
+ "content-type": "application/json",
+ }
+ if api_key is not None:
+ default_headers[
+ "Authorization"
+ ] = f"Bearer {api_key}" # Huggingface Inference Endpoint default is to accept bearer tokens
+
+ headers = {**headers, **default_headers}
+ return headers
+
+ def get_error_class(
+ self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
+ ) -> BaseLLMException:
+ return HuggingFaceError(
+ status_code=status_code, message=error_message, headers=headers
+ )
+
+ def _convert_streamed_response_to_complete_response(
+ self,
+ response: httpx.Response,
+ logging_obj: LoggingClass,
+ model: str,
+ data: dict,
+ api_key: Optional[str] = None,
+ ) -> List[Dict[str, Any]]:
+ streamed_response = CustomStreamWrapper(
+ completion_stream=response.iter_lines(),
+ model=model,
+ custom_llm_provider="huggingface",
+ logging_obj=logging_obj,
+ )
+ content = ""
+ for chunk in streamed_response:
+ content += chunk["choices"][0]["delta"]["content"]
+ completion_response: List[Dict[str, Any]] = [{"generated_text": content}]
+ ## LOGGING
+ logging_obj.post_call(
+ input=data,
+ api_key=api_key,
+ original_response=completion_response,
+ additional_args={"complete_input_dict": data},
+ )
+ return completion_response
+
+ def convert_to_model_response_object( # noqa: PLR0915
+ self,
+ completion_response: Union[List[Dict[str, Any]], Dict[str, Any]],
+ model_response: ModelResponse,
+ task: Optional[hf_tasks],
+ optional_params: dict,
+ encoding: Any,
+ messages: List[AllMessageValues],
+ model: str,
+ ):
+ if task is None:
+ task = "text-generation-inference" # default to tgi
+
+ if task == "conversational":
+ if len(completion_response["generated_text"]) > 0: # type: ignore
+ model_response.choices[0].message.content = completion_response[ # type: ignore
+ "generated_text"
+ ]
+ elif task == "text-generation-inference":
+ if (
+ not isinstance(completion_response, list)
+ or not isinstance(completion_response[0], dict)
+ or "generated_text" not in completion_response[0]
+ ):
+ raise HuggingFaceError(
+ status_code=422,
+ message=f"response is not in expected format - {completion_response}",
+ headers=None,
+ )
+
+ if len(completion_response[0]["generated_text"]) > 0:
+ model_response.choices[0].message.content = output_parser( # type: ignore
+ completion_response[0]["generated_text"]
+ )
+ ## GETTING LOGPROBS + FINISH REASON
+ if (
+ "details" in completion_response[0]
+ and "tokens" in completion_response[0]["details"]
+ ):
+ model_response.choices[0].finish_reason = completion_response[0][
+ "details"
+ ]["finish_reason"]
+ sum_logprob = 0
+ for token in completion_response[0]["details"]["tokens"]:
+ if token["logprob"] is not None:
+ sum_logprob += token["logprob"]
+ setattr(model_response.choices[0].message, "_logprob", sum_logprob) # type: ignore
+ if "best_of" in optional_params and optional_params["best_of"] > 1:
+ if (
+ "details" in completion_response[0]
+ and "best_of_sequences" in completion_response[0]["details"]
+ ):
+ choices_list = []
+ for idx, item in enumerate(
+ completion_response[0]["details"]["best_of_sequences"]
+ ):
+ sum_logprob = 0
+ for token in item["tokens"]:
+ if token["logprob"] is not None:
+ sum_logprob += token["logprob"]
+ if len(item["generated_text"]) > 0:
+ message_obj = Message(
+ content=output_parser(item["generated_text"]),
+ logprobs=sum_logprob,
+ )
+ else:
+ message_obj = Message(content=None)
+ choice_obj = Choices(
+ finish_reason=item["finish_reason"],
+ index=idx + 1,
+ message=message_obj,
+ )
+ choices_list.append(choice_obj)
+ model_response.choices.extend(choices_list)
+ elif task == "text-classification":
+ model_response.choices[0].message.content = json.dumps( # type: ignore
+ completion_response
+ )
+ else:
+ if (
+ isinstance(completion_response, list)
+ and len(completion_response[0]["generated_text"]) > 0
+ ):
+ model_response.choices[0].message.content = output_parser( # type: ignore
+ completion_response[0]["generated_text"]
+ )
+ ## CALCULATING USAGE
+ prompt_tokens = 0
+ try:
+ prompt_tokens = token_counter(model=model, messages=messages)
+ except Exception:
+ # this should remain non blocking we should not block a response returning if calculating usage fails
+ pass
+ output_text = model_response["choices"][0]["message"].get("content", "")
+ if output_text is not None and len(output_text) > 0:
+ completion_tokens = 0
+ try:
+ completion_tokens = len(
+ encoding.encode(
+ model_response["choices"][0]["message"].get("content", "")
+ )
+ ) ##[TODO] use the llama2 tokenizer here
+ except Exception:
+ # this should remain non blocking we should not block a response returning if calculating usage fails
+ pass
+ else:
+ completion_tokens = 0
+
+ model_response.created = int(time.time())
+ model_response.model = model
+ usage = Usage(
+ prompt_tokens=prompt_tokens,
+ completion_tokens=completion_tokens,
+ total_tokens=prompt_tokens + completion_tokens,
+ )
+ setattr(model_response, "usage", usage)
+ model_response._hidden_params["original_response"] = completion_response
+ return model_response
+
+ def transform_response(
+ self,
+ model: str,
+ raw_response: httpx.Response,
+ model_response: ModelResponse,
+ logging_obj: LoggingClass,
+ request_data: Dict,
+ messages: List[AllMessageValues],
+ optional_params: Dict,
+ litellm_params: Dict,
+ encoding: Any,
+ api_key: Optional[str] = None,
+ json_mode: Optional[bool] = None,
+ ) -> ModelResponse:
+ ## Some servers might return streaming responses even though stream was not set to true. (e.g. Baseten)
+ task = litellm_params.get("task", None)
+ is_streamed = False
+ if (
+ raw_response.__dict__["headers"].get("Content-Type", "")
+ == "text/event-stream"
+ ):
+ is_streamed = True
+
+ # iterate over the complete streamed response, and return the final answer
+ if is_streamed:
+ completion_response = self._convert_streamed_response_to_complete_response(
+ response=raw_response,
+ logging_obj=logging_obj,
+ model=model,
+ data=request_data,
+ api_key=api_key,
+ )
+ else:
+ ## LOGGING
+ logging_obj.post_call(
+ input=request_data,
+ api_key=api_key,
+ original_response=raw_response.text,
+ additional_args={"complete_input_dict": request_data},
+ )
+ ## RESPONSE OBJECT
+ try:
+ completion_response = raw_response.json()
+ if isinstance(completion_response, dict):
+ completion_response = [completion_response]
+ except Exception:
+ raise HuggingFaceError(
+ message=f"Original Response received: {raw_response.text}",
+ status_code=raw_response.status_code,
+ )
+
+ if isinstance(completion_response, dict) and "error" in completion_response:
+ raise HuggingFaceError(
+ message=completion_response["error"], # type: ignore
+ status_code=raw_response.status_code,
+ )
+ return self.convert_to_model_response_object(
+ completion_response=completion_response,
+ model_response=model_response,
+ task=task if task is not None and task in hf_task_list else None,
+ optional_params=optional_params,
+ encoding=encoding,
+ messages=messages,
+ model=model,
+ )
diff --git a/litellm/llms/infinity/rerank/common_utils.py b/litellm/llms/infinity/common_utils.py
similarity index 76%
rename from litellm/llms/infinity/rerank/common_utils.py
rename to litellm/llms/infinity/common_utils.py
index 99477d1a33..089818c829 100644
--- a/litellm/llms/infinity/rerank/common_utils.py
+++ b/litellm/llms/infinity/common_utils.py
@@ -1,10 +1,16 @@
+from typing import Union
import httpx
from litellm.llms.base_llm.chat.transformation import BaseLLMException
class InfinityError(BaseLLMException):
- def __init__(self, status_code, message):
+ def __init__(
+ self,
+ status_code: int,
+ message: str,
+ headers: Union[dict, httpx.Headers] = {}
+ ):
self.status_code = status_code
self.message = message
self.request = httpx.Request(
@@ -16,4 +22,5 @@ class InfinityError(BaseLLMException):
message=message,
request=self.request,
response=self.response,
+ headers=headers,
) # Call the base class constructor with the parameters it needs
diff --git a/litellm/llms/infinity/embedding/handler.py b/litellm/llms/infinity/embedding/handler.py
new file mode 100644
index 0000000000..cdcb99c433
--- /dev/null
+++ b/litellm/llms/infinity/embedding/handler.py
@@ -0,0 +1,5 @@
+"""
+Infinity Embedding - uses `llm_http_handler.py` to make httpx requests
+
+Request/Response transformation is handled in `transformation.py`
+"""
diff --git a/litellm/llms/infinity/embedding/transformation.py b/litellm/llms/infinity/embedding/transformation.py
new file mode 100644
index 0000000000..824dcd38da
--- /dev/null
+++ b/litellm/llms/infinity/embedding/transformation.py
@@ -0,0 +1,141 @@
+from typing import List, Optional, Union
+
+import httpx
+
+from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import AllEmbeddingInputValues, AllMessageValues
+from litellm.types.utils import EmbeddingResponse, Usage
+
+from ..common_utils import InfinityError
+
+
+class InfinityEmbeddingConfig(BaseEmbeddingConfig):
+ """
+ Reference: https://infinity.modal.michaelfeil.eu/docs
+ """
+
+ def __init__(self) -> None:
+ pass
+
+ def get_complete_url(
+ self,
+ api_base: Optional[str],
+ api_key: Optional[str],
+ model: str,
+ optional_params: dict,
+ litellm_params: dict,
+ stream: Optional[bool] = None,
+ ) -> str:
+ if api_base is None:
+ raise ValueError("api_base is required for Infinity embeddings")
+ # Remove trailing slashes and ensure clean base URL
+ api_base = api_base.rstrip("/")
+ if not api_base.endswith("/embeddings"):
+ api_base = f"{api_base}/embeddings"
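+        # e.g. "http://localhost:7997" -> "http://localhost:7997/embeddings" (host is illustrative)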
+ return api_base
+
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ if api_key is None:
+ api_key = get_secret_str("INFINITY_API_KEY")
+
+ default_headers = {
+ "Authorization": f"Bearer {api_key}",
+ "accept": "application/json",
+ "Content-Type": "application/json",
+ }
+
+ # If 'Authorization' is provided in headers, it overrides the default.
+ if "Authorization" in headers:
+ default_headers["Authorization"] = headers["Authorization"]
+
+ # Merge other headers, overriding any default ones except Authorization
+ return {**default_headers, **headers}
+
+ def get_supported_openai_params(self, model: str) -> list:
+ return [
+ "encoding_format",
+ "modality",
+ "dimensions",
+ ]
+
+ def map_openai_params(
+ self,
+ non_default_params: dict,
+ optional_params: dict,
+ model: str,
+ drop_params: bool,
+ ) -> dict:
+ """
+ Map OpenAI params to Infinity params
+
+ Reference: https://infinity.modal.michaelfeil.eu/docs
+ """
+ if "encoding_format" in non_default_params:
+ optional_params["encoding_format"] = non_default_params["encoding_format"]
+ if "modality" in non_default_params:
+ optional_params["modality"] = non_default_params["modality"]
+ if "dimensions" in non_default_params:
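+            # map OpenAI's `dimensions` onto Infinity's `output_dimension` field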
+ optional_params["output_dimension"] = non_default_params["dimensions"]
+ return optional_params
+
+ def transform_embedding_request(
+ self,
+ model: str,
+ input: AllEmbeddingInputValues,
+ optional_params: dict,
+ headers: dict,
+ ) -> dict:
+ return {
+ "input": input,
+ "model": model,
+ **optional_params,
+ }
+
+ def transform_embedding_response(
+ self,
+ model: str,
+ raw_response: httpx.Response,
+ model_response: EmbeddingResponse,
+ logging_obj: LiteLLMLoggingObj,
+ api_key: Optional[str] = None,
+ request_data: dict = {},
+ optional_params: dict = {},
+ litellm_params: dict = {},
+ ) -> EmbeddingResponse:
+ try:
+ raw_response_json = raw_response.json()
+ except Exception:
+ raise InfinityError(
+ message=raw_response.text, status_code=raw_response.status_code
+ )
+
+ # model_response.usage
+ model_response.model = raw_response_json.get("model")
+ model_response.data = raw_response_json.get("data")
+ model_response.object = raw_response_json.get("object")
+
+ usage = Usage(
+ prompt_tokens=raw_response_json.get("usage", {}).get("prompt_tokens", 0),
+ total_tokens=raw_response_json.get("usage", {}).get("total_tokens", 0),
+ )
+ model_response.usage = usage
+ return model_response
+
+ def get_error_class(
+ self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
+ ) -> BaseLLMException:
+ return InfinityError(
+ message=error_message, status_code=status_code, headers=headers
+ )
diff --git a/litellm/llms/infinity/rerank/transformation.py b/litellm/llms/infinity/rerank/transformation.py
index 1e7234ab17..4b75fa121b 100644
--- a/litellm/llms/infinity/rerank/transformation.py
+++ b/litellm/llms/infinity/rerank/transformation.py
@@ -22,7 +22,7 @@ from litellm.types.rerank import (
RerankTokens,
)
-from .common_utils import InfinityError
+from ..common_utils import InfinityError
class InfinityRerankConfig(CohereRerankConfig):
diff --git a/litellm/llms/litellm_proxy/chat/transformation.py b/litellm/llms/litellm_proxy/chat/transformation.py
index dadd921ab8..22013198ba 100644
--- a/litellm/llms/litellm_proxy/chat/transformation.py
+++ b/litellm/llms/litellm_proxy/chat/transformation.py
@@ -10,6 +10,27 @@ from ...openai.chat.gpt_transformation import OpenAIGPTConfig
class LiteLLMProxyChatConfig(OpenAIGPTConfig):
+ def get_supported_openai_params(self, model: str) -> List:
+        params = super().get_supported_openai_params(model)
+        params.append("thinking")
+        params.append("reasoning_effort")
+        return params
+
+ def _map_openai_params(
+ self,
+ non_default_params: dict,
+ optional_params: dict,
+ model: str,
+ drop_params: bool,
+ ) -> dict:
+ supported_openai_params = self.get_supported_openai_params(model)
+ for param, value in non_default_params.items():
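+            # "thinking" is not part of the standard OpenAI schema, so tunnel it to the
+            # downstream proxy via `extra_body`; other supported params pass through unchanged.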
+ if param == "thinking":
+ optional_params.setdefault("extra_body", {})["thinking"] = value
+ elif param in supported_openai_params:
+ optional_params[param] = value
+ return optional_params
+
def _get_openai_compatible_provider_info(
self, api_base: Optional[str], api_key: Optional[str]
) -> Tuple[Optional[str], Optional[str]]:
diff --git a/litellm/llms/maritalk.py b/litellm/llms/maritalk.py
index 5f2b8d71bc..418d13b344 100644
--- a/litellm/llms/maritalk.py
+++ b/litellm/llms/maritalk.py
@@ -17,7 +17,6 @@ class MaritalkError(BaseLLMException):
class MaritalkConfig(OpenAIGPTConfig):
-
def __init__(
self,
frequency_penalty: Optional[float] = None,
diff --git a/litellm/llms/mistral/mistral_chat_transformation.py b/litellm/llms/mistral/mistral_chat_transformation.py
index 3e7a97c92f..67d88868d3 100644
--- a/litellm/llms/mistral/mistral_chat_transformation.py
+++ b/litellm/llms/mistral/mistral_chat_transformation.py
@@ -80,6 +80,7 @@ class MistralConfig(OpenAIGPTConfig):
"temperature",
"top_p",
"max_tokens",
+ "max_completion_tokens",
"tools",
"tool_choice",
"seed",
@@ -105,6 +106,10 @@ class MistralConfig(OpenAIGPTConfig):
for param, value in non_default_params.items():
if param == "max_tokens":
optional_params["max_tokens"] = value
+ if (
+ param == "max_completion_tokens"
+ ): # max_completion_tokens should take priority
+ optional_params["max_tokens"] = value
if param == "tools":
optional_params["tools"] = value
if param == "stream" and value is True:
diff --git a/litellm/llms/nlp_cloud/chat/handler.py b/litellm/llms/nlp_cloud/chat/handler.py
index b0abdda587..b0563d8b55 100644
--- a/litellm/llms/nlp_cloud/chat/handler.py
+++ b/litellm/llms/nlp_cloud/chat/handler.py
@@ -36,6 +36,7 @@ def completion(
model=model,
messages=messages,
optional_params=optional_params,
+ litellm_params=litellm_params,
)
## Load Config
diff --git a/litellm/llms/nlp_cloud/chat/transformation.py b/litellm/llms/nlp_cloud/chat/transformation.py
index b7967249ab..8037a45832 100644
--- a/litellm/llms/nlp_cloud/chat/transformation.py
+++ b/litellm/llms/nlp_cloud/chat/transformation.py
@@ -93,6 +93,7 @@ class NLPCloudConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/ollama/completion/transformation.py b/litellm/llms/ollama/completion/transformation.py
index 4a7a3556ae..789b728337 100644
--- a/litellm/llms/ollama/completion/transformation.py
+++ b/litellm/llms/ollama/completion/transformation.py
@@ -6,6 +6,9 @@ from typing import TYPE_CHECKING, Any, AsyncIterator, Iterator, List, Optional,
from httpx._models import Headers, Response
import litellm
+from litellm.litellm_core_utils.prompt_templates.common_utils import (
+ get_str_from_messages,
+)
from litellm.litellm_core_utils.prompt_templates.factory import (
convert_to_ollama_image,
custom_prompt,
@@ -86,9 +89,9 @@ class OllamaConfig(BaseConfig):
repeat_penalty: Optional[float] = None
temperature: Optional[float] = None
seed: Optional[int] = None
- stop: Optional[list] = (
- None # stop is a list based on this - https://github.com/ollama/ollama/pull/442
- )
+ stop: Optional[
+ list
+ ] = None # stop is a list based on this - https://github.com/ollama/ollama/pull/442
tfs_z: Optional[float] = None
num_predict: Optional[int] = None
top_k: Optional[int] = None
@@ -302,6 +305,8 @@ class OllamaConfig(BaseConfig):
custom_prompt_dict = (
litellm_params.get("custom_prompt_dict") or litellm.custom_prompt_dict
)
+
+ text_completion_request = litellm_params.get("text_completion")
if model in custom_prompt_dict:
# check if the model has a registered custom prompt
model_prompt_details = custom_prompt_dict[model]
@@ -311,7 +316,9 @@ class OllamaConfig(BaseConfig):
final_prompt_value=model_prompt_details["final_prompt_value"],
messages=messages,
)
- else:
+ elif text_completion_request: # handle `/completions` requests
+ ollama_prompt = get_str_from_messages(messages=messages)
+ else: # handle `/chat/completions` requests
modified_prompt = ollama_pt(model=model, messages=messages)
if isinstance(modified_prompt, dict):
ollama_prompt, images = (
@@ -346,6 +353,7 @@ class OllamaConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
@@ -354,6 +362,7 @@ class OllamaConfig(BaseConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
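To illustrate the new routing: when the call comes from the text-completion path, `litellm_params["text_completion"]` is truthy and the messages are flattened with `get_str_from_messages`; chat calls still go through `ollama_pt`. The model name below is a placeholder:

import litellm

# `/completions`-style request -> prompt string is used directly
litellm.text_completion(
    model="ollama/llama3",                            # placeholder local model
    prompt="Say hello in one word.",
)

# `/chat/completions`-style request -> messages rendered via ollama_pt as before
litellm.completion(
    model="ollama/llama3",
    messages=[{"role": "user", "content": "Say hello in one word."}],
)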
diff --git a/litellm/llms/oobabooga/chat/oobabooga.py b/litellm/llms/oobabooga/chat/oobabooga.py
index 8829d2233e..5eb68a03d4 100644
--- a/litellm/llms/oobabooga/chat/oobabooga.py
+++ b/litellm/llms/oobabooga/chat/oobabooga.py
@@ -32,6 +32,7 @@ def completion(
model=model,
messages=messages,
optional_params=optional_params,
+ litellm_params=litellm_params,
)
if "https" in model:
completion_url = model
@@ -123,6 +124,7 @@ def embedding(
model=model,
messages=[],
optional_params=optional_params,
+ litellm_params={},
)
response = litellm.module_level_client.post(
embeddings_url, headers=headers, json=data
diff --git a/litellm/llms/oobabooga/chat/transformation.py b/litellm/llms/oobabooga/chat/transformation.py
index 6fd56f934e..e87b70130c 100644
--- a/litellm/llms/oobabooga/chat/transformation.py
+++ b/litellm/llms/oobabooga/chat/transformation.py
@@ -88,6 +88,7 @@ class OobaboogaConfig(OpenAIGPTConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/openai/chat/gpt_transformation.py b/litellm/llms/openai/chat/gpt_transformation.py
index 8974a2a074..e8f60357a6 100644
--- a/litellm/llms/openai/chat/gpt_transformation.py
+++ b/litellm/llms/openai/chat/gpt_transformation.py
@@ -22,6 +22,8 @@ from litellm.llms.base_llm.chat.transformation import BaseConfig, BaseLLMExcepti
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import (
AllMessageValues,
+ ChatCompletionFileObject,
+ ChatCompletionFileObjectFile,
ChatCompletionImageObject,
ChatCompletionImageUrlObject,
)
@@ -125,6 +127,7 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig):
"max_retries",
"extra_headers",
"parallel_tool_calls",
+ "audio",
] # works across all models
model_specific_params = []
@@ -187,6 +190,7 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig):
message_content = message.get("content")
if message_content and isinstance(message_content, list):
for content_item in message_content:
+ litellm_specific_params = {"format"}
if content_item.get("type") == "image_url":
content_item = cast(ChatCompletionImageObject, content_item)
if isinstance(content_item["image_url"], str):
@@ -194,7 +198,6 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig):
"url": content_item["image_url"],
}
elif isinstance(content_item["image_url"], dict):
- litellm_specific_params = {"format"}
new_image_url_obj = ChatCompletionImageUrlObject(
**{ # type: ignore
k: v
@@ -203,6 +206,17 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig):
}
)
content_item["image_url"] = new_image_url_obj
+ elif content_item.get("type") == "file":
+ content_item = cast(ChatCompletionFileObject, content_item)
+ file_obj = content_item["file"]
+ new_file_obj = ChatCompletionFileObjectFile(
+ **{ # type: ignore
+ k: v
+ for k, v in file_obj.items()
+ if k not in litellm_specific_params
+ }
+ )
+ content_item["file"] = new_file_obj
return messages
def transform_request(
@@ -289,6 +303,7 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
@@ -319,6 +334,7 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
@@ -373,7 +389,7 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig):
)
@staticmethod
- def get_base_model(model: str) -> str:
+ def get_base_model(model: Optional[str] = None) -> Optional[str]:
return model
def get_model_response_iterator(
@@ -390,7 +406,6 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig):
class OpenAIChatCompletionStreamingHandler(BaseModelResponseIterator):
-
def chunk_parser(self, chunk: dict) -> ModelResponseStream:
try:
return ModelResponseStream(
diff --git a/litellm/llms/openai/chat/o_series_transformation.py b/litellm/llms/openai/chat/o_series_transformation.py
index b2ffda6e7d..c9a700face 100644
--- a/litellm/llms/openai/chat/o_series_transformation.py
+++ b/litellm/llms/openai/chat/o_series_transformation.py
@@ -131,7 +131,10 @@ class OpenAIOSeriesConfig(OpenAIGPTConfig):
def is_model_o_series_model(self, model: str) -> bool:
if model in litellm.open_ai_chat_completion_models and (
- "o1" in model or "o3" in model
+ "o1" in model
+ or "o3" in model
+ or "o4"
+ in model # [TODO] make this a more generic check (e.g. using `openai-o-series` as provider like gemini)
):
return True
return False
diff --git a/litellm/llms/openai/common_utils.py b/litellm/llms/openai/common_utils.py
index a8412f867b..55da16d6cd 100644
--- a/litellm/llms/openai/common_utils.py
+++ b/litellm/llms/openai/common_utils.py
@@ -2,13 +2,17 @@
Common helpers / utils across all OpenAI endpoints
"""
+import hashlib
import json
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Literal, Optional, Union
import httpx
import openai
+from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI
+import litellm
from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.llms.custom_httpx.http_handler import _DEFAULT_TTL_FOR_HTTPX_CLIENTS
class OpenAIError(BaseLLMException):
@@ -92,3 +96,113 @@ def drop_params_from_unprocessable_entity_error(
new_data = {k: v for k, v in data.items() if k not in invalid_params}
return new_data
+
+
+class BaseOpenAILLM:
+ """
+ Base class for OpenAI LLMs, handling cached client lookup, httpx clients, and SSL verification settings
+ """
+
+ @staticmethod
+ def get_cached_openai_client(
+ client_initialization_params: dict, client_type: Literal["openai", "azure"]
+ ) -> Optional[Union[OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI]]:
+ """Retrieves the OpenAI client from the in-memory cache based on the client initialization parameters"""
+ _cache_key = BaseOpenAILLM.get_openai_client_cache_key(
+ client_initialization_params=client_initialization_params,
+ client_type=client_type,
+ )
+ _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_cache_key)
+ return _cached_client
+
+ @staticmethod
+ def set_cached_openai_client(
+ openai_client: Union[OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI],
+ client_type: Literal["openai", "azure"],
+ client_initialization_params: dict,
+ ):
+ """Stores the OpenAI client in the in-memory cache for _DEFAULT_TTL_FOR_HTTPX_CLIENTS SECONDS"""
+ _cache_key = BaseOpenAILLM.get_openai_client_cache_key(
+ client_initialization_params=client_initialization_params,
+ client_type=client_type,
+ )
+ litellm.in_memory_llm_clients_cache.set_cache(
+ key=_cache_key,
+ value=openai_client,
+ ttl=_DEFAULT_TTL_FOR_HTTPX_CLIENTS,
+ )
+
+ @staticmethod
+ def get_openai_client_cache_key(
+ client_initialization_params: dict, client_type: Literal["openai", "azure"]
+ ) -> str:
+ """Creates a cache key for the OpenAI client based on the client initialization parameters"""
+ hashed_api_key = None
+ if client_initialization_params.get("api_key") is not None:
+ hash_object = hashlib.sha256(
+ client_initialization_params.get("api_key", "").encode()
+ )
+ # Hexadecimal representation of the hash
+ hashed_api_key = hash_object.hexdigest()
+
+ # Create a more readable cache key using a list of key-value pairs
+ key_parts = [
+ f"hashed_api_key={hashed_api_key}",
+ f"is_async={client_initialization_params.get('is_async')}",
+ ]
+
+ LITELLM_CLIENT_SPECIFIC_PARAMS = [
+ "timeout",
+ "max_retries",
+ "organization",
+ "api_base",
+ ]
+ openai_client_fields = (
+ BaseOpenAILLM.get_openai_client_initialization_param_fields(
+ client_type=client_type
+ )
+ + LITELLM_CLIENT_SPECIFIC_PARAMS
+ )
+
+ for param in openai_client_fields:
+ key_parts.append(f"{param}={client_initialization_params.get(param)}")
+
+ _cache_key = ",".join(key_parts)
+ return _cache_key
+
+ @staticmethod
+ def get_openai_client_initialization_param_fields(
+ client_type: Literal["openai", "azure"]
+ ) -> List[str]:
+ """Returns a list of fields that are used to initialize the OpenAI client"""
+ import inspect
+
+ from openai import AzureOpenAI, OpenAI
+
+ if client_type == "openai":
+ signature = inspect.signature(OpenAI.__init__)
+ else:
+ signature = inspect.signature(AzureOpenAI.__init__)
+
+ # Extract parameter names, excluding 'self'
+ param_names = [param for param in signature.parameters if param != "self"]
+ return param_names
+
+ @staticmethod
+ def _get_async_http_client() -> Optional[httpx.AsyncClient]:
+ if litellm.aclient_session is not None:
+ return litellm.aclient_session
+
+ return httpx.AsyncClient(
+ limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100),
+ verify=litellm.ssl_verify,
+ )
+
+ @staticmethod
+ def _get_sync_http_client() -> Optional[httpx.Client]:
+ if litellm.client_session is not None:
+ return litellm.client_session
+ return httpx.Client(
+ limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100),
+ verify=litellm.ssl_verify,
+ )
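For reference, a small sketch of the new cache-key behaviour: the api_key is replaced by its sha256 hash and the remaining client-init fields are joined as key=value pairs, so two clients built with identical settings reuse one cached client:

from litellm.llms.openai.common_utils import BaseOpenAILLM

params = {
    "api_key": "sk-test",
    "api_base": "https://api.openai.com/v1",
    "timeout": 600.0,
    "max_retries": 2,
    "organization": None,
    "is_async": False,
}
key_a = BaseOpenAILLM.get_openai_client_cache_key(params, client_type="openai")
key_b = BaseOpenAILLM.get_openai_client_cache_key(dict(params), client_type="openai")
assert key_a == key_b          # identical settings -> same cache entry
assert "sk-test" not in key_a  # only the hashed api_key appears in the key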
diff --git a/litellm/llms/openai/completion/handler.py b/litellm/llms/openai/completion/handler.py
index 2e60f55b57..fa31c487cd 100644
--- a/litellm/llms/openai/completion/handler.py
+++ b/litellm/llms/openai/completion/handler.py
@@ -220,7 +220,6 @@ class OpenAITextCompletion(BaseLLM):
client=None,
organization=None,
):
-
if client is None:
openai_client = OpenAI(
api_key=api_key,
diff --git a/litellm/llms/openai/completion/transformation.py b/litellm/llms/openai/completion/transformation.py
index 1aef72d3fa..43fbc1f219 100644
--- a/litellm/llms/openai/completion/transformation.py
+++ b/litellm/llms/openai/completion/transformation.py
@@ -111,9 +111,9 @@ class OpenAITextCompletionConfig(BaseTextCompletionConfig, OpenAIGPTConfig):
if "model" in response_object:
model_response_object.model = response_object["model"]
- model_response_object._hidden_params["original_response"] = (
- response_object # track original response, if users make a litellm.text_completion() request, we can return the original response
- )
+ model_response_object._hidden_params[
+ "original_response"
+ ] = response_object # track original response, if users make a litellm.text_completion() request, we can return the original response
return model_response_object
except Exception as e:
raise e
diff --git a/litellm/llms/openai/cost_calculation.py b/litellm/llms/openai/cost_calculation.py
index 0c26fd7448..304c444e37 100644
--- a/litellm/llms/openai/cost_calculation.py
+++ b/litellm/llms/openai/cost_calculation.py
@@ -6,6 +6,7 @@ Helper util for handling openai-specific cost calculation
from typing import Literal, Optional, Tuple
from litellm._logging import verbose_logger
+from litellm.litellm_core_utils.llm_cost_calc.utils import generic_cost_per_token
from litellm.types.utils import CallTypes, Usage
from litellm.utils import get_model_info
@@ -28,52 +29,53 @@ def cost_per_token(model: str, usage: Usage) -> Tuple[float, float]:
Returns:
Tuple[float, float] - prompt_cost_in_usd, completion_cost_in_usd
"""
- ## GET MODEL INFO
- model_info = get_model_info(model=model, custom_llm_provider="openai")
## CALCULATE INPUT COST
- ### Non-cached text tokens
- non_cached_text_tokens = usage.prompt_tokens
- cached_tokens: Optional[int] = None
- if usage.prompt_tokens_details and usage.prompt_tokens_details.cached_tokens:
- cached_tokens = usage.prompt_tokens_details.cached_tokens
- non_cached_text_tokens = non_cached_text_tokens - cached_tokens
- prompt_cost: float = non_cached_text_tokens * model_info["input_cost_per_token"]
- ## Prompt Caching cost calculation
- if model_info.get("cache_read_input_token_cost") is not None and cached_tokens:
- # Note: We read ._cache_read_input_tokens from the Usage - since cost_calculator.py standardizes the cache read tokens on usage._cache_read_input_tokens
- prompt_cost += cached_tokens * (
- model_info.get("cache_read_input_token_cost", 0) or 0
- )
+ return generic_cost_per_token(
+ model=model, usage=usage, custom_llm_provider="openai"
+ )
+ # ### Non-cached text tokens
+ # non_cached_text_tokens = usage.prompt_tokens
+ # cached_tokens: Optional[int] = None
+ # if usage.prompt_tokens_details and usage.prompt_tokens_details.cached_tokens:
+ # cached_tokens = usage.prompt_tokens_details.cached_tokens
+ # non_cached_text_tokens = non_cached_text_tokens - cached_tokens
+ # prompt_cost: float = non_cached_text_tokens * model_info["input_cost_per_token"]
+ # ## Prompt Caching cost calculation
+ # if model_info.get("cache_read_input_token_cost") is not None and cached_tokens:
+ # # Note: We read ._cache_read_input_tokens from the Usage - since cost_calculator.py standardizes the cache read tokens on usage._cache_read_input_tokens
+ # prompt_cost += cached_tokens * (
+ # model_info.get("cache_read_input_token_cost", 0) or 0
+ # )
- _audio_tokens: Optional[int] = (
- usage.prompt_tokens_details.audio_tokens
- if usage.prompt_tokens_details is not None
- else None
- )
- _audio_cost_per_token: Optional[float] = model_info.get(
- "input_cost_per_audio_token"
- )
- if _audio_tokens is not None and _audio_cost_per_token is not None:
- audio_cost: float = _audio_tokens * _audio_cost_per_token
- prompt_cost += audio_cost
+ # _audio_tokens: Optional[int] = (
+ # usage.prompt_tokens_details.audio_tokens
+ # if usage.prompt_tokens_details is not None
+ # else None
+ # )
+ # _audio_cost_per_token: Optional[float] = model_info.get(
+ # "input_cost_per_audio_token"
+ # )
+ # if _audio_tokens is not None and _audio_cost_per_token is not None:
+ # audio_cost: float = _audio_tokens * _audio_cost_per_token
+ # prompt_cost += audio_cost
- ## CALCULATE OUTPUT COST
- completion_cost: float = (
- usage["completion_tokens"] * model_info["output_cost_per_token"]
- )
- _output_cost_per_audio_token: Optional[float] = model_info.get(
- "output_cost_per_audio_token"
- )
- _output_audio_tokens: Optional[int] = (
- usage.completion_tokens_details.audio_tokens
- if usage.completion_tokens_details is not None
- else None
- )
- if _output_cost_per_audio_token is not None and _output_audio_tokens is not None:
- audio_cost = _output_audio_tokens * _output_cost_per_audio_token
- completion_cost += audio_cost
+ # ## CALCULATE OUTPUT COST
+ # completion_cost: float = (
+ # usage["completion_tokens"] * model_info["output_cost_per_token"]
+ # )
+ # _output_cost_per_audio_token: Optional[float] = model_info.get(
+ # "output_cost_per_audio_token"
+ # )
+ # _output_audio_tokens: Optional[int] = (
+ # usage.completion_tokens_details.audio_tokens
+ # if usage.completion_tokens_details is not None
+ # else None
+ # )
+ # if _output_cost_per_audio_token is not None and _output_audio_tokens is not None:
+ # audio_cost = _output_audio_tokens * _output_cost_per_audio_token
+ # completion_cost += audio_cost
- return prompt_cost, completion_cost
+ # return prompt_cost, completion_cost
def cost_per_second(
diff --git a/litellm/llms/openai/fine_tuning/handler.py b/litellm/llms/openai/fine_tuning/handler.py
index 97b237c757..2b697f85d2 100644
--- a/litellm/llms/openai/fine_tuning/handler.py
+++ b/litellm/llms/openai/fine_tuning/handler.py
@@ -28,14 +28,7 @@ class OpenAIFineTuningAPI:
_is_async: bool = False,
api_version: Optional[str] = None,
litellm_params: Optional[dict] = None,
- ) -> Optional[
- Union[
- OpenAI,
- AsyncOpenAI,
- AzureOpenAI,
- AsyncAzureOpenAI,
- ]
- ]:
+ ) -> Optional[Union[OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI,]]:
received_args = locals()
openai_client: Optional[
Union[OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI]
diff --git a/litellm/llms/openai/openai.py b/litellm/llms/openai/openai.py
index 880a043d08..13412ef96a 100644
--- a/litellm/llms/openai/openai.py
+++ b/litellm/llms/openai/openai.py
@@ -1,4 +1,3 @@
-import hashlib
import time
import types
from typing import (
@@ -33,7 +32,6 @@ from litellm.litellm_core_utils.logging_utils import track_llm_api_timing
from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator
from litellm.llms.base_llm.chat.transformation import BaseConfig, BaseLLMException
from litellm.llms.bedrock.chat.invoke_handler import MockResponseIterator
-from litellm.llms.custom_httpx.http_handler import _DEFAULT_TTL_FOR_HTTPX_CLIENTS
from litellm.types.utils import (
EmbeddingResponse,
ImageResponse,
@@ -50,7 +48,11 @@ from litellm.utils import (
from ...types.llms.openai import *
from ..base import BaseLLM
from .chat.o_series_transformation import OpenAIOSeriesConfig
-from .common_utils import OpenAIError, drop_params_from_unprocessable_entity_error
+from .common_utils import (
+ BaseOpenAILLM,
+ OpenAIError,
+ drop_params_from_unprocessable_entity_error,
+)
openaiOSeriesConfig = OpenAIOSeriesConfig()
@@ -264,7 +266,6 @@ class OpenAIConfig(BaseConfig):
api_key: Optional[str] = None,
json_mode: Optional[bool] = None,
) -> ModelResponse:
-
logging_obj.post_call(original_response=raw_response.text)
logging_obj.model_call_details["response_headers"] = raw_response.headers
final_response_obj = cast(
@@ -285,6 +286,7 @@ class OpenAIConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
@@ -317,8 +319,7 @@ class OpenAIChatCompletionResponseIterator(BaseModelResponseIterator):
raise e
-class OpenAIChatCompletion(BaseLLM):
-
+class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
def __init__(self) -> None:
super().__init__()
@@ -343,7 +344,8 @@ class OpenAIChatCompletion(BaseLLM):
max_retries: Optional[int] = DEFAULT_MAX_RETRIES,
organization: Optional[str] = None,
client: Optional[Union[OpenAI, AsyncOpenAI]] = None,
- ):
+ ) -> Optional[Union[OpenAI, AsyncOpenAI]]:
+ client_initialization_params: Dict = locals()
if client is None:
if not isinstance(max_retries, int):
raise OpenAIError(
@@ -352,25 +354,21 @@ class OpenAIChatCompletion(BaseLLM):
max_retries
),
)
- # Creating a new OpenAI Client
- # check in memory cache before creating a new one
- # Convert the API key to bytes
- hashed_api_key = None
- if api_key is not None:
- hash_object = hashlib.sha256(api_key.encode())
- # Hexadecimal representation of the hash
- hashed_api_key = hash_object.hexdigest()
+ cached_client = self.get_cached_openai_client(
+ client_initialization_params=client_initialization_params,
+ client_type="openai",
+ )
- _cache_key = f"hashed_api_key={hashed_api_key},api_base={api_base},timeout={timeout},max_retries={max_retries},organization={organization},is_async={is_async}"
-
- _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_cache_key)
- if _cached_client:
- return _cached_client
+ if cached_client:
+ if isinstance(cached_client, OpenAI) or isinstance(
+ cached_client, AsyncOpenAI
+ ):
+ return cached_client
if is_async:
_new_client: Union[OpenAI, AsyncOpenAI] = AsyncOpenAI(
api_key=api_key,
base_url=api_base,
- http_client=litellm.aclient_session,
+ http_client=OpenAIChatCompletion._get_async_http_client(),
timeout=timeout,
max_retries=max_retries,
organization=organization,
@@ -379,17 +377,17 @@ class OpenAIChatCompletion(BaseLLM):
_new_client = OpenAI(
api_key=api_key,
base_url=api_base,
- http_client=litellm.client_session,
+ http_client=OpenAIChatCompletion._get_sync_http_client(),
timeout=timeout,
max_retries=max_retries,
organization=organization,
)
## SAVE CACHE KEY
- litellm.in_memory_llm_clients_cache.set_cache(
- key=_cache_key,
- value=_new_client,
- ttl=_DEFAULT_TTL_FOR_HTTPX_CLIENTS,
+ self.set_cached_openai_client(
+ openai_client=_new_client,
+ client_initialization_params=client_initialization_params,
+ client_type="openai",
)
return _new_client
@@ -514,7 +512,6 @@ class OpenAIChatCompletion(BaseLLM):
custom_llm_provider: Optional[str] = None,
drop_params: Optional[bool] = None,
):
-
super().completion()
try:
fake_stream: bool = False
@@ -554,7 +551,6 @@ class OpenAIChatCompletion(BaseLLM):
for _ in range(
2
): # if call fails due to alternating messages, retry with reformatted message
-
if provider_config is not None:
data = provider_config.transform_request(
model=model,
@@ -650,13 +646,14 @@ class OpenAIChatCompletion(BaseLLM):
},
)
- headers, response = (
- self.make_sync_openai_chat_completion_request(
- openai_client=openai_client,
- data=data,
- timeout=timeout,
- logging_obj=logging_obj,
- )
+ (
+ headers,
+ response,
+ ) = self.make_sync_openai_chat_completion_request(
+ openai_client=openai_client,
+ data=data,
+ timeout=timeout,
+ logging_obj=logging_obj,
)
logging_obj.model_call_details["response_headers"] = headers
@@ -764,7 +761,6 @@ class OpenAIChatCompletion(BaseLLM):
for _ in range(
2
): # if call fails due to alternating messages, retry with reformatted message
-
try:
openai_aclient: AsyncOpenAI = self._get_openai_client( # type: ignore
is_async=True,
@@ -974,7 +970,6 @@ class OpenAIChatCompletion(BaseLLM):
except (
Exception
) as e: # need to exception handle here. async exceptions don't get caught in sync functions.
-
if isinstance(e, OpenAIError):
raise e
@@ -1247,7 +1242,6 @@ class OpenAIChatCompletion(BaseLLM):
):
response = None
try:
-
openai_aclient = self._get_openai_client(
is_async=True,
api_key=api_key,
@@ -1334,7 +1328,6 @@ class OpenAIChatCompletion(BaseLLM):
)
return convert_to_model_response_object(response_object=response, model_response_object=model_response, response_type="image_generation") # type: ignore
except OpenAIError as e:
-
## LOGGING
logging_obj.post_call(
input=prompt,
@@ -1373,7 +1366,6 @@ class OpenAIChatCompletion(BaseLLM):
aspeech: Optional[bool] = None,
client=None,
) -> HttpxBinaryResponseContent:
-
if aspeech is not None and aspeech is True:
return self.async_audio_speech(
model=model,
@@ -1420,7 +1412,6 @@ class OpenAIChatCompletion(BaseLLM):
timeout: Union[float, httpx.Timeout],
client=None,
) -> HttpxBinaryResponseContent:
-
openai_client = cast(
AsyncOpenAI,
self._get_openai_client(
@@ -1491,9 +1482,9 @@ class OpenAIFilesAPI(BaseLLM):
self,
create_file_data: CreateFileRequest,
openai_client: AsyncOpenAI,
- ) -> FileObject:
+ ) -> OpenAIFileObject:
response = await openai_client.files.create(**create_file_data)
- return response
+ return OpenAIFileObject(**response.model_dump())
def create_file(
self,
@@ -1505,7 +1496,7 @@ class OpenAIFilesAPI(BaseLLM):
max_retries: Optional[int],
organization: Optional[str],
client: Optional[Union[OpenAI, AsyncOpenAI]] = None,
- ) -> Union[FileObject, Coroutine[Any, Any, FileObject]]:
+ ) -> Union[OpenAIFileObject, Coroutine[Any, Any, OpenAIFileObject]]:
openai_client: Optional[Union[OpenAI, AsyncOpenAI]] = self.get_openai_client(
api_key=api_key,
api_base=api_base,
@@ -1528,8 +1519,8 @@ class OpenAIFilesAPI(BaseLLM):
return self.acreate_file( # type: ignore
create_file_data=create_file_data, openai_client=openai_client
)
- response = openai_client.files.create(**create_file_data)
- return response
+ response = cast(OpenAI, openai_client).files.create(**create_file_data)
+ return OpenAIFileObject(**response.model_dump())
async def afile_content(
self,
diff --git a/litellm/llms/openai/responses/transformation.py b/litellm/llms/openai/responses/transformation.py
index ce4052dc19..d4a443aedb 100644
--- a/litellm/llms/openai/responses/transformation.py
+++ b/litellm/llms/openai/responses/transformation.py
@@ -7,6 +7,7 @@ from litellm._logging import verbose_logger
from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import *
+from litellm.types.responses.main import *
from litellm.types.router import GenericLiteLLMParams
from ..common_utils import OpenAIError
@@ -65,10 +66,12 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
response_api_optional_request_params: Dict,
litellm_params: GenericLiteLLMParams,
headers: dict,
- ) -> ResponsesAPIRequestParams:
+ ) -> Dict:
"""No transform applied since inputs are in OpenAI spec already"""
- return ResponsesAPIRequestParams(
- model=model, input=input, **response_api_optional_request_params
+ return dict(
+ ResponsesAPIRequestParams(
+ model=model, input=input, **response_api_optional_request_params
+ )
)
def transform_response_api_response(
@@ -108,8 +111,7 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
def get_complete_url(
self,
api_base: Optional[str],
- model: str,
- stream: Optional[bool] = None,
+ litellm_params: dict,
) -> str:
"""
Get the endpoint for OpenAI responses API
@@ -188,3 +190,63 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
raise ValueError(f"Unknown event type: {event_type}")
return model_class
+
+ def should_fake_stream(
+ self,
+ model: Optional[str],
+ stream: Optional[bool],
+ custom_llm_provider: Optional[str] = None,
+ ) -> bool:
+ if stream is not True:
+ return False
+ if model is not None:
+ try:
+ if (
+ litellm.utils.supports_native_streaming(
+ model=model,
+ custom_llm_provider=custom_llm_provider,
+ )
+ is False
+ ):
+ return True
+ except Exception as e:
+ verbose_logger.debug(
+ f"Error getting model info in OpenAIResponsesAPIConfig: {e}"
+ )
+ return False
+
+ #########################################################
+ ########## DELETE RESPONSE API TRANSFORMATION ##############
+ #########################################################
+ def transform_delete_response_api_request(
+ self,
+ response_id: str,
+ api_base: str,
+ litellm_params: GenericLiteLLMParams,
+ headers: dict,
+ ) -> Tuple[str, Dict]:
+ """
+ Transform the delete response API request into a URL and data
+
+ OpenAI API expects the following request
+ - DELETE /v1/responses/{response_id}
+ """
+ url = f"{api_base}/{response_id}"
+ data: Dict = {}
+ return url, data
+
+ def transform_delete_response_api_response(
+ self,
+ raw_response: httpx.Response,
+ logging_obj: LiteLLMLoggingObj,
+ ) -> DeleteResponseResult:
+ """
+ Transform the delete response API response into a DeleteResponseResult
+ """
+ try:
+ raw_response_json = raw_response.json()
+ except Exception:
+ raise OpenAIError(
+ message=raw_response.text, status_code=raw_response.status_code
+ )
+ return DeleteResponseResult(**raw_response_json)
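A quick sketch of the delete transformation added above: the request is a plain DELETE against `{api_base}/{response_id}` with an empty body, and the JSON response is parsed into `DeleteResponseResult`. The response id below is a placeholder:

from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig
from litellm.types.router import GenericLiteLLMParams

cfg = OpenAIResponsesAPIConfig()
url, data = cfg.transform_delete_response_api_request(
    response_id="resp_abc123",
    api_base="https://api.openai.com/v1/responses",
    litellm_params=GenericLiteLLMParams(),
    headers={},
)
# url == "https://api.openai.com/v1/responses/resp_abc123", data == {}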
diff --git a/litellm/llms/openai/transcriptions/gpt_transformation.py b/litellm/llms/openai/transcriptions/gpt_transformation.py
new file mode 100644
index 0000000000..796e10f515
--- /dev/null
+++ b/litellm/llms/openai/transcriptions/gpt_transformation.py
@@ -0,0 +1,34 @@
+from typing import List
+
+from litellm.types.llms.openai import OpenAIAudioTranscriptionOptionalParams
+from litellm.types.utils import FileTypes
+
+from .whisper_transformation import OpenAIWhisperAudioTranscriptionConfig
+
+
+class OpenAIGPTAudioTranscriptionConfig(OpenAIWhisperAudioTranscriptionConfig):
+ def get_supported_openai_params(
+ self, model: str
+ ) -> List[OpenAIAudioTranscriptionOptionalParams]:
+ """
+ Get the supported OpenAI params for the `gpt-4o-transcribe` models
+ """
+ return [
+ "language",
+ "prompt",
+ "response_format",
+ "temperature",
+ "include",
+ ]
+
+ def transform_audio_transcription_request(
+ self,
+ model: str,
+ audio_file: FileTypes,
+ optional_params: dict,
+ litellm_params: dict,
+ ) -> dict:
+ """
+ Transform the audio transcription request
+ """
+ return {"model": model, "file": audio_file, **optional_params}
diff --git a/litellm/llms/openai/transcriptions/handler.py b/litellm/llms/openai/transcriptions/handler.py
index d9dd3c123b..78a913cbf3 100644
--- a/litellm/llms/openai/transcriptions/handler.py
+++ b/litellm/llms/openai/transcriptions/handler.py
@@ -7,6 +7,9 @@ from pydantic import BaseModel
import litellm
from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_name
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+from litellm.llms.base_llm.audio_transcription.transformation import (
+ BaseAudioTranscriptionConfig,
+)
from litellm.types.utils import FileTypes
from litellm.utils import (
TranscriptionResponse,
@@ -75,6 +78,7 @@ class OpenAIAudioTranscription(OpenAIChatCompletion):
model: str,
audio_file: FileTypes,
optional_params: dict,
+ litellm_params: dict,
model_response: TranscriptionResponse,
timeout: float,
max_retries: int,
@@ -83,16 +87,24 @@ class OpenAIAudioTranscription(OpenAIChatCompletion):
api_base: Optional[str],
client=None,
atranscription: bool = False,
+ provider_config: Optional[BaseAudioTranscriptionConfig] = None,
) -> TranscriptionResponse:
- data = {"model": model, "file": audio_file, **optional_params}
-
- if "response_format" not in data or (
- data["response_format"] == "text" or data["response_format"] == "json"
- ):
- data["response_format"] = (
- "verbose_json" # ensures 'duration' is received - used for cost calculation
+ """
+ Handle audio transcription request
+ """
+ if provider_config is not None:
+ data = provider_config.transform_audio_transcription_request(
+ model=model,
+ audio_file=audio_file,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
)
+ if isinstance(data, bytes):
+ raise ValueError("OpenAI transformation route requires a dict")
+ else:
+ data = {"model": model, "file": audio_file, **optional_params}
+
if atranscription is True:
return self.async_audio_transcriptions( # type: ignore
audio_file=audio_file,
diff --git a/litellm/llms/openai/transcriptions/whisper_transformation.py b/litellm/llms/openai/transcriptions/whisper_transformation.py
new file mode 100644
index 0000000000..c0ccc71579
--- /dev/null
+++ b/litellm/llms/openai/transcriptions/whisper_transformation.py
@@ -0,0 +1,98 @@
+from typing import List, Optional, Union
+
+from httpx import Headers
+
+from litellm.llms.base_llm.audio_transcription.transformation import (
+ BaseAudioTranscriptionConfig,
+)
+from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import (
+ AllMessageValues,
+ OpenAIAudioTranscriptionOptionalParams,
+)
+from litellm.types.utils import FileTypes
+
+from ..common_utils import OpenAIError
+
+
+class OpenAIWhisperAudioTranscriptionConfig(BaseAudioTranscriptionConfig):
+ def get_supported_openai_params(
+ self, model: str
+ ) -> List[OpenAIAudioTranscriptionOptionalParams]:
+ """
+ Get the supported OpenAI params for the `whisper-1` models
+ """
+ return [
+ "language",
+ "prompt",
+ "response_format",
+ "temperature",
+ "timestamp_granularities",
+ ]
+
+ def map_openai_params(
+ self,
+ non_default_params: dict,
+ optional_params: dict,
+ model: str,
+ drop_params: bool,
+ ) -> dict:
+ """
+ Map the OpenAI params to the Whisper params
+ """
+ supported_params = self.get_supported_openai_params(model)
+ for k, v in non_default_params.items():
+ if k in supported_params:
+ optional_params[k] = v
+ return optional_params
+
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ api_key = api_key or get_secret_str("OPENAI_API_KEY")
+
+ auth_header = {
+ "Authorization": f"Bearer {api_key}",
+ }
+
+ headers.update(auth_header)
+ return headers
+
+ def transform_audio_transcription_request(
+ self,
+ model: str,
+ audio_file: FileTypes,
+ optional_params: dict,
+ litellm_params: dict,
+ ) -> dict:
+ """
+ Transform the audio transcription request
+ """
+
+ data = {"model": model, "file": audio_file, **optional_params}
+
+ if "response_format" not in data or (
+ data["response_format"] == "text" or data["response_format"] == "json"
+ ):
+ data[
+ "response_format"
+ ] = "verbose_json" # ensures 'duration' is received - used for cost calculation
+
+ return data
+
+ def get_error_class(
+ self, error_message: str, status_code: int, headers: Union[dict, Headers]
+ ) -> BaseLLMException:
+ return OpenAIError(
+ status_code=status_code,
+ message=error_message,
+ headers=headers,
+ )
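As a sketch of the Whisper config above: when `response_format` is omitted (or set to `text`/`json`), the request is rewritten to `verbose_json` so the `duration` field comes back for cost calculation:

from litellm.llms.openai.transcriptions.whisper_transformation import (
    OpenAIWhisperAudioTranscriptionConfig,
)

cfg = OpenAIWhisperAudioTranscriptionConfig()
with open("sample.wav", "rb") as f:                   # placeholder audio file
    data = cfg.transform_audio_transcription_request(
        model="whisper-1",
        audio_file=f,
        optional_params={},                           # no response_format given
        litellm_params={},
    )
assert data["response_format"] == "verbose_json"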
diff --git a/litellm/llms/openai_like/chat/transformation.py b/litellm/llms/openai_like/chat/transformation.py
index 37cfabdab5..068d3d8dfd 100644
--- a/litellm/llms/openai_like/chat/transformation.py
+++ b/litellm/llms/openai_like/chat/transformation.py
@@ -7,7 +7,7 @@ from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
import httpx
from litellm.secret_managers.main import get_secret_str
-from litellm.types.llms.openai import ChatCompletionAssistantMessage
+from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage
from litellm.types.utils import ModelResponse
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
@@ -25,7 +25,6 @@ class OpenAILikeChatConfig(OpenAIGPTConfig):
self,
api_base: Optional[str],
api_key: Optional[str],
- model: Optional[str] = None,
) -> Tuple[Optional[str], Optional[str]]:
api_base = api_base or get_secret_str("OPENAI_LIKE_API_BASE") # type: ignore
dynamic_api_key = (
@@ -34,7 +33,7 @@ class OpenAILikeChatConfig(OpenAIGPTConfig):
return api_base, dynamic_api_key
@staticmethod
- def _convert_tool_response_to_message(
+ def _json_mode_convert_tool_response_to_message(
message: ChatCompletionAssistantMessage, json_mode: bool
) -> ChatCompletionAssistantMessage:
"""
@@ -74,8 +73,8 @@ class OpenAILikeChatConfig(OpenAIGPTConfig):
messages: List,
print_verbose,
encoding,
- json_mode: bool,
- custom_llm_provider: str,
+ json_mode: Optional[bool],
+ custom_llm_provider: Optional[str],
base_model: Optional[str],
) -> ModelResponse:
response_json = response.json()
@@ -88,21 +87,55 @@ class OpenAILikeChatConfig(OpenAIGPTConfig):
if json_mode:
for choice in response_json["choices"]:
- message = OpenAILikeChatConfig._convert_tool_response_to_message(
- choice.get("message"), json_mode
+ message = (
+ OpenAILikeChatConfig._json_mode_convert_tool_response_to_message(
+ choice.get("message"), json_mode
+ )
)
choice["message"] = message
returned_response = ModelResponse(**response_json)
- returned_response.model = (
- custom_llm_provider + "/" + (returned_response.model or "")
- )
+ if custom_llm_provider is not None:
+ returned_response.model = (
+ custom_llm_provider + "/" + (returned_response.model or "")
+ )
if base_model is not None:
returned_response._hidden_params["model"] = base_model
return returned_response
+ def transform_response(
+ self,
+ model: str,
+ raw_response: httpx.Response,
+ model_response: ModelResponse,
+ logging_obj: LiteLLMLoggingObj,
+ request_data: dict,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ encoding: Any,
+ api_key: Optional[str] = None,
+ json_mode: Optional[bool] = None,
+ ) -> ModelResponse:
+ return OpenAILikeChatConfig._transform_response(
+ model=model,
+ response=raw_response,
+ model_response=model_response,
+ stream=optional_params.get("stream", False),
+ logging_obj=logging_obj,
+ optional_params=optional_params,
+ api_key=api_key,
+ data=request_data,
+ messages=messages,
+ print_verbose=None,
+ encoding=None,
+ json_mode=json_mode,
+ custom_llm_provider=None,
+ base_model=None,
+ )
+
def map_openai_params(
self,
non_default_params: dict,
diff --git a/litellm/llms/openrouter/chat/transformation.py b/litellm/llms/openrouter/chat/transformation.py
index 4b95ec87cf..77f402a131 100644
--- a/litellm/llms/openrouter/chat/transformation.py
+++ b/litellm/llms/openrouter/chat/transformation.py
@@ -1,17 +1,19 @@
"""
-Support for OpenAI's `/v1/chat/completions` endpoint.
+Support for OpenAI's `/v1/chat/completions` endpoint.
Calls done in OpenAI/openai.py as OpenRouter is openai-compatible.
Docs: https://openrouter.ai/docs/parameters
"""
-from typing import Any, AsyncIterator, Iterator, Optional, Union
+from typing import Any, AsyncIterator, Iterator, List, Optional, Union
import httpx
from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator
from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.types.llms.openai import AllMessageValues
+from litellm.types.llms.openrouter import OpenRouterErrorMessage
from litellm.types.utils import ModelResponse, ModelResponseStream
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
@@ -19,7 +21,6 @@ from ..common_utils import OpenRouterException
class OpenrouterConfig(OpenAIGPTConfig):
-
def map_openai_params(
self,
non_default_params: dict,
@@ -42,11 +43,32 @@ class OpenrouterConfig(OpenAIGPTConfig):
extra_body["models"] = models
if route is not None:
extra_body["route"] = route
- mapped_openai_params["extra_body"] = (
- extra_body # openai client supports `extra_body` param
- )
+ mapped_openai_params[
+ "extra_body"
+ ] = extra_body # openai client supports `extra_body` param
return mapped_openai_params
+ def transform_request(
+ self,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ headers: dict,
+ ) -> dict:
+ """
+ Transform the overall request to be sent to the API.
+
+ Returns:
+ dict: The transformed request. Sent as the body of the API call.
+ """
+ extra_body = optional_params.pop("extra_body", {})
+ response = super().transform_request(
+ model, messages, optional_params, litellm_params, headers
+ )
+ response.update(extra_body)
+ return response
+
def get_error_class(
self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
) -> BaseLLMException:
@@ -70,9 +92,26 @@ class OpenrouterConfig(OpenAIGPTConfig):
class OpenRouterChatCompletionStreamingHandler(BaseModelResponseIterator):
-
def chunk_parser(self, chunk: dict) -> ModelResponseStream:
try:
+ ## HANDLE ERROR IN CHUNK ##
+ if "error" in chunk:
+ error_chunk = chunk["error"]
+ error_message = OpenRouterErrorMessage(
+ message="Message: {}, Metadata: {}, User ID: {}".format(
+ error_chunk["message"],
+ error_chunk.get("metadata", {}),
+ error_chunk.get("user_id", ""),
+ ),
+ code=error_chunk["code"],
+ metadata=error_chunk.get("metadata", {}),
+ )
+ raise OpenRouterException(
+ message=error_message["message"],
+ status_code=error_message["code"],
+ headers=error_message["metadata"].get("headers", {}),
+ )
+
new_choices = []
for choice in chunk["choices"]:
choice["delta"]["reasoning_content"] = choice["delta"].get("reasoning")
@@ -84,5 +123,11 @@ class OpenRouterChatCompletionStreamingHandler(BaseModelResponseIterator):
model=chunk["model"],
choices=new_choices,
)
+ except KeyError as e:
+ raise OpenRouterException(
+ message=f"KeyError: {e}, Got unexpected response from OpenRouter: {chunk}",
+ status_code=400,
+ headers={"Content-Type": "application/json"},
+ )
except Exception as e:
raise e
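To illustrate the new streaming error handling: an `error` payload in a chunk is now surfaced as an OpenRouterException carrying the upstream code and metadata, instead of failing later with a bare KeyError on `choices`. The payload below is a hypothetical example:

chunk = {
    "error": {
        "message": "Rate limit exceeded",
        "code": 429,
        "metadata": {"headers": {"retry-after": "1"}},
    }
}
# when the streaming handler's chunk_parser() receives this chunk, it raises
# OpenRouterException(status_code=429) with the message and metadata embedded.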
diff --git a/litellm/llms/petals/completion/transformation.py b/litellm/llms/petals/completion/transformation.py
index 08ec15de33..24910cba8f 100644
--- a/litellm/llms/petals/completion/transformation.py
+++ b/litellm/llms/petals/completion/transformation.py
@@ -37,9 +37,9 @@ class PetalsConfig(BaseConfig):
"""
max_length: Optional[int] = None
- max_new_tokens: Optional[int] = (
- litellm.max_tokens
- ) # petals requires max tokens to be set
+ max_new_tokens: Optional[
+ int
+ ] = litellm.max_tokens # petals requires max tokens to be set
do_sample: Optional[bool] = None
temperature: Optional[float] = None
top_k: Optional[int] = None
@@ -131,6 +131,7 @@ class PetalsConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/predibase/chat/handler.py b/litellm/llms/predibase/chat/handler.py
index 43f4b06745..79936764ac 100644
--- a/litellm/llms/predibase/chat/handler.py
+++ b/litellm/llms/predibase/chat/handler.py
@@ -228,10 +228,10 @@ class PredibaseChatCompletion:
api_key: str,
logging_obj,
optional_params: dict,
+ litellm_params: dict,
tenant_id: str,
timeout: Union[float, httpx.Timeout],
acompletion=None,
- litellm_params=None,
logger_fn=None,
headers: dict = {},
) -> Union[ModelResponse, CustomStreamWrapper]:
@@ -241,6 +241,7 @@ class PredibaseChatCompletion:
messages=messages,
optional_params=optional_params,
model=model,
+ litellm_params=litellm_params,
)
completion_url = ""
input_text = ""
@@ -394,7 +395,6 @@ class PredibaseChatCompletion:
logger_fn=None,
headers={},
) -> ModelResponse:
-
async_handler = get_async_httpx_client(
llm_provider=litellm.LlmProviders.PREDIBASE,
params={"timeout": timeout},
diff --git a/litellm/llms/predibase/chat/transformation.py b/litellm/llms/predibase/chat/transformation.py
index f574238696..9fbb9d6c9e 100644
--- a/litellm/llms/predibase/chat/transformation.py
+++ b/litellm/llms/predibase/chat/transformation.py
@@ -2,6 +2,7 @@ from typing import TYPE_CHECKING, Any, List, Literal, Optional, Union
from httpx import Headers, Response
+from litellm.constants import DEFAULT_MAX_TOKENS
from litellm.llms.base_llm.chat.transformation import BaseConfig, BaseLLMException
from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import ModelResponse
@@ -27,12 +28,12 @@ class PredibaseConfig(BaseConfig):
decoder_input_details: Optional[bool] = None
details: bool = True # enables returning logprobs + best of
max_new_tokens: int = (
- 256 # openai default - requests hang if max_new_tokens not given
+ DEFAULT_MAX_TOKENS # openai default - requests hang if max_new_tokens not given
)
repetition_penalty: Optional[float] = None
- return_full_text: Optional[bool] = (
- False # by default don't return the input as part of the output
- )
+ return_full_text: Optional[
+ bool
+ ] = False # by default don't return the input as part of the output
seed: Optional[int] = None
stop: Optional[List[str]] = None
temperature: Optional[float] = None
@@ -99,9 +100,9 @@ class PredibaseConfig(BaseConfig):
optional_params["top_p"] = value
if param == "n":
optional_params["best_of"] = value
- optional_params["do_sample"] = (
- True # Need to sample if you want best of for hf inference endpoints
- )
+ optional_params[
+ "do_sample"
+ ] = True # Need to sample if you want best of for hf inference endpoints
if param == "stream":
optional_params["stream"] = value
if param == "stop":
@@ -163,6 +164,7 @@ class PredibaseConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/replicate/chat/handler.py b/litellm/llms/replicate/chat/handler.py
index f52eb2ee05..e4bb64fed7 100644
--- a/litellm/llms/replicate/chat/handler.py
+++ b/litellm/llms/replicate/chat/handler.py
@@ -4,6 +4,7 @@ import time
from typing import Callable, List, Union
import litellm
+from litellm.constants import REPLICATE_POLLING_DELAY_SECONDS
from litellm.llms.custom_httpx.http_handler import (
AsyncHTTPHandler,
HTTPHandler,
@@ -28,7 +29,9 @@ def handle_prediction_response_streaming(
status = ""
while True and (status not in ["succeeded", "failed", "canceled"]):
- time.sleep(0.5) # prevent being rate limited by replicate
+ time.sleep(
+ REPLICATE_POLLING_DELAY_SECONDS
+ ) # prevent being rate limited by replicate
print_verbose(f"replicate: polling endpoint: {prediction_url}")
response = http_client.get(prediction_url, headers=headers)
if response.status_code == 200:
@@ -77,7 +80,9 @@ async def async_handle_prediction_response_streaming(
status = ""
while True and (status not in ["succeeded", "failed", "canceled"]):
- await asyncio.sleep(0.5) # prevent being rate limited by replicate
+ await asyncio.sleep(
+ REPLICATE_POLLING_DELAY_SECONDS
+ ) # prevent being rate limited by replicate
print_verbose(f"replicate: polling endpoint: {prediction_url}")
response = await http_client.get(prediction_url, headers=headers)
if response.status_code == 200:
@@ -136,6 +141,7 @@ def completion(
model=model,
messages=messages,
optional_params=optional_params,
+ litellm_params=litellm_params,
)
# Start a prediction and get the prediction URL
version_id = replicate_config.model_to_version_id(model)
@@ -170,6 +176,7 @@ def completion(
prediction_url = replicate_config.get_complete_url(
api_base=api_base,
+ api_key=api_key,
model=model,
optional_params=optional_params,
litellm_params=litellm_params,
@@ -244,9 +251,9 @@ async def async_completion(
print_verbose,
headers: dict,
) -> Union[ModelResponse, CustomStreamWrapper]:
-
prediction_url = replicate_config.get_complete_url(
api_base=api_base,
+ api_key=api_key,
model=model,
optional_params=optional_params,
litellm_params=litellm_params,
diff --git a/litellm/llms/replicate/chat/transformation.py b/litellm/llms/replicate/chat/transformation.py
index 75cfe6ced7..4c61086801 100644
--- a/litellm/llms/replicate/chat/transformation.py
+++ b/litellm/llms/replicate/chat/transformation.py
@@ -3,6 +3,7 @@ from typing import TYPE_CHECKING, Any, List, Optional, Union
import httpx
import litellm
+from litellm.constants import REPLICATE_MODEL_NAME_WITH_ID_LENGTH
from litellm.litellm_core_utils.prompt_templates.common_utils import (
convert_content_list_to_str,
)
@@ -139,6 +140,7 @@ class ReplicateConfig(BaseConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
@@ -220,10 +222,11 @@ class ReplicateConfig(BaseConfig):
version_id = self.model_to_version_id(model)
request_data: dict = {"input": input_data}
- if ":" in version_id and len(version_id) > 64:
+ if ":" in version_id and len(version_id) > REPLICATE_MODEL_NAME_WITH_ID_LENGTH:
model_parts = version_id.split(":")
if (
- len(model_parts) > 1 and len(model_parts[1]) == 64
+ len(model_parts) > 1
+ and len(model_parts[1]) == REPLICATE_MODEL_NAME_WITH_ID_LENGTH
): ## checks if model name has a 64 digit code - e.g. "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3"
request_data["version"] = model_parts[1]
@@ -309,6 +312,7 @@ class ReplicateConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/sagemaker/chat/handler.py b/litellm/llms/sagemaker/chat/handler.py
index 3a90a15093..b86cda7aea 100644
--- a/litellm/llms/sagemaker/chat/handler.py
+++ b/litellm/llms/sagemaker/chat/handler.py
@@ -5,6 +5,7 @@ from typing import Callable, Optional, Union
import httpx
from litellm.llms.bedrock.base_aws_llm import BaseAWSLLM
+from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.utils import ModelResponse, get_secret
from ..common_utils import AWSEventStreamDecoder
@@ -12,7 +13,6 @@ from .transformation import SagemakerChatConfig
class SagemakerChatHandler(BaseAWSLLM):
-
def _load_credentials(
self,
optional_params: dict,
@@ -125,8 +125,8 @@ class SagemakerChatHandler(BaseAWSLLM):
logger_fn=None,
acompletion: bool = False,
headers: dict = {},
+ client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
):
-
# pop streaming if it's in the optional params as 'stream' raises an error with sagemaker
credentials, aws_region_name = self._load_credentials(optional_params)
inference_params = deepcopy(optional_params)
@@ -173,4 +173,5 @@ class SagemakerChatHandler(BaseAWSLLM):
custom_endpoint=True,
custom_llm_provider="sagemaker_chat",
streaming_decoder=custom_stream_decoder, # type: ignore
+ client=client,
)
diff --git a/litellm/llms/sagemaker/common_utils.py b/litellm/llms/sagemaker/common_utils.py
index 9884f420c3..031a0c7f05 100644
--- a/litellm/llms/sagemaker/common_utils.py
+++ b/litellm/llms/sagemaker/common_utils.py
@@ -34,7 +34,6 @@ class AWSEventStreamDecoder:
def _chunk_parser_messages_api(
self, chunk_data: dict
) -> StreamingChatCompletionChunk:
-
openai_chunk = StreamingChatCompletionChunk(**chunk_data)
return openai_chunk
@@ -128,21 +127,25 @@ class AWSEventStreamDecoder:
async for chunk in iterator:
event_stream_buffer.add_data(chunk)
for event in event_stream_buffer:
- message = self._parse_message_from_event(event)
- if message:
- verbose_logger.debug("sagemaker parsed chunk bytes %s", message)
- # remove data: prefix and "\n\n" at the end
- message = (
- litellm.CustomStreamWrapper._strip_sse_data_from_chunk(message)
- or ""
- )
- message = message.replace("\n\n", "")
+ try:
+ message = self._parse_message_from_event(event)
+ if message:
+ verbose_logger.debug(
+ "sagemaker parsed chunk bytes %s", message
+ )
+ # remove data: prefix and "\n\n" at the end
+ message = (
+ litellm.CustomStreamWrapper._strip_sse_data_from_chunk(
+ message
+ )
+ or ""
+ )
+ message = message.replace("\n\n", "")
- # Accumulate JSON data
- accumulated_json += message
+ # Accumulate JSON data
+ accumulated_json += message
- # Try to parse the accumulated JSON
- try:
+ # Try to parse the accumulated JSON
_data = json.loads(accumulated_json)
if self.is_messages_api:
yield self._chunk_parser_messages_api(chunk_data=_data)
@@ -150,9 +153,19 @@ class AWSEventStreamDecoder:
yield self._chunk_parser(chunk_data=_data)
# Reset accumulated_json after successful parsing
accumulated_json = ""
- except json.JSONDecodeError:
- # If it's not valid JSON yet, continue to the next event
- continue
+ except json.JSONDecodeError:
+ # If it's not valid JSON yet, continue to the next event
+ continue
+ except UnicodeDecodeError as e:
+ verbose_logger.warning(
+ f"UnicodeDecodeError: {e}. Attempting to combine with next event."
+ )
+ continue
+ except Exception as e:
+ verbose_logger.error(
+ f"Error parsing message: {e}. Attempting to combine with next event."
+ )
+ continue
# Handle any remaining data after the iterator is exhausted
if accumulated_json:
@@ -168,6 +181,8 @@ class AWSEventStreamDecoder:
f"Warning: Unparseable JSON data remained: {accumulated_json}"
)
yield None
+ except Exception as e:
+ verbose_logger.error(f"Final error parsing accumulated JSON: {e}")
def _parse_message_from_event(self, event) -> Optional[str]:
response_dict = event.to_response_dict()
@@ -192,7 +207,6 @@ class AWSEventStreamDecoder:
def get_response_stream_shape():
global _response_stream_shape_cache
if _response_stream_shape_cache is None:
-
from botocore.loaders import Loader
from botocore.model import ServiceModel
diff --git a/litellm/llms/sagemaker/completion/handler.py b/litellm/llms/sagemaker/completion/handler.py
index 4aff5f5d71..ebd96ac5b1 100644
--- a/litellm/llms/sagemaker/completion/handler.py
+++ b/litellm/llms/sagemaker/completion/handler.py
@@ -1,6 +1,6 @@
import json
from copy import deepcopy
-from typing import Any, Callable, List, Optional, Union
+from typing import Any, Callable, List, Optional, Union, cast
import httpx
@@ -35,7 +35,6 @@ os.environ['AWS_SECRET_ACCESS_KEY'] = ""
# set os.environ['AWS_REGION_NAME'] =
class SagemakerLLM(BaseAWSLLM):
-
def _load_credentials(
self,
optional_params: dict,
@@ -97,6 +96,7 @@ class SagemakerLLM(BaseAWSLLM):
model: str,
data: dict,
messages: List[AllMessageValues],
+ litellm_params: dict,
optional_params: dict,
aws_region_name: str,
extra_headers: Optional[dict] = None,
@@ -123,6 +123,7 @@ class SagemakerLLM(BaseAWSLLM):
model=model,
messages=messages,
optional_params=optional_params,
+ litellm_params=litellm_params,
)
request = AWSRequest(
method="POST", url=api_base, data=encoded_data, headers=headers
@@ -154,7 +155,6 @@ class SagemakerLLM(BaseAWSLLM):
acompletion: bool = False,
headers: dict = {},
):
-
# pop streaming if it's in the optional params as 'stream' raises an error with sagemaker
credentials, aws_region_name = self._load_credentials(optional_params)
inference_params = deepcopy(optional_params)
@@ -200,6 +200,7 @@ class SagemakerLLM(BaseAWSLLM):
data=data,
messages=messages,
optional_params=optional_params,
+ litellm_params=litellm_params,
credentials=credentials,
aws_region_name=aws_region_name,
)
@@ -213,7 +214,7 @@ class SagemakerLLM(BaseAWSLLM):
sync_response = sync_handler.post(
url=prepared_request.url,
headers=prepared_request.headers, # type: ignore
- json=data,
+ data=prepared_request.body,
stream=stream,
)
@@ -276,6 +277,7 @@ class SagemakerLLM(BaseAWSLLM):
"model": model,
"data": _data,
"optional_params": optional_params,
+ "litellm_params": litellm_params,
"credentials": credentials,
"aws_region_name": aws_region_name,
"messages": messages,
@@ -308,7 +310,7 @@ class SagemakerLLM(BaseAWSLLM):
sync_response = sync_handler.post(
url=prepared_request.url,
headers=prepared_request.headers, # type: ignore
- json=_data,
+ data=prepared_request.body,
timeout=timeout,
)
@@ -356,7 +358,7 @@ class SagemakerLLM(BaseAWSLLM):
self,
api_base: str,
headers: dict,
- data: dict,
+ data: str,
logging_obj,
client=None,
):
@@ -368,7 +370,7 @@ class SagemakerLLM(BaseAWSLLM):
response = await client.post(
api_base,
headers=headers,
- json=data,
+ data=data,
stream=True,
)
@@ -428,6 +430,7 @@ class SagemakerLLM(BaseAWSLLM):
"model": model,
"data": data,
"optional_params": optional_params,
+ "litellm_params": litellm_params,
"credentials": credentials,
"aws_region_name": aws_region_name,
"messages": messages,
@@ -437,10 +440,14 @@ class SagemakerLLM(BaseAWSLLM):
prepared_request.headers.update(
{"X-Amzn-SageMaker-Inference-Component": model_id}
)
+
+ if not prepared_request.body:
+ raise ValueError("Prepared request body is empty")
+
completion_stream = await self.make_async_call(
api_base=prepared_request.url,
headers=prepared_request.headers, # type: ignore
- data=data,
+ data=cast(str, prepared_request.body),
logging_obj=logging_obj,
)
streaming_response = CustomStreamWrapper(
@@ -494,6 +501,7 @@ class SagemakerLLM(BaseAWSLLM):
"model": model,
"data": data,
"optional_params": optional_params,
+ "litellm_params": litellm_params,
"credentials": credentials,
"aws_region_name": aws_region_name,
"messages": messages,
@@ -522,7 +530,7 @@ class SagemakerLLM(BaseAWSLLM):
response = await async_handler.post(
url=prepared_request.url,
headers=prepared_request.headers, # type: ignore
- json=data,
+ data=prepared_request.body,
timeout=timeout,
)
@@ -625,7 +633,7 @@ class SagemakerLLM(BaseAWSLLM):
response = client.invoke_endpoint(
EndpointName={model},
ContentType="application/json",
- Body={data}, # type: ignore
+ Body=f"{data!r}", # Use !r for safe representation
CustomAttributes="accept_eula=true",
)""" # type: ignore
logging_obj.pre_call(
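Editor's note: the switch from `json=data` to `data=prepared_request.body` matters because SigV4 signs the exact byte payload; letting the HTTP client re-serialize the dict can yield a body that no longer matches the signature. A minimal sketch of the pattern, with placeholder endpoint, credentials, and payload (not values from this PR):

import json
import httpx
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import Credentials

# Hypothetical credentials and endpoint, for illustration only
creds = Credentials(access_key="AKIA_EXAMPLE", secret_key="example-secret")
api_base = "https://runtime.sagemaker.us-west-2.amazonaws.com/endpoints/my-endpoint/invocations"
encoded_data = json.dumps({"inputs": "hello"}).encode("utf-8")

request = AWSRequest(
    method="POST", url=api_base, data=encoded_data,
    headers={"Content-Type": "application/json"},
)
SigV4Auth(creds, "sagemaker", "us-west-2").add_auth(request)
prepared_request = request.prepare()

# Send the signed bytes verbatim (litellm's handler passes prepared_request.body through its data= param)
response = httpx.post(
    url=prepared_request.url,
    headers=dict(prepared_request.headers),
    content=prepared_request.body,
)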
diff --git a/litellm/llms/sagemaker/completion/transformation.py b/litellm/llms/sagemaker/completion/transformation.py
index d0ab5d0697..bfc0b6e5f6 100644
--- a/litellm/llms/sagemaker/completion/transformation.py
+++ b/litellm/llms/sagemaker/completion/transformation.py
@@ -19,6 +19,7 @@ from litellm.litellm_core_utils.prompt_templates.factory import (
from litellm.llms.base_llm.chat.transformation import BaseConfig, BaseLLMException
from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import ModelResponse, Usage
+from litellm.utils import token_counter
from ..common_utils import SagemakerError
@@ -88,9 +89,9 @@ class SagemakerConfig(BaseConfig):
optional_params["top_p"] = value
if param == "n":
optional_params["best_of"] = value
- optional_params["do_sample"] = (
- True # Need to sample if you want best of for hf inference endpoints
- )
+ optional_params[
+ "do_sample"
+ ] = True # Need to sample if you want best of for hf inference endpoints
if param == "stream":
optional_params["stream"] = value
if param == "stop":
@@ -238,9 +239,12 @@ class SagemakerConfig(BaseConfig):
)
## CALCULATING USAGE - baseten charges on time, not tokens - have some mapping of cost here.
- prompt_tokens = len(encoding.encode(prompt))
- completion_tokens = len(
- encoding.encode(model_response["choices"][0]["message"].get("content", ""))
+ prompt_tokens = token_counter(
+ text=prompt, count_response_tokens=True
+ ) # doesn't apply any default token count from openai's chat template
+ completion_tokens = token_counter(
+ text=model_response["choices"][0]["message"].get("content", ""),
+ count_response_tokens=True,
)
model_response.created = int(time.time())
@@ -259,6 +263,7 @@ class SagemakerConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/snowflake/chat/transformation.py b/litellm/llms/snowflake/chat/transformation.py
index d3634e7950..2b92911b05 100644
--- a/litellm/llms/snowflake/chat/transformation.py
+++ b/litellm/llms/snowflake/chat/transformation.py
@@ -92,6 +92,7 @@ class SnowflakeConfig(OpenAIGPTConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
@@ -135,6 +136,7 @@ class SnowflakeConfig(OpenAIGPTConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
diff --git a/litellm/llms/together_ai/cost_calculator.py b/litellm/llms/together_ai/cost_calculator.py
index d3b0db8b89..a1be097bc8 100644
--- a/litellm/llms/together_ai/cost_calculator.py
+++ b/litellm/llms/together_ai/cost_calculator.py
@@ -4,6 +4,16 @@ Handles calculating cost for together ai models
import re
+from litellm.constants import (
+ TOGETHER_AI_4_B,
+ TOGETHER_AI_8_B,
+ TOGETHER_AI_21_B,
+ TOGETHER_AI_41_B,
+ TOGETHER_AI_80_B,
+ TOGETHER_AI_110_B,
+ TOGETHER_AI_EMBEDDING_150_M,
+ TOGETHER_AI_EMBEDDING_350_M,
+)
from litellm.types.utils import CallTypes
@@ -31,17 +41,17 @@ def get_model_params_and_category(model_name, call_type: CallTypes) -> str:
else:
return model_name
# Determine the category based on the number of parameters
- if params_billion <= 4.0:
+ if params_billion <= TOGETHER_AI_4_B:
category = "together-ai-up-to-4b"
- elif params_billion <= 8.0:
+ elif params_billion <= TOGETHER_AI_8_B:
category = "together-ai-4.1b-8b"
- elif params_billion <= 21.0:
+ elif params_billion <= TOGETHER_AI_21_B:
category = "together-ai-8.1b-21b"
- elif params_billion <= 41.0:
+ elif params_billion <= TOGETHER_AI_41_B:
category = "together-ai-21.1b-41b"
- elif params_billion <= 80.0:
+ elif params_billion <= TOGETHER_AI_80_B:
category = "together-ai-41.1b-80b"
- elif params_billion <= 110.0:
+ elif params_billion <= TOGETHER_AI_110_B:
category = "together-ai-81.1b-110b"
if category is not None:
return category
@@ -69,9 +79,9 @@ def get_model_params_and_category_embeddings(model_name) -> str:
else:
return model_name
# Determine the category based on the number of parameters
- if params_million <= 150:
+ if params_million <= TOGETHER_AI_EMBEDDING_150_M:
category = "together-ai-embedding-up-to-150m"
- elif params_million <= 350:
+ elif params_million <= TOGETHER_AI_EMBEDDING_350_M:
category = "together-ai-embedding-151m-to-350m"
if category is not None:
return category
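Editor's note: the bucketing logic above is unchanged, only the literal thresholds moved into litellm.constants. A standalone sketch of the same mapping, assuming the constants carry the values shown in the removed lines (4.0, 8.0, 21.0, 41.0, 80.0, 110.0 billion parameters); the regex here is a simplified stand-in, not the library's exact one:

import re

# Assumed to mirror the TOGETHER_AI_*_B constants referenced above (billions of parameters)
THRESHOLDS = [
    (4.0, "together-ai-up-to-4b"), (8.0, "together-ai-4.1b-8b"),
    (21.0, "together-ai-8.1b-21b"), (41.0, "together-ai-21.1b-41b"),
    (80.0, "together-ai-41.1b-80b"), (110.0, "together-ai-81.1b-110b"),
]

def category_for(model_name: str) -> str:
    # Pull a parameter count like "72b" out of the model name
    match = re.search(r"(\d+(?:\.\d+)?)b", model_name.lower())
    if match is None:
        return model_name
    params_billion = float(match.group(1))
    for limit, category in THRESHOLDS:
        if params_billion <= limit:
            return category
    return model_name

print(category_for("together_ai/Qwen/Qwen2-72B-Instruct"))  # -> together-ai-41.1b-80b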
diff --git a/litellm/llms/together_ai/rerank/transformation.py b/litellm/llms/together_ai/rerank/transformation.py
index 4714376979..1fdb772add 100644
--- a/litellm/llms/together_ai/rerank/transformation.py
+++ b/litellm/llms/together_ai/rerank/transformation.py
@@ -19,7 +19,6 @@ from litellm.types.rerank import (
class TogetherAIRerankConfig:
def _transform_response(self, response: dict) -> RerankResponse:
-
_billed_units = RerankBilledUnits(**response.get("usage", {}))
_tokens = RerankTokens(**response.get("usage", {}))
rerank_meta = RerankResponseMeta(billed_units=_billed_units, tokens=_tokens)
diff --git a/litellm/llms/topaz/common_utils.py b/litellm/llms/topaz/common_utils.py
index 4ef2315db4..95fe291493 100644
--- a/litellm/llms/topaz/common_utils.py
+++ b/litellm/llms/topaz/common_utils.py
@@ -1,6 +1,7 @@
from typing import List, Optional
from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import AllMessageValues
from ..base_llm.base_utils import BaseLLMModelInfo
from ..base_llm.chat.transformation import BaseLLMException
@@ -11,7 +12,29 @@ class TopazException(BaseLLMException):
class TopazModelInfo(BaseLLMModelInfo):
- def get_models(self) -> List[str]:
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ if api_key is None:
+ raise ValueError(
+ "API key is required for Topaz image variations. Set via `TOPAZ_API_KEY` or `api_key=..`"
+ )
+ return {
+ # "Content-Type": "multipart/form-data",
+ "Accept": "image/jpeg",
+ "X-API-Key": api_key,
+ }
+
+ def get_models(
+ self, api_key: Optional[str] = None, api_base: Optional[str] = None
+ ) -> List[str]:
return [
"topaz/Standard V2",
"topaz/Low Resolution V2",
diff --git a/litellm/llms/topaz/image_variations/transformation.py b/litellm/llms/topaz/image_variations/transformation.py
index 8b95deed04..41b51a558c 100644
--- a/litellm/llms/topaz/image_variations/transformation.py
+++ b/litellm/llms/topaz/image_variations/transformation.py
@@ -10,10 +10,7 @@ from litellm.llms.base_llm.chat.transformation import (
BaseLLMException,
LiteLLMLoggingObj,
)
-from litellm.types.llms.openai import (
- AllMessageValues,
- OpenAIImageVariationOptionalParams,
-)
+from litellm.types.llms.openai import OpenAIImageVariationOptionalParams
from litellm.types.utils import (
FileTypes,
HttpHandlerRequestFields,
@@ -22,37 +19,19 @@ from litellm.types.utils import (
)
from ...base_llm.image_variations.transformation import BaseImageVariationConfig
-from ..common_utils import TopazException
+from ..common_utils import TopazException, TopazModelInfo
-class TopazImageVariationConfig(BaseImageVariationConfig):
+class TopazImageVariationConfig(TopazModelInfo, BaseImageVariationConfig):
def get_supported_openai_params(
self, model: str
) -> List[OpenAIImageVariationOptionalParams]:
return ["response_format", "size"]
- def validate_environment(
- self,
- headers: dict,
- model: str,
- messages: List[AllMessageValues],
- optional_params: dict,
- api_key: Optional[str] = None,
- api_base: Optional[str] = None,
- ) -> dict:
- if api_key is None:
- raise ValueError(
- "API key is required for Topaz image variations. Set via `TOPAZ_API_KEY` or `api_key=..`"
- )
- return {
- # "Content-Type": "multipart/form-data",
- "Accept": "image/jpeg",
- "X-API-Key": api_key,
- }
-
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
@@ -121,7 +100,6 @@ class TopazImageVariationConfig(BaseImageVariationConfig):
optional_params: dict,
headers: dict,
) -> HttpHandlerRequestFields:
-
request_params = HttpHandlerRequestFields(
files={"image": self.prepare_file_tuple(image)},
data=optional_params,
@@ -134,7 +112,6 @@ class TopazImageVariationConfig(BaseImageVariationConfig):
image_content: bytes,
response_ms: float,
) -> ImageResponse:
-
# Convert to base64
base64_image = base64.b64encode(image_content).decode("utf-8")
diff --git a/litellm/llms/triton/completion/transformation.py b/litellm/llms/triton/completion/transformation.py
index 56151f89ef..0db83b2d3d 100644
--- a/litellm/llms/triton/completion/transformation.py
+++ b/litellm/llms/triton/completion/transformation.py
@@ -7,6 +7,7 @@ from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional,
from httpx import Headers, Response
+from litellm.constants import DEFAULT_MAX_TOKENS_FOR_TRITON
from litellm.litellm_core_utils.prompt_templates.factory import prompt_factory
from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator
from litellm.llms.base_llm.chat.transformation import (
@@ -47,6 +48,7 @@ class TritonConfig(BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: Dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> Dict:
@@ -70,6 +72,7 @@ class TritonConfig(BaseConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
@@ -195,9 +198,9 @@ class TritonGenerateConfig(TritonConfig):
data_for_triton: Dict[str, Any] = {
"text_input": prompt_factory(model=model, messages=messages),
"parameters": {
- "max_tokens": int(optional_params.get("max_tokens", 2000)),
- "bad_words": [""],
- "stop_words": [""],
+ "max_tokens": int(
+ optional_params.get("max_tokens", DEFAULT_MAX_TOKENS_FOR_TRITON)
+ ),
},
"stream": bool(stream),
}
@@ -244,7 +247,6 @@ class TritonInferConfig(TritonConfig):
litellm_params: dict,
headers: dict,
) -> dict:
-
text_input = messages[0].get("content", "")
data_for_triton = {
"inputs": [
diff --git a/litellm/llms/triton/embedding/transformation.py b/litellm/llms/triton/embedding/transformation.py
index 4744ec0834..8ab0277e36 100644
--- a/litellm/llms/triton/embedding/transformation.py
+++ b/litellm/llms/triton/embedding/transformation.py
@@ -42,6 +42,7 @@ class TritonEmbeddingConfig(BaseEmbeddingConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
diff --git a/litellm/llms/vertex_ai/batches/handler.py b/litellm/llms/vertex_ai/batches/handler.py
index b82268bef6..dc3f93857a 100644
--- a/litellm/llms/vertex_ai/batches/handler.py
+++ b/litellm/llms/vertex_ai/batches/handler.py
@@ -35,7 +35,6 @@ class VertexAIBatchPrediction(VertexLLM):
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
) -> Union[LiteLLMBatch, Coroutine[Any, Any, LiteLLMBatch]]:
-
sync_handler = _get_httpx_client()
access_token, project_id = self._ensure_access_token(
@@ -69,10 +68,8 @@ class VertexAIBatchPrediction(VertexLLM):
"Authorization": f"Bearer {access_token}",
}
- vertex_batch_request: VertexAIBatchPredictionJob = (
- VertexAIBatchTransformation.transform_openai_batch_request_to_vertex_ai_batch_request(
- request=create_batch_data
- )
+ vertex_batch_request: VertexAIBatchPredictionJob = VertexAIBatchTransformation.transform_openai_batch_request_to_vertex_ai_batch_request(
+ request=create_batch_data
)
if _is_async is True:
diff --git a/litellm/llms/vertex_ai/common_utils.py b/litellm/llms/vertex_ai/common_utils.py
index f7149c349a..314fb81901 100644
--- a/litellm/llms/vertex_ai/common_utils.py
+++ b/litellm/llms/vertex_ai/common_utils.py
@@ -1,10 +1,14 @@
-from typing import Dict, List, Literal, Optional, Tuple, Union
+import re
+from typing import Any, Dict, List, Literal, Optional, Set, Tuple, Union, get_type_hints
import httpx
+import litellm
from litellm import supports_response_schema, supports_system_messages, verbose_logger
+from litellm.constants import DEFAULT_MAX_RECURSE_DEPTH
+from litellm.litellm_core_utils.prompt_templates.common_utils import unpack_defs
from litellm.llms.base_llm.chat.transformation import BaseLLMException
-from litellm.types.llms.vertex_ai import PartType
+from litellm.types.llms.vertex_ai import PartType, Schema
class VertexAIError(BaseLLMException):
@@ -27,6 +31,10 @@ def get_supports_system_message(
supports_system_message = supports_system_messages(
model=model, custom_llm_provider=_custom_llm_provider
)
+
+ # Vertex Models called in the `/gemini` request/response format also support system messages
+ if litellm.VertexGeminiConfig._is_model_gemini_spec_model(model):
+ supports_system_message = True
except Exception as e:
verbose_logger.warning(
"Unable to identify if system message supported. Defaulting to 'False'. Received error message - {}\nAdd it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json".format(
@@ -54,7 +62,9 @@ def get_supports_response_schema(
from typing import Literal, Optional
-all_gemini_url_modes = Literal["chat", "embedding", "batch_embedding"]
+all_gemini_url_modes = Literal[
+ "chat", "embedding", "batch_embedding", "image_generation"
+]
def _get_vertex_url(
@@ -67,6 +77,8 @@ def _get_vertex_url(
) -> Tuple[str, str]:
url: Optional[str] = None
endpoint: Optional[str] = None
+
+ model = litellm.VertexGeminiConfig.get_model_for_vertex_ai_url(model=model)
if mode == "chat":
### SET RUNTIME ENDPOINT ###
endpoint = "generateContent"
@@ -90,7 +102,11 @@ def _get_vertex_url(
if model.isdigit():
# https://us-central1-aiplatform.googleapis.com/v1/projects/$PROJECT_ID/locations/us-central1/endpoints/$ENDPOINT_ID:predict
url = f"https://{vertex_location}-aiplatform.googleapis.com/{vertex_api_version}/projects/{vertex_project}/locations/{vertex_location}/endpoints/{model}:{endpoint}"
-
+ elif mode == "image_generation":
+ endpoint = "predict"
+ url = f"https://{vertex_location}-aiplatform.googleapis.com/v1/projects/{vertex_project}/locations/{vertex_location}/publishers/google/models/{model}:{endpoint}"
+ if model.isdigit():
+ url = f"https://{vertex_location}-aiplatform.googleapis.com/{vertex_api_version}/projects/{vertex_project}/locations/{vertex_location}/endpoints/{model}:{endpoint}"
if not url or not endpoint:
raise ValueError(f"Unable to get vertex url/endpoint for mode: {mode}")
return url, endpoint
@@ -126,6 +142,10 @@ def _get_gemini_url(
url = "https://generativelanguage.googleapis.com/v1beta/{}:{}?key={}".format(
_gemini_model_name, endpoint, gemini_api_key
)
+ elif mode == "image_generation":
+ raise ValueError(
+ "LiteLLM's `gemini/` route does not support image generation yet. Let us know if you need this feature by opening an issue at https://github.com/BerriAI/litellm/issues"
+ )
return url, endpoint
@@ -145,10 +165,22 @@ def _check_text_in_content(parts: List[PartType]) -> bool:
return has_text_param
-def _build_vertex_schema(parameters: dict):
+def _build_vertex_schema(parameters: dict, add_property_ordering: bool = False):
"""
This is a modified version of https://github.com/google-gemini/generative-ai-python/blob/8f77cc6ac99937cd3a81299ecf79608b91b06bbb/google/generativeai/types/content_types.py#L419
+
+ Updates the input parameters, removing extraneous fields, adjusting types, unwinding $defs, and adding propertyOrdering if specified, returning the updated parameters.
+
+ Parameters:
+ parameters: dict - the json schema to build from
+ add_property_ordering: bool - whether to add propertyOrdering to the schema. This is only applicable to schemas for structured outputs. See
+ set_schema_property_ordering for more details.
+ Returns:
+ parameters: dict - the updated parameters (extraneous fields are filtered into a new dict, so use the return value)
"""
+ # Get valid fields from Schema TypedDict
+ valid_schema_fields = set(get_type_hints(Schema).keys())
+
defs = parameters.pop("$defs", {})
# flatten the defs
for name, value in defs.items():
@@ -159,85 +191,121 @@ def _build_vertex_schema(parameters: dict):
# * https://github.com/pydantic/pydantic/issues/1270
# * https://stackoverflow.com/a/58841311
# * https://github.com/pydantic/pydantic/discussions/4872
- convert_to_nullable(parameters)
+ convert_anyof_null_to_nullable(parameters)
add_object_type(parameters)
# Postprocessing
- # 4. Suppress unnecessary title generation:
- # * https://github.com/pydantic/pydantic/issues/1051
- # * http://cl/586221780
- strip_field(parameters, field_name="title")
-
- strip_field(
- parameters, field_name="$schema"
- ) # 5. Remove $schema - json schema value, not supported by OpenAPI - causes vertex errors.
- strip_field(
- parameters, field_name="$id"
- ) # 6. Remove id - json schema value, not supported by OpenAPI - causes vertex errors.
+ # Filter out fields that don't exist in Schema
+ parameters = filter_schema_fields(parameters, valid_schema_fields)
+ if add_property_ordering:
+ set_schema_property_ordering(parameters)
return parameters
-def unpack_defs(schema, defs):
- properties = schema.get("properties", None)
- if properties is None:
- return
+def set_schema_property_ordering(
+ schema: Dict[str, Any], depth: int = 0
+) -> Dict[str, Any]:
+ """
+ The Vertex AI and Generative AI APIs return output fields in alphabetical order unless you specify the order explicitly.
+ Python dicts retain insertion order, so we just use that. Note that this field only applies to structured outputs, not tools.
+ Function tools are not affected by the same alphabetical ordering issue (the order of keys returned seems to be arbitrary, up to the model).
+ https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.cachedContents#Schema.FIELDS.property_ordering
- for name, value in properties.items():
- ref_key = value.get("$ref", None)
- if ref_key is not None:
- ref = defs[ref_key.split("defs/")[-1]]
- unpack_defs(ref, defs)
- properties[name] = ref
+ Args:
+ schema: The schema dictionary to process
+ depth: Current recursion depth to prevent infinite loops
+ """
+ if depth > DEFAULT_MAX_RECURSE_DEPTH:
+ raise ValueError(
+ f"Max depth of {DEFAULT_MAX_RECURSE_DEPTH} exceeded while processing schema. Please check the schema for excessive nesting."
+ )
+
+ if "properties" in schema and isinstance(schema["properties"], dict):
+ # retain propertyOrdering as an escape hatch if user already specifies it
+ if "propertyOrdering" not in schema:
+ schema["propertyOrdering"] = [k for k, v in schema["properties"].items()]
+ for k, v in schema["properties"].items():
+ set_schema_property_ordering(v, depth + 1)
+ if "items" in schema:
+ set_schema_property_ordering(schema["items"], depth + 1)
+ return schema
+
+
+def filter_schema_fields(
+ schema_dict: Dict[str, Any], valid_fields: Set[str], processed=None
+) -> Dict[str, Any]:
+ """
+ Recursively filter a schema dictionary to keep only valid fields.
+ """
+ if processed is None:
+ processed = set()
+
+ # Handle circular references
+ schema_id = id(schema_dict)
+ if schema_id in processed:
+ return schema_dict
+ processed.add(schema_id)
+
+ if not isinstance(schema_dict, dict):
+ return schema_dict
+
+ result = {}
+ for key, value in schema_dict.items():
+ if key not in valid_fields:
continue
- anyof = value.get("anyOf", None)
- if anyof is not None:
- for i, atype in enumerate(anyof):
- ref_key = atype.get("$ref", None)
- if ref_key is not None:
- ref = defs[ref_key.split("defs/")[-1]]
- unpack_defs(ref, defs)
- anyof[i] = ref
- continue
-
- items = value.get("items", None)
- if items is not None:
- ref_key = items.get("$ref", None)
- if ref_key is not None:
- ref = defs[ref_key.split("defs/")[-1]]
- unpack_defs(ref, defs)
- value["items"] = ref
- continue
-
-
-def convert_to_nullable(schema):
- anyof = schema.pop("anyOf", None)
- if anyof is not None:
- if len(anyof) != 2:
- raise ValueError(
- "Invalid input: Type Unions are not supported, except for `Optional` types. "
- "Please provide an `Optional` type or a non-Union type."
- )
- a, b = anyof
- if a == {"type": "null"}:
- schema.update(b)
- elif b == {"type": "null"}:
- schema.update(a)
+ if key == "properties" and isinstance(value, dict):
+ result[key] = {
+ k: filter_schema_fields(v, valid_fields, processed)
+ for k, v in value.items()
+ }
+ elif key == "items" and isinstance(value, dict):
+ result[key] = filter_schema_fields(value, valid_fields, processed)
+ elif key == "anyOf" and isinstance(value, list):
+ result[key] = [
+ filter_schema_fields(item, valid_fields, processed) for item in value # type: ignore
+ ]
else:
+ result[key] = value
+
+ return result
+
+
+def convert_anyof_null_to_nullable(schema, depth=0):
+ """Converts null objects within anyOf by removing them and adding nullable to all remaining objects."""
+ if depth > DEFAULT_MAX_RECURSE_DEPTH:
+ raise ValueError(
+ f"Max depth of {DEFAULT_MAX_RECURSE_DEPTH} exceeded while processing schema. Please check the schema for excessive nesting."
+ )
+ anyof = schema.get("anyOf", None)
+ if anyof is not None:
+ contains_null = False
+ # iterate over a copy: removing from the list while iterating it would skip elements
+ for atype in list(anyof):
+ if atype == {"type": "null"}:
+ # remove null type
+ anyof.remove(atype)
+ contains_null = True
+
+ if len(anyof) == 0:
+ # Edge case: response schema with only null type present is invalid in Vertex AI
raise ValueError(
- "Invalid input: Type Unions are not supported, except for `Optional` types. "
- "Please provide an `Optional` type or a non-Union type."
+ "Invalid input: AnyOf schema with only null type is not supported. "
+ "Please provide a non-null type."
)
- schema["nullable"] = True
+
+ if contains_null:
+ # set all types to nullable following guidance found here: https://cloud.google.com/vertex-ai/generative-ai/docs/samples/generativeaionvertexai-gemini-controlled-generation-response-schema-3#generativeaionvertexai_gemini_controlled_generation_response_schema_3-python
+ for atype in anyof:
+ atype["nullable"] = True
properties = schema.get("properties", None)
if properties is not None:
for name, value in properties.items():
- convert_to_nullable(value)
+ convert_anyof_null_to_nullable(value, depth=depth + 1)
items = schema.get("items", None)
if items is not None:
- convert_to_nullable(items)
+ convert_anyof_null_to_nullable(items, depth=depth + 1)
def add_object_type(schema):
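Editor's note: taken together, convert_anyof_null_to_nullable and set_schema_property_ordering turn a typical pydantic-style JSON schema into the shape Vertex expects. A standalone before/after sketch using plain dicts (not the helpers above), showing what they are expected to produce:

# Pydantic-style schema as emitted for an Optional[str] field
before = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "nickname": {"anyOf": [{"type": "string"}, {"type": "null"}]},
    },
}

# Expected result:
# - the null branch is removed and the remaining branch is marked nullable
# - propertyOrdering preserves dict insertion order (only added for structured outputs,
#   i.e. when add_property_ordering=True)
after = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "nickname": {"anyOf": [{"type": "string", "nullable": True}]},
    },
    "propertyOrdering": ["name", "nickname"],
}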
@@ -280,3 +348,82 @@ def _convert_vertex_datetime_to_openai_datetime(vertex_datetime: str) -> int:
dt = datetime.strptime(vertex_datetime, "%Y-%m-%dT%H:%M:%S.%fZ")
# Convert to Unix timestamp (seconds since epoch)
return int(dt.timestamp())
+
+
+def get_vertex_project_id_from_url(url: str) -> Optional[str]:
+ """
+ Get the vertex project id from the url
+
+ `https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent`
+ """
+ match = re.search(r"/projects/([^/]+)", url)
+ return match.group(1) if match else None
+
+
+def get_vertex_location_from_url(url: str) -> Optional[str]:
+ """
+ Get the vertex location from the url
+
+ `https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent`
+ """
+ match = re.search(r"/locations/([^/]+)", url)
+ return match.group(1) if match else None
+
+
+def replace_project_and_location_in_route(
+ requested_route: str, vertex_project: str, vertex_location: str
+) -> str:
+ """
+ Replace project and location values in the route with the provided values
+ """
+ # Replace project and location values while keeping route structure
+ modified_route = re.sub(
+ r"/projects/[^/]+/locations/[^/]+/",
+ f"/projects/{vertex_project}/locations/{vertex_location}/",
+ requested_route,
+ )
+ return modified_route
+
+
+def construct_target_url(
+ base_url: str,
+ requested_route: str,
+ vertex_location: Optional[str],
+ vertex_project: Optional[str],
+) -> httpx.URL:
+ """
+ Allow user to specify their own project id / location.
+
+ If missing, use defaults
+
+ Handle cachedContent scenario - https://github.com/BerriAI/litellm/issues/5460
+
+ Constructed Url:
+ POST https://LOCATION-aiplatform.googleapis.com/{version}/projects/PROJECT_ID/locations/LOCATION/cachedContents
+ """
+
+ new_base_url = httpx.URL(base_url)
+ if "locations" in requested_route: # contains the target project id + location
+ if vertex_project and vertex_location:
+ requested_route = replace_project_and_location_in_route(
+ requested_route, vertex_project, vertex_location
+ )
+ return new_base_url.copy_with(path=requested_route)
+
+ """
+ - Add endpoint version (e.g. v1beta1 for cachedContent, v1 for the rest)
+ - Add default project id
+ - Add default location
+ """
+ vertex_version: Literal["v1", "v1beta1"] = "v1"
+ if "cachedContent" in requested_route:
+ vertex_version = "v1beta1"
+
+ base_requested_route = "{}/projects/{}/locations/{}".format(
+ vertex_version, vertex_project, vertex_location
+ )
+
+ updated_requested_route = "/" + base_requested_route + requested_route
+
+ updated_url = new_base_url.copy_with(path=updated_requested_route)
+ return updated_url
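Editor's note: a quick illustration of how construct_target_url resolves a passthrough route; the project, location, and route values are made up for the example:

from litellm.llms.vertex_ai.common_utils import construct_target_url

# Route already names a project/location -> only the values are swapped in
url_1 = construct_target_url(
    base_url="https://us-central1-aiplatform.googleapis.com/",
    requested_route="/projects/other-proj/locations/us-east1/publishers/google/models/gemini-1.5-pro:generateContent",
    vertex_project="my-proj",
    vertex_location="us-central1",
)
# -> path /projects/my-proj/locations/us-central1/publishers/google/models/gemini-1.5-pro:generateContent

# cachedContent routes get the v1beta1 version plus default project/location prepended
url_2 = construct_target_url(
    base_url="https://us-central1-aiplatform.googleapis.com/",
    requested_route="/cachedContents",
    vertex_project="my-proj",
    vertex_location="us-central1",
)
# -> path /v1beta1/projects/my-proj/locations/us-central1/cachedContents
print(url_1, url_2)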
diff --git a/litellm/llms/vertex_ai/cost_calculator.py b/litellm/llms/vertex_ai/cost_calculator.py
index fd23886045..119ba2b036 100644
--- a/litellm/llms/vertex_ai/cost_calculator.py
+++ b/litellm/llms/vertex_ai/cost_calculator.py
@@ -4,7 +4,11 @@ from typing import Literal, Optional, Tuple, Union
import litellm
from litellm import verbose_logger
-from litellm.litellm_core_utils.llm_cost_calc.utils import _is_above_128k
+from litellm.litellm_core_utils.llm_cost_calc.utils import (
+ _is_above_128k,
+ generic_cost_per_token,
+)
+from litellm.types.utils import ModelInfo, Usage
"""
Gemini pricing covers:
@@ -20,7 +24,7 @@ Vertex AI -> character based pricing
Google AI Studio -> token based pricing
"""
-models_without_dynamic_pricing = ["gemini-1.0-pro", "gemini-pro"]
+models_without_dynamic_pricing = ["gemini-1.0-pro", "gemini-pro", "gemini-2"]
def cost_router(
@@ -46,14 +50,15 @@ def cost_router(
call_type == "embedding" or call_type == "aembedding"
):
return "cost_per_token"
+ elif custom_llm_provider == "vertex_ai" and ("gemini-2" in model):
+ return "cost_per_token"
return "cost_per_character"
def cost_per_character(
model: str,
custom_llm_provider: str,
- prompt_tokens: float,
- completion_tokens: float,
+ usage: Usage,
prompt_characters: Optional[float] = None,
completion_characters: Optional[float] = None,
) -> Tuple[float, float]:
@@ -86,8 +91,7 @@ def cost_per_character(
prompt_cost, _ = cost_per_token(
model=model,
custom_llm_provider=custom_llm_provider,
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
+ usage=usage,
)
else:
try:
@@ -124,8 +128,7 @@ def cost_per_character(
prompt_cost, _ = cost_per_token(
model=model,
custom_llm_provider=custom_llm_provider,
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
+ usage=usage,
)
## CALCULATE OUTPUT COST
@@ -133,10 +136,10 @@ def cost_per_character(
_, completion_cost = cost_per_token(
model=model,
custom_llm_provider=custom_llm_provider,
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
+ usage=usage,
)
else:
+ completion_tokens = usage.completion_tokens
try:
if (
_is_above_128k(tokens=completion_characters * 4) # 1 token = 4 char
@@ -172,18 +175,54 @@ def cost_per_character(
_, completion_cost = cost_per_token(
model=model,
custom_llm_provider=custom_llm_provider,
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
+ usage=usage,
)
return prompt_cost, completion_cost
+def _handle_128k_pricing(
+ model_info: ModelInfo,
+ usage: Usage,
+) -> Tuple[float, float]:
+ ## CALCULATE INPUT COST
+ input_cost_per_token_above_128k_tokens = model_info.get(
+ "input_cost_per_token_above_128k_tokens"
+ )
+ output_cost_per_token_above_128k_tokens = model_info.get(
+ "output_cost_per_token_above_128k_tokens"
+ )
+
+ prompt_tokens = usage.prompt_tokens
+ completion_tokens = usage.completion_tokens
+
+ if (
+ _is_above_128k(tokens=prompt_tokens)
+ and input_cost_per_token_above_128k_tokens is not None
+ ):
+ prompt_cost = prompt_tokens * input_cost_per_token_above_128k_tokens
+ else:
+ prompt_cost = prompt_tokens * model_info["input_cost_per_token"]
+
+ ## CALCULATE OUTPUT COST
+ output_cost_per_token_above_128k_tokens = model_info.get(
+ "output_cost_per_token_above_128k_tokens"
+ )
+ if (
+ _is_above_128k(tokens=completion_tokens)
+ and output_cost_per_token_above_128k_tokens is not None
+ ):
+ completion_cost = completion_tokens * output_cost_per_token_above_128k_tokens
+ else:
+ completion_cost = completion_tokens * model_info["output_cost_per_token"]
+
+ return prompt_cost, completion_cost
+
+
def cost_per_token(
model: str,
custom_llm_provider: str,
- prompt_tokens: float,
- completion_tokens: float,
+ usage: Usage,
) -> Tuple[float, float]:
"""
Calculates the cost per token for a given model, prompt tokens, and completion tokens.
@@ -200,43 +239,30 @@ def cost_per_token(
Raises:
Exception if model requires >128k pricing, but model cost not mapped
"""
+
## GET MODEL INFO
model_info = litellm.get_model_info(
model=model, custom_llm_provider=custom_llm_provider
)
- ## CALCULATE INPUT COST
+ ## HANDLE 128k+ PRICING
+ input_cost_per_token_above_128k_tokens = model_info.get(
+ "input_cost_per_token_above_128k_tokens"
+ )
+ output_cost_per_token_above_128k_tokens = model_info.get(
+ "output_cost_per_token_above_128k_tokens"
+ )
if (
- _is_above_128k(tokens=prompt_tokens)
- and model not in models_without_dynamic_pricing
+ input_cost_per_token_above_128k_tokens is not None
+ or output_cost_per_token_above_128k_tokens is not None
):
- assert (
- "input_cost_per_token_above_128k_tokens" in model_info
- and model_info["input_cost_per_token_above_128k_tokens"] is not None
- ), "model info for model={} does not have pricing for > 128k tokens\nmodel_info={}".format(
- model, model_info
+ return _handle_128k_pricing(
+ model_info=model_info,
+ usage=usage,
)
- prompt_cost = (
- prompt_tokens * model_info["input_cost_per_token_above_128k_tokens"]
- )
- else:
- prompt_cost = prompt_tokens * model_info["input_cost_per_token"]
- ## CALCULATE OUTPUT COST
- if (
- _is_above_128k(tokens=completion_tokens)
- and model not in models_without_dynamic_pricing
- ):
- assert (
- "output_cost_per_token_above_128k_tokens" in model_info
- and model_info["output_cost_per_token_above_128k_tokens"] is not None
- ), "model info for model={} does not have pricing for > 128k tokens\nmodel_info={}".format(
- model, model_info
- )
- completion_cost = (
- completion_tokens * model_info["output_cost_per_token_above_128k_tokens"]
- )
- else:
- completion_cost = completion_tokens * model_info["output_cost_per_token"]
-
- return prompt_cost, completion_cost
+ return generic_cost_per_token(
+ model=model,
+ custom_llm_provider=custom_llm_provider,
+ usage=usage,
+ )
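Editor's note: cost_per_token now takes a Usage object instead of raw token counts. A small worked sketch of the >128k branch handled by _handle_128k_pricing; the per-token prices below are hypothetical and only illustrate the arithmetic:

from litellm.types.utils import Usage

# Hypothetical prices, for illustration only
input_cost_per_token = 1.25e-6
input_cost_per_token_above_128k_tokens = 2.5e-6
output_cost_per_token = 5e-6

usage = Usage(prompt_tokens=200_000, completion_tokens=1_000)

# prompt is above the 128k threshold -> higher input rate applies
prompt_cost = usage.prompt_tokens * input_cost_per_token_above_128k_tokens   # 0.5
# completion is below the threshold -> regular output rate
completion_cost = usage.completion_tokens * output_cost_per_token            # 0.005
print(prompt_cost, completion_cost)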
diff --git a/litellm/llms/vertex_ai/files/handler.py b/litellm/llms/vertex_ai/files/handler.py
index 266169cdfb..a666a2c37f 100644
--- a/litellm/llms/vertex_ai/files/handler.py
+++ b/litellm/llms/vertex_ai/files/handler.py
@@ -1,3 +1,4 @@
+import asyncio
from typing import Any, Coroutine, Optional, Union
import httpx
@@ -8,12 +9,12 @@ from litellm.integrations.gcs_bucket.gcs_bucket_base import (
GCSLoggingConfig,
)
from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
-from litellm.types.llms.openai import CreateFileRequest, FileObject
+from litellm.types.llms.openai import CreateFileRequest, OpenAIFileObject
from litellm.types.llms.vertex_ai import VERTEX_CREDENTIALS_TYPES
-from .transformation import VertexAIFilesTransformation
+from .transformation import VertexAIJsonlFilesTransformation
-vertex_ai_files_transformation = VertexAIFilesTransformation()
+vertex_ai_files_transformation = VertexAIJsonlFilesTransformation()
class VertexAIFilesHandler(GCSBucketBase):
@@ -29,8 +30,6 @@ class VertexAIFilesHandler(GCSBucketBase):
llm_provider=LlmProviders.VERTEX_AI,
)
- pass
-
async def async_create_file(
self,
create_file_data: CreateFileRequest,
@@ -40,7 +39,7 @@ class VertexAIFilesHandler(GCSBucketBase):
vertex_location: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
- ):
+ ) -> OpenAIFileObject:
gcs_logging_config: GCSLoggingConfig = await self.get_gcs_logging_config(
kwargs={}
)
@@ -49,10 +48,11 @@ class VertexAIFilesHandler(GCSBucketBase):
service_account_json=gcs_logging_config["path_service_account"],
)
bucket_name = gcs_logging_config["bucket_name"]
- logging_payload, object_name = (
- vertex_ai_files_transformation.transform_openai_file_content_to_vertex_ai_file_content(
- openai_file_content=create_file_data.get("file")
- )
+ (
+ logging_payload,
+ object_name,
+ ) = vertex_ai_files_transformation.transform_openai_file_content_to_vertex_ai_file_content(
+ openai_file_content=create_file_data.get("file")
)
gcs_upload_response = await self._log_json_data_on_gcs(
headers=headers,
@@ -76,7 +76,7 @@ class VertexAIFilesHandler(GCSBucketBase):
vertex_location: Optional[str],
timeout: Union[float, httpx.Timeout],
max_retries: Optional[int],
- ) -> Union[FileObject, Coroutine[Any, Any, FileObject]]:
+ ) -> Union[OpenAIFileObject, Coroutine[Any, Any, OpenAIFileObject]]:
"""
Creates a file on VertexAI GCS Bucket
@@ -93,5 +93,15 @@ class VertexAIFilesHandler(GCSBucketBase):
timeout=timeout,
max_retries=max_retries,
)
-
- return None # type: ignore
+ else:
+ return asyncio.run(
+ self.async_create_file(
+ create_file_data=create_file_data,
+ api_base=api_base,
+ vertex_credentials=vertex_credentials,
+ vertex_project=vertex_project,
+ vertex_location=vertex_location,
+ timeout=timeout,
+ max_retries=max_retries,
+ )
+ )
diff --git a/litellm/llms/vertex_ai/files/transformation.py b/litellm/llms/vertex_ai/files/transformation.py
index a124e20583..c795367e48 100644
--- a/litellm/llms/vertex_ai/files/transformation.py
+++ b/litellm/llms/vertex_ai/files/transformation.py
@@ -1,7 +1,17 @@
import json
+import os
+import time
import uuid
from typing import Any, Dict, List, Optional, Tuple, Union
+from httpx import Headers, Response
+
+from litellm.litellm_core_utils.prompt_templates.common_utils import extract_file_data
+from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.llms.base_llm.files.transformation import (
+ BaseFilesConfig,
+ LiteLLMLoggingObj,
+)
from litellm.llms.vertex_ai.common_utils import (
_convert_vertex_datetime_to_openai_datetime,
)
@@ -9,10 +19,318 @@ from litellm.llms.vertex_ai.gemini.transformation import _transform_request_body
from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
VertexGeminiConfig,
)
-from litellm.types.llms.openai import CreateFileRequest, FileObject, FileTypes, PathLike
+from litellm.types.llms.openai import (
+ AllMessageValues,
+ CreateFileRequest,
+ FileTypes,
+ OpenAICreateFileRequestOptionalParams,
+ OpenAIFileObject,
+ PathLike,
+)
+from litellm.types.llms.vertex_ai import GcsBucketResponse
+from litellm.types.utils import ExtractedFileData, LlmProviders
+
+from ..common_utils import VertexAIError
+from ..vertex_llm_base import VertexBase
-class VertexAIFilesTransformation(VertexGeminiConfig):
+class VertexAIFilesConfig(VertexBase, BaseFilesConfig):
+ """
+ Config for VertexAI Files
+ """
+
+ def __init__(self):
+ self.jsonl_transformation = VertexAIJsonlFilesTransformation()
+ super().__init__()
+
+ @property
+ def custom_llm_provider(self) -> LlmProviders:
+ return LlmProviders.VERTEX_AI
+
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ if not api_key:
+ api_key, _ = self.get_access_token(
+ credentials=litellm_params.get("vertex_credentials"),
+ project_id=litellm_params.get("vertex_project"),
+ )
+ if not api_key:
+ raise ValueError("api_key is required")
+ headers["Authorization"] = f"Bearer {api_key}"
+ return headers
+
+ def _get_content_from_openai_file(self, openai_file_content: FileTypes) -> str:
+ """
+ Helper to extract content from various OpenAI file types and return as string.
+
+ Handles:
+ - Direct content (str, bytes, IO[bytes])
+ - Tuple formats: (filename, content, [content_type], [headers])
+ - PathLike objects
+ """
+ content: Union[str, bytes] = b""
+ # Extract file content from tuple if necessary
+ if isinstance(openai_file_content, tuple):
+ # Take the second element which is always the file content
+ file_content = openai_file_content[1]
+ else:
+ file_content = openai_file_content
+
+ # Handle different file content types
+ if isinstance(file_content, str):
+ # String content can be used directly
+ content = file_content
+ elif isinstance(file_content, bytes):
+ # Bytes content is decoded to utf-8 below
+ content = file_content
+ elif isinstance(file_content, PathLike): # PathLike
+ with open(str(file_content), "rb") as f:
+ content = f.read()
+ elif hasattr(file_content, "read"): # IO[bytes]
+ # File-like objects need to be read
+ content = file_content.read()
+
+ # Ensure content is string
+ if isinstance(content, bytes):
+ content = content.decode("utf-8")
+
+ return content
+
+ def _get_gcs_object_name_from_batch_jsonl(
+ self,
+ openai_jsonl_content: List[Dict[str, Any]],
+ ) -> str:
+ """
+ Gets a unique GCS object name for the VertexAI batch prediction job
+
+ named as: litellm-vertex-files/{model}/{uuid}
+ """
+ _model = openai_jsonl_content[0].get("body", {}).get("model", "")
+ if "publishers/google/models" not in _model:
+ _model = f"publishers/google/models/{_model}"
+ object_name = f"litellm-vertex-files/{_model}/{uuid.uuid4()}"
+ return object_name
+
+ def get_object_name(
+ self, extracted_file_data: ExtractedFileData, purpose: str
+ ) -> str:
+ """
+ Get the object name for the request
+ """
+ extracted_file_data_content = extracted_file_data.get("content")
+
+ if extracted_file_data_content is None:
+ raise ValueError("file content is required")
+
+ if purpose == "batch":
+ ## 1. If jsonl, check if there's a model name
+ file_content = self._get_content_from_openai_file(
+ extracted_file_data_content
+ )
+
+ # Split into lines and parse each line as JSON
+ openai_jsonl_content = [
+ json.loads(line) for line in file_content.splitlines() if line.strip()
+ ]
+ if len(openai_jsonl_content) > 0:
+ return self._get_gcs_object_name_from_batch_jsonl(openai_jsonl_content)
+
+ ## 2. If not jsonl, return the filename
+ filename = extracted_file_data.get("filename")
+ if filename:
+ return filename
+ ## 3. If no file name, return timestamp
+ return str(int(time.time()))
+
+ def get_complete_file_url(
+ self,
+ api_base: Optional[str],
+ api_key: Optional[str],
+ model: str,
+ optional_params: Dict,
+ litellm_params: Dict,
+ data: CreateFileRequest,
+ ) -> str:
+ """
+ Get the complete url for the request
+ """
+ bucket_name = litellm_params.get("bucket_name") or os.getenv("GCS_BUCKET_NAME")
+ if not bucket_name:
+ raise ValueError("GCS bucket_name is required")
+ file_data = data.get("file")
+ purpose = data.get("purpose")
+ if file_data is None:
+ raise ValueError("file is required")
+ if purpose is None:
+ raise ValueError("purpose is required")
+ extracted_file_data = extract_file_data(file_data)
+ object_name = self.get_object_name(extracted_file_data, purpose)
+ endpoint = (
+ f"upload/storage/v1/b/{bucket_name}/o?uploadType=media&name={object_name}"
+ )
+ api_base = api_base or "https://storage.googleapis.com"
+ if not api_base:
+ raise ValueError("api_base is required")
+
+ return f"{api_base}/{endpoint}"
+
+ def get_supported_openai_params(
+ self, model: str
+ ) -> List[OpenAICreateFileRequestOptionalParams]:
+ return []
+
+ def map_openai_params(
+ self,
+ non_default_params: dict,
+ optional_params: dict,
+ model: str,
+ drop_params: bool,
+ ) -> dict:
+ return optional_params
+
+ def _map_openai_to_vertex_params(
+ self,
+ openai_request_body: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """
+ wrapper to call VertexGeminiConfig.map_openai_params
+ """
+ from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
+ VertexGeminiConfig,
+ )
+
+ config = VertexGeminiConfig()
+ _model = openai_request_body.get("model", "")
+ vertex_params = config.map_openai_params(
+ model=_model,
+ non_default_params=openai_request_body,
+ optional_params={},
+ drop_params=False,
+ )
+ return vertex_params
+
+ def _transform_openai_jsonl_content_to_vertex_ai_jsonl_content(
+ self, openai_jsonl_content: List[Dict[str, Any]]
+ ) -> List[Dict[str, Any]]:
+ """
+ Transforms OpenAI JSONL content to VertexAI JSONL content
+
+ jsonl body for vertex is {"request": <request body>}
+ Example Vertex jsonl
+ {"request":{"contents": [{"role": "user", "parts": [{"text": "What is the relation between the following video and image samples?"}, {"fileData": {"fileUri": "gs://cloud-samples-data/generative-ai/video/animals.mp4", "mimeType": "video/mp4"}}, {"fileData": {"fileUri": "gs://cloud-samples-data/generative-ai/image/cricket.jpeg", "mimeType": "image/jpeg"}}]}]}}
+ {"request":{"contents": [{"role": "user", "parts": [{"text": "Describe what is happening in this video."}, {"fileData": {"fileUri": "gs://cloud-samples-data/generative-ai/video/another_video.mov", "mimeType": "video/mov"}}]}]}}
+ """
+
+ vertex_jsonl_content = []
+ for _openai_jsonl_content in openai_jsonl_content:
+ openai_request_body = _openai_jsonl_content.get("body") or {}
+ vertex_request_body = _transform_request_body(
+ messages=openai_request_body.get("messages", []),
+ model=openai_request_body.get("model", ""),
+ optional_params=self._map_openai_to_vertex_params(openai_request_body),
+ custom_llm_provider="vertex_ai",
+ litellm_params={},
+ cached_content=None,
+ )
+ vertex_jsonl_content.append({"request": vertex_request_body})
+ return vertex_jsonl_content
+
+ def transform_create_file_request(
+ self,
+ model: str,
+ create_file_data: CreateFileRequest,
+ optional_params: dict,
+ litellm_params: dict,
+ ) -> Union[bytes, str, dict]:
+ """
+ 2 Cases:
+ 1. Handle basic file upload
+ 2. Handle batch file upload (.jsonl)
+ """
+ file_data = create_file_data.get("file")
+ if file_data is None:
+ raise ValueError("file is required")
+ extracted_file_data = extract_file_data(file_data)
+ extracted_file_data_content = extracted_file_data.get("content")
+ if (
+ create_file_data.get("purpose") == "batch"
+ and extracted_file_data.get("content_type") == "application/jsonl"
+ and extracted_file_data_content is not None
+ ):
+ ## 1. If jsonl, check if there's a model name
+ file_content = self._get_content_from_openai_file(
+ extracted_file_data_content
+ )
+
+ # Split into lines and parse each line as JSON
+ openai_jsonl_content = [
+ json.loads(line) for line in file_content.splitlines() if line.strip()
+ ]
+ vertex_jsonl_content = (
+ self._transform_openai_jsonl_content_to_vertex_ai_jsonl_content(
+ openai_jsonl_content
+ )
+ )
+ return json.dumps(vertex_jsonl_content)
+ elif isinstance(extracted_file_data_content, bytes):
+ return extracted_file_data_content
+ else:
+ raise ValueError("Unsupported file content type")
+
+ def transform_create_file_response(
+ self,
+ model: Optional[str],
+ raw_response: Response,
+ logging_obj: LiteLLMLoggingObj,
+ litellm_params: dict,
+ ) -> OpenAIFileObject:
+ """
+ Transform VertexAI File upload response into OpenAI-style FileObject
+ """
+ response_json = raw_response.json()
+
+ try:
+ response_object = GcsBucketResponse(**response_json) # type: ignore
+ except Exception as e:
+ raise VertexAIError(
+ status_code=raw_response.status_code,
+ message=f"Error reading GCS bucket response: {e}",
+ headers=raw_response.headers,
+ )
+
+ gcs_id = response_object.get("id", "")
+ # Remove the last numeric ID from the path
+ gcs_id = "/".join(gcs_id.split("/")[:-1]) if gcs_id else ""
+
+ return OpenAIFileObject(
+ purpose=response_object.get("purpose", "batch"),
+ id=f"gs://{gcs_id}",
+ filename=response_object.get("name", ""),
+ created_at=_convert_vertex_datetime_to_openai_datetime(
+ vertex_datetime=response_object.get("timeCreated", "")
+ ),
+ status="uploaded",
+ bytes=int(response_object.get("size", 0)),
+ object="file",
+ )
+
+ def get_error_class(
+ self, error_message: str, status_code: int, headers: Union[Dict, Headers]
+ ) -> BaseLLMException:
+ return VertexAIError(
+ status_code=status_code, message=error_message, headers=headers
+ )
+
+
+class VertexAIJsonlFilesTransformation(VertexGeminiConfig):
"""
Transforms OpenAI /v1/files/* requests to VertexAI /v1/files/* requests
"""
@@ -142,7 +460,7 @@ class VertexAIFilesTransformation(VertexGeminiConfig):
def transform_gcs_bucket_response_to_openai_file_object(
self, create_file_data: CreateFileRequest, gcs_upload_response: Dict[str, Any]
- ) -> FileObject:
+ ) -> OpenAIFileObject:
"""
Transforms GCS Bucket upload file response to OpenAI FileObject
"""
@@ -150,7 +468,7 @@ class VertexAIFilesTransformation(VertexGeminiConfig):
# Remove the last numeric ID from the path
gcs_id = "/".join(gcs_id.split("/")[:-1]) if gcs_id else ""
- return FileObject(
+ return OpenAIFileObject(
purpose=create_file_data.get("purpose", "batch"),
id=f"gs://{gcs_id}",
filename=gcs_upload_response.get("name", ""),
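Editor's note: for the batch (.jsonl) path, each OpenAI request line is rewrapped as {"request": ...} in Gemini format before being uploaded to the GCS media endpoint. A sketch of the shape change (the Gemini body is abbreviated; bucket and object name are placeholders):

import json

# One line of an OpenAI batch .jsonl file
openai_line = {
    "custom_id": "req-1",
    "method": "POST",
    "url": "/v1/chat/completions",
    "body": {"model": "gemini-1.5-pro", "messages": [{"role": "user", "content": "hi"}]},
}

# Roughly what transform_create_file_request emits for the same line
vertex_line = {
    "request": {
        "contents": [{"role": "user", "parts": [{"text": "hi"}]}],
        # generationConfig etc. come from mapping the OpenAI params via VertexGeminiConfig
    }
}

# Upload target built by get_complete_file_url:
# POST https://storage.googleapis.com/upload/storage/v1/b/<bucket>/o?uploadType=media&name=<object_name>
print(json.dumps(vertex_line))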
diff --git a/litellm/llms/vertex_ai/fine_tuning/handler.py b/litellm/llms/vertex_ai/fine_tuning/handler.py
index 3cf409c78e..7ea8527fd4 100644
--- a/litellm/llms/vertex_ai/fine_tuning/handler.py
+++ b/litellm/llms/vertex_ai/fine_tuning/handler.py
@@ -36,7 +36,6 @@ class VertexFineTuningAPI(VertexLLM):
def convert_response_created_at(self, response: ResponseTuningJob):
try:
-
create_time_str = response.get("createTime", "") or ""
create_time_datetime = datetime.fromisoformat(
create_time_str.replace("Z", "+00:00")
@@ -65,9 +64,9 @@ class VertexFineTuningAPI(VertexLLM):
)
if create_fine_tuning_job_data.validation_file:
- supervised_tuning_spec["validation_dataset"] = (
- create_fine_tuning_job_data.validation_file
- )
+ supervised_tuning_spec[
+ "validation_dataset"
+ ] = create_fine_tuning_job_data.validation_file
_vertex_hyperparameters = (
self._transform_openai_hyperparameters_to_vertex_hyperparameters(
@@ -175,7 +174,6 @@ class VertexFineTuningAPI(VertexLLM):
headers: dict,
request_data: FineTuneJobCreate,
):
-
try:
verbose_logger.debug(
"about to create fine tuning job: %s, request_data: %s",
@@ -229,7 +227,6 @@ class VertexFineTuningAPI(VertexLLM):
kwargs: Optional[dict] = None,
original_hyperparameters: Optional[dict] = {},
):
-
verbose_logger.debug(
"creating fine tuning job, args= %s", create_fine_tuning_job_data
)
@@ -346,9 +343,9 @@ class VertexFineTuningAPI(VertexLLM):
elif "cachedContents" in request_route:
_model = request_data.get("model")
if _model is not None and "/publishers/google/models/" not in _model:
- request_data["model"] = (
- f"projects/{vertex_project}/locations/{vertex_location}/publishers/google/models/{_model}"
- )
+ request_data[
+ "model"
+ ] = f"projects/{vertex_project}/locations/{vertex_location}/publishers/google/models/{_model}"
url = f"https://{vertex_location}-aiplatform.googleapis.com/v1beta1/projects/{vertex_project}/locations/{vertex_location}{request_route}"
else:
diff --git a/litellm/llms/vertex_ai/gemini/transformation.py b/litellm/llms/vertex_ai/gemini/transformation.py
index d6bafc7c60..e50954b8f9 100644
--- a/litellm/llms/vertex_ai/gemini/transformation.py
+++ b/litellm/llms/vertex_ai/gemini/transformation.py
@@ -12,6 +12,9 @@ from pydantic import BaseModel
import litellm
from litellm._logging import verbose_logger
+from litellm.litellm_core_utils.prompt_templates.common_utils import (
+ _get_image_mime_type_from_url,
+)
from litellm.litellm_core_utils.prompt_templates.factory import (
convert_to_anthropic_image_obj,
convert_to_gemini_tool_call_invoke,
@@ -27,6 +30,8 @@ from litellm.types.files import (
from litellm.types.llms.openai import (
AllMessageValues,
ChatCompletionAssistantMessage,
+ ChatCompletionAudioObject,
+ ChatCompletionFileObject,
ChatCompletionImageObject,
ChatCompletionTextObject,
)
@@ -85,7 +90,6 @@ def _process_gemini_image(image_url: str, format: Optional[str] = None) -> PartT
and (image_type := format or _get_image_mime_type_from_url(image_url))
is not None
):
-
file_data = FileDataType(file_uri=image_url, mime_type=image_type)
return PartType(file_data=file_data)
elif "http://" in image_url or "https://" in image_url or "base64" in image_url:
@@ -98,33 +102,6 @@ def _process_gemini_image(image_url: str, format: Optional[str] = None) -> PartT
raise e
-def _get_image_mime_type_from_url(url: str) -> Optional[str]:
- """
- Get mime type for common image URLs
- See gemini mime types: https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/image-understanding#image-requirements
-
- Supported by Gemini:
- - PNG (`image/png`)
- - JPEG (`image/jpeg`)
- - WebP (`image/webp`)
- Example:
- url = https://example.com/image.jpg
- Returns: image/jpeg
- """
- url = url.lower()
- if url.endswith((".jpg", ".jpeg")):
- return "image/jpeg"
- elif url.endswith(".png"):
- return "image/png"
- elif url.endswith(".webp"):
- return "image/webp"
- elif url.endswith(".mp4"):
- return "video/mp4"
- elif url.endswith(".pdf"):
- return "application/pdf"
- return None
-
-
def _gemini_convert_messages_with_history( # noqa: PLR0915
messages: List[AllMessageValues],
) -> List[ContentType]:
@@ -153,7 +130,7 @@ def _gemini_convert_messages_with_history( # noqa: PLR0915
_message_content = messages[msg_i].get("content")
if _message_content is not None and isinstance(_message_content, list):
_parts: List[PartType] = []
- for element in _message_content:
+ for element_idx, element in enumerate(_message_content):
if (
element["type"] == "text"
and "text" in element
@@ -175,6 +152,35 @@ def _gemini_convert_messages_with_history( # noqa: PLR0915
image_url=image_url, format=format
)
_parts.append(_part)
+ elif element["type"] == "input_audio":
+ audio_element = cast(ChatCompletionAudioObject, element)
+ if audio_element["input_audio"].get("data") is not None:
+ _part = _process_gemini_image(
+ image_url=audio_element["input_audio"]["data"],
+ format=audio_element["input_audio"].get("format"),
+ )
+ _parts.append(_part)
+ elif element["type"] == "file":
+ file_element = cast(ChatCompletionFileObject, element)
+ file_id = file_element["file"].get("file_id")
+ format = file_element["file"].get("format")
+ file_data = file_element["file"].get("file_data")
+ passed_file = file_id or file_data
+ if passed_file is None:
+ raise Exception(
+ "Unknown file type. Please pass in a file_id or file_data"
+ )
+ try:
+ _part = _process_gemini_image(
+ image_url=passed_file, format=format
+ )
+ _parts.append(_part)
+ except Exception:
+ raise Exception(
+ "Unable to determine mime type for file_id: {}, set this explicitly using message[{}].content[{}].file.format".format(
+ file_id, msg_i, element_idx
+ )
+ )
user_content.extend(_parts)
elif (
_message_content is not None
@@ -210,6 +216,11 @@ def _gemini_convert_messages_with_history( # noqa: PLR0915
msg_dict = messages[msg_i] # type: ignore
assistant_msg = ChatCompletionAssistantMessage(**msg_dict) # type: ignore
_message_content = assistant_msg.get("content", None)
+ reasoning_content = assistant_msg.get("reasoning_content", None)
+ if reasoning_content is not None:
+ assistant_content.append(
+ PartType(thought=True, text=reasoning_content)
+ )
if _message_content is not None and isinstance(_message_content, list):
_parts = []
for element in _message_content:
@@ -217,6 +228,7 @@ def _gemini_convert_messages_with_history( # noqa: PLR0915
if element["type"] == "text":
_part = PartType(text=element["text"])
_parts.append(_part)
+
assistant_content.extend(_parts)
elif (
_message_content is not None
@@ -414,18 +426,19 @@ async def async_transform_request_body(
context_caching_endpoints = ContextCachingEndpoints()
if gemini_api_key is not None:
- messages, cached_content = (
- await context_caching_endpoints.async_check_and_create_cache(
- messages=messages,
- api_key=gemini_api_key,
- api_base=api_base,
- model=model,
- client=client,
- timeout=timeout,
- extra_headers=extra_headers,
- cached_content=optional_params.pop("cached_content", None),
- logging_obj=logging_obj,
- )
+ (
+ messages,
+ cached_content,
+ ) = await context_caching_endpoints.async_check_and_create_cache(
+ messages=messages,
+ api_key=gemini_api_key,
+ api_base=api_base,
+ model=model,
+ client=client,
+ timeout=timeout,
+ extra_headers=extra_headers,
+ cached_content=optional_params.pop("cached_content", None),
+ logging_obj=logging_obj,
)
else: # [TODO] implement context caching for gemini as well
cached_content = optional_params.pop("cached_content", None)
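Editor's note: the new `file` and `input_audio` branches let OpenAI-style content parts carry GCS URIs or raw data through to Gemini fileData parts. A sketch of a message that would exercise the `file` branch (the URI and format are examples, not values from this PR):

messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Summarize this document."},
            {
                "type": "file",
                "file": {
                    "file_id": "gs://my-bucket/reports/q1.pdf",   # example URI
                    "format": "application/pdf",  # explicit mime type avoids the lookup error above
                },
            },
        ],
    }
]
# _gemini_convert_messages_with_history(messages) would emit a fileData part roughly like:
# {"file_data": {"file_uri": "gs://my-bucket/reports/q1.pdf", "mime_type": "application/pdf"}}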
diff --git a/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py b/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py
index 9ac1b1ffc4..9ea1c2ee12 100644
--- a/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py
+++ b/litellm/llms/vertex_ai/gemini/vertex_and_google_ai_studio_gemini.py
@@ -24,6 +24,11 @@ import litellm
import litellm.litellm_core_utils
import litellm.litellm_core_utils.litellm_logging
from litellm import verbose_logger
+from litellm.constants import (
+ DEFAULT_REASONING_EFFORT_HIGH_THINKING_BUDGET,
+ DEFAULT_REASONING_EFFORT_LOW_THINKING_BUDGET,
+ DEFAULT_REASONING_EFFORT_MEDIUM_THINKING_BUDGET,
+)
from litellm.litellm_core_utils.core_helpers import map_finish_reason
from litellm.llms.base_llm.chat.transformation import BaseConfig, BaseLLMException
from litellm.llms.custom_httpx.http_handler import (
@@ -31,6 +36,7 @@ from litellm.llms.custom_httpx.http_handler import (
HTTPHandler,
get_async_httpx_client,
)
+from litellm.types.llms.anthropic import AnthropicThinkingParam
from litellm.types.llms.openai import (
AllMessageValues,
ChatCompletionResponseMessage,
@@ -45,11 +51,13 @@ from litellm.types.llms.vertex_ai import (
ContentType,
FunctionCallingConfig,
FunctionDeclaration,
+ GeminiThinkingConfig,
GenerateContentResponseBody,
HttpxPartType,
LogprobsResult,
ToolConfig,
Tools,
+ UsageMetadata,
)
from litellm.types.utils import (
ChatCompletionTokenLogprob,
@@ -59,7 +67,7 @@ from litellm.types.utils import (
TopLogprob,
Usage,
)
-from litellm.utils import CustomStreamWrapper, ModelResponse
+from litellm.utils import CustomStreamWrapper, ModelResponse, supports_reasoning
from ....utils import _remove_additional_properties, _remove_strict_from_schema
from ..common_utils import VertexAIError, _build_vertex_schema
@@ -190,7 +198,7 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
return super().get_config()
def get_supported_openai_params(self, model: str) -> List[str]:
- return [
+ supported_params = [
"temperature",
"top_p",
"max_tokens",
@@ -207,7 +215,13 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
"extra_headers",
"seed",
"logprobs",
+ "top_logprobs",
+ "modalities",
]
+ if supports_reasoning(model):
+ supported_params.append("reasoning_effort")
+ supported_params.append("thinking")
+ return supported_params
def map_tool_choice_values(
self, model: str, tool_choice: Union[str, dict]
@@ -238,6 +252,7 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
gtool_func_declarations = []
googleSearch: Optional[dict] = None
googleSearchRetrieval: Optional[dict] = None
+ enterpriseWebSearch: Optional[dict] = None
code_execution: Optional[dict] = None
# remove 'additionalProperties' from tools
value = _remove_additional_properties(value)
@@ -245,9 +260,9 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
value = _remove_strict_from_schema(value)
for tool in value:
- openai_function_object: Optional[ChatCompletionToolParamFunctionChunk] = (
- None
- )
+ openai_function_object: Optional[
+ ChatCompletionToolParamFunctionChunk
+ ] = None
if "function" in tool: # tools list
_openai_function_object = ChatCompletionToolParamFunctionChunk( # type: ignore
**tool["function"]
@@ -271,6 +286,8 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
googleSearch = tool["googleSearch"]
elif tool.get("googleSearchRetrieval", None) is not None:
googleSearchRetrieval = tool["googleSearchRetrieval"]
+ elif tool.get("enterpriseWebSearch", None) is not None:
+ enterpriseWebSearch = tool["enterpriseWebSearch"]
elif tool.get("code_execution", None) is not None:
code_execution = tool["code_execution"]
elif openai_function_object is not None:
@@ -297,6 +314,8 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
_tools["googleSearch"] = googleSearch
if googleSearchRetrieval is not None:
_tools["googleSearchRetrieval"] = googleSearchRetrieval
+ if enterpriseWebSearch is not None:
+ _tools["enterpriseWebSearch"] = enterpriseWebSearch
if code_execution is not None:
_tools["code_execution"] = code_execution
return [_tools]
@@ -306,11 +325,77 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
if isinstance(old_schema, list):
for item in old_schema:
if isinstance(item, dict):
- item = _build_vertex_schema(parameters=item)
+ item = _build_vertex_schema(
+ parameters=item, add_property_ordering=True
+ )
+
elif isinstance(old_schema, dict):
- old_schema = _build_vertex_schema(parameters=old_schema)
+ old_schema = _build_vertex_schema(
+ parameters=old_schema, add_property_ordering=True
+ )
return old_schema
+ def apply_response_schema_transformation(self, value: dict, optional_params: dict):
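+ # Illustrative (hypothetical value): {"type": "json_schema", "json_schema": {"schema": {...}}} sets
+ # response_mime_type="application/json" and copies the nested schema into optional_params["response_schema"],
+ # which is then normalized by self._map_response_schema.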
+ # remove 'additionalProperties' from json schema
+ value = _remove_additional_properties(value)
+ # remove 'strict' from json schema
+ value = _remove_strict_from_schema(value)
+ if value["type"] == "json_object":
+ optional_params["response_mime_type"] = "application/json"
+ elif value["type"] == "text":
+ optional_params["response_mime_type"] = "text/plain"
+ if "response_schema" in value:
+ optional_params["response_mime_type"] = "application/json"
+ optional_params["response_schema"] = value["response_schema"]
+ elif value["type"] == "json_schema": # type: ignore
+ if "json_schema" in value and "schema" in value["json_schema"]: # type: ignore
+ optional_params["response_mime_type"] = "application/json"
+ optional_params["response_schema"] = value["json_schema"]["schema"] # type: ignore
+
+ if "response_schema" in optional_params and isinstance(
+ optional_params["response_schema"], dict
+ ):
+ optional_params["response_schema"] = self._map_response_schema(
+ value=optional_params["response_schema"]
+ )
+
+ @staticmethod
+ def _map_reasoning_effort_to_thinking_budget(
+ reasoning_effort: str,
+ ) -> GeminiThinkingConfig:
+ if reasoning_effort == "low":
+ return {
+ "thinkingBudget": DEFAULT_REASONING_EFFORT_LOW_THINKING_BUDGET,
+ "includeThoughts": True,
+ }
+ elif reasoning_effort == "medium":
+ return {
+ "thinkingBudget": DEFAULT_REASONING_EFFORT_MEDIUM_THINKING_BUDGET,
+ "includeThoughts": True,
+ }
+ elif reasoning_effort == "high":
+ return {
+ "thinkingBudget": DEFAULT_REASONING_EFFORT_HIGH_THINKING_BUDGET,
+ "includeThoughts": True,
+ }
+ else:
+ raise ValueError(f"Invalid reasoning effort: {reasoning_effort}")
+
+ @staticmethod
+ def _map_thinking_param(
+ thinking_param: AnthropicThinkingParam,
+ ) -> GeminiThinkingConfig:
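+ # Illustrative mapping (hypothetical values): {"type": "enabled", "budget_tokens": 1024}
+ # -> {"includeThoughts": True, "thinkingBudget": 1024}.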
+ thinking_enabled = thinking_param.get("type") == "enabled"
+ thinking_budget = thinking_param.get("budget_tokens")
+
+ params: GeminiThinkingConfig = {}
+ if thinking_enabled:
+ params["includeThoughts"] = True
+ if thinking_budget is not None and isinstance(thinking_budget, int):
+ params["thinkingBudget"] = thinking_budget
+
+ return params
+
def map_openai_params(
self,
non_default_params: Dict,
@@ -321,56 +406,43 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
for param, value in non_default_params.items():
if param == "temperature":
optional_params["temperature"] = value
- if param == "top_p":
+ elif param == "top_p":
optional_params["top_p"] = value
- if (
+ elif (
param == "stream" and value is True
): # sending stream = False, can cause it to get passed unchecked and raise issues
optional_params["stream"] = value
- if param == "n":
+ elif param == "n":
optional_params["candidate_count"] = value
- if param == "stop":
+ elif param == "stop":
if isinstance(value, str):
optional_params["stop_sequences"] = [value]
elif isinstance(value, list):
optional_params["stop_sequences"] = value
- if param == "max_tokens" or param == "max_completion_tokens":
+ elif param == "max_tokens" or param == "max_completion_tokens":
optional_params["max_output_tokens"] = value
- if param == "response_format" and isinstance(value, dict): # type: ignore
- # remove 'additionalProperties' from json schema
- value = _remove_additional_properties(value)
- # remove 'strict' from json schema
- value = _remove_strict_from_schema(value)
- if value["type"] == "json_object":
- optional_params["response_mime_type"] = "application/json"
- elif value["type"] == "text":
- optional_params["response_mime_type"] = "text/plain"
- if "response_schema" in value:
- optional_params["response_mime_type"] = "application/json"
- optional_params["response_schema"] = value["response_schema"]
- elif value["type"] == "json_schema": # type: ignore
- if "json_schema" in value and "schema" in value["json_schema"]: # type: ignore
- optional_params["response_mime_type"] = "application/json"
- optional_params["response_schema"] = value["json_schema"]["schema"] # type: ignore
-
- if "response_schema" in optional_params and isinstance(
- optional_params["response_schema"], dict
- ):
- optional_params["response_schema"] = self._map_response_schema(
- value=optional_params["response_schema"]
- )
- if param == "frequency_penalty":
+ elif param == "response_format" and isinstance(value, dict): # type: ignore
+ self.apply_response_schema_transformation(
+ value=value, optional_params=optional_params
+ )
+ elif param == "frequency_penalty":
optional_params["frequency_penalty"] = value
- if param == "presence_penalty":
+ elif param == "presence_penalty":
optional_params["presence_penalty"] = value
- if param == "logprobs":
+ elif param == "logprobs":
optional_params["responseLogprobs"] = value
- if (param == "tools" or param == "functions") and isinstance(value, list):
+ elif param == "top_logprobs":
+ optional_params["logprobs"] = value
+ elif (
+ (param == "tools" or param == "functions")
+ and isinstance(value, list)
+ and value
+ ):
optional_params["tools"] = self._map_function(value=value)
optional_params["litellm_param_is_function_call"] = (
True if param == "functions" else False
)
- if param == "tool_choice" and (
+ elif param == "tool_choice" and (
isinstance(value, str) or isinstance(value, dict)
):
_tool_choice_value = self.map_tool_choice_values(
@@ -378,8 +450,28 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
)
if _tool_choice_value is not None:
optional_params["tool_choice"] = _tool_choice_value
- if param == "seed":
+ elif param == "seed":
optional_params["seed"] = value
+ elif param == "reasoning_effort" and isinstance(value, str):
+ optional_params[
+ "thinkingConfig"
+ ] = VertexGeminiConfig._map_reasoning_effort_to_thinking_budget(value)
+ elif param == "thinking":
+ optional_params[
+ "thinkingConfig"
+ ] = VertexGeminiConfig._map_thinking_param(
+ cast(AnthropicThinkingParam, value)
+ )
+ elif param == "modalities" and isinstance(value, list):
+ response_modalities = []
+ for modality in value:
+ if modality == "text":
+ response_modalities.append("TEXT")
+ elif modality == "image":
+ response_modalities.append("IMAGE")
+ else:
+ response_modalities.append("MODALITY_UNSPECIFIED")
+ optional_params["responseModalities"] = response_modalities
if litellm.vertex_ai_safety_settings is not None:
optional_params["safety_settings"] = litellm.vertex_ai_safety_settings
@@ -416,6 +508,49 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
"europe-west9",
]
+ @staticmethod
+ def get_model_for_vertex_ai_url(model: str) -> str:
+ """
+ Returns the model name to use in the request to Vertex AI
+
+ Handles 2 cases:
+ 1. User passed `model="vertex_ai/gemini/ft-uuid"`, we need to return `ft-uuid` for the request to Vertex AI
+ 2. User passed `model="vertex_ai/gemini-2.0-flash-001"`, we need to return `gemini-2.0-flash-001` for the request to Vertex AI
+
+ Args:
+ model (str): The model name to use in the request to Vertex AI
+
+ Returns:
+ str: The model name to use in the request to Vertex AI
+ """
+ if VertexGeminiConfig._is_model_gemini_spec_model(model):
+ return VertexGeminiConfig._get_model_name_from_gemini_spec_model(model)
+ return model
+
+ @staticmethod
+ def _is_model_gemini_spec_model(model: Optional[str]) -> bool:
+ """
+ Returns True if the user is trying to call a custom model in the `/gemini` request/response format
+ """
+ if model is None:
+ return False
+ if "gemini/" in model:
+ return True
+ return False
+
+ @staticmethod
+ def _get_model_name_from_gemini_spec_model(model: str) -> str:
+ """
+ Returns the underlying model name when the model is passed as "vertex_ai/gemini/<model-name>"
+
+ Example:
+ - model = "gemini/1234567890"
+ - returns "1234567890"
+ """
+ if "gemini/" in model:
+ return model.split("/")[-1]
+ return model
+
def get_flagged_finish_reasons(self) -> Dict[str, str]:
"""
Return Dictionary of finish reasons which indicate response was flagged
@@ -442,14 +577,28 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
def get_assistant_content_message(
self, parts: List[HttpxPartType]
- ) -> Optional[str]:
- _content_str = ""
+ ) -> Tuple[Optional[str], Optional[str]]:
+ content_str: Optional[str] = None
+ reasoning_content_str: Optional[str] = None
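+ # Illustrative (hypothetical parts): [{"text": "plan steps...", "thought": True}, {"text": "Final answer"}]
+ # yields content_str="Final answer" and reasoning_content_str="plan steps...".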
for part in parts:
+ _content_str = ""
if "text" in part:
_content_str += part["text"]
- if _content_str:
- return _content_str
- return None
+ elif "inlineData" in part: # base64 encoded image
+ _content_str += "data:{};base64,{}".format(
+ part["inlineData"]["mimeType"], part["inlineData"]["data"]
+ )
+ if len(_content_str) > 0:
+ if part.get("thought") is True:
+ if reasoning_content_str is None:
+ reasoning_content_str = ""
+ reasoning_content_str += _content_str
+ else:
+ if content_str is None:
+ content_str = ""
+ content_str += _content_str
+
+ return content_str, reasoning_content_str
def _transform_parts(
self,
@@ -592,35 +741,141 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
return model_response
+ def is_candidate_token_count_inclusive(self, usage_metadata: UsageMetadata) -> bool:
+ """
+ Check if the candidate token count is inclusive of the thinking token count
+
+ if promptTokenCount + candidatesTokenCount == totalTokenCount, then the candidate token count is inclusive of the thinking token count
+
+ else the candidate token count is exclusive of the thinking token count
+
+ Addresses - https://github.com/BerriAI/litellm/pull/10141#discussion_r2052272035
+ """
+ if usage_metadata.get("promptTokenCount", 0) + usage_metadata.get(
+ "candidatesTokenCount", 0
+ ) == usage_metadata.get("totalTokenCount", 0):
+ return True
+ else:
+ return False
+
def _calculate_usage(
self,
completion_response: GenerateContentResponseBody,
) -> Usage:
cached_tokens: Optional[int] = None
+ audio_tokens: Optional[int] = None
+ text_tokens: Optional[int] = None
prompt_tokens_details: Optional[PromptTokensDetailsWrapper] = None
+ reasoning_tokens: Optional[int] = None
if "cachedContentTokenCount" in completion_response["usageMetadata"]:
cached_tokens = completion_response["usageMetadata"][
"cachedContentTokenCount"
]
+ if "promptTokensDetails" in completion_response["usageMetadata"]:
+ for detail in completion_response["usageMetadata"]["promptTokensDetails"]:
+ if detail["modality"] == "AUDIO":
+ audio_tokens = detail["tokenCount"]
+ elif detail["modality"] == "TEXT":
+ text_tokens = detail["tokenCount"]
+ if "thoughtsTokenCount" in completion_response["usageMetadata"]:
+ reasoning_tokens = completion_response["usageMetadata"][
+ "thoughtsTokenCount"
+ ]
+ prompt_tokens_details = PromptTokensDetailsWrapper(
+ cached_tokens=cached_tokens,
+ audio_tokens=audio_tokens,
+ text_tokens=text_tokens,
+ )
- if cached_tokens is not None:
- prompt_tokens_details = PromptTokensDetailsWrapper(
- cached_tokens=cached_tokens,
+ completion_tokens = completion_response["usageMetadata"].get(
+ "candidatesTokenCount", 0
+ )
+ if (
+ not self.is_candidate_token_count_inclusive(
+ completion_response["usageMetadata"]
)
+ and reasoning_tokens
+ ):
+ completion_tokens = reasoning_tokens + completion_tokens
## GET USAGE ##
usage = Usage(
prompt_tokens=completion_response["usageMetadata"].get(
"promptTokenCount", 0
),
- completion_tokens=completion_response["usageMetadata"].get(
- "candidatesTokenCount", 0
- ),
+ completion_tokens=completion_tokens,
total_tokens=completion_response["usageMetadata"].get("totalTokenCount", 0),
prompt_tokens_details=prompt_tokens_details,
+ reasoning_tokens=reasoning_tokens,
)
return usage
+ def _process_candidates(self, _candidates, model_response, litellm_params):
+ """Helper method to process candidates and extract metadata"""
+ grounding_metadata: List[dict] = []
+ safety_ratings: List = []
+ citation_metadata: List = []
+ chat_completion_message: ChatCompletionResponseMessage = {"role": "assistant"}
+ chat_completion_logprobs: Optional[ChoiceLogprobs] = None
+ tools: Optional[List[ChatCompletionToolCallChunk]] = []
+ functions: Optional[ChatCompletionToolCallFunctionChunk] = None
+
+ for idx, candidate in enumerate(_candidates):
+ if "content" not in candidate:
+ continue
+
+ if "groundingMetadata" in candidate:
+ grounding_metadata.append(candidate["groundingMetadata"]) # type: ignore
+
+ if "safetyRatings" in candidate:
+ safety_ratings.append(candidate["safetyRatings"])
+
+ if "citationMetadata" in candidate:
+ citation_metadata.append(candidate["citationMetadata"])
+
+ if "parts" in candidate["content"]:
+ (
+ content,
+ reasoning_content,
+ ) = VertexGeminiConfig().get_assistant_content_message(
+ parts=candidate["content"]["parts"]
+ )
+ if content is not None:
+ chat_completion_message["content"] = content
+ if reasoning_content is not None:
+ chat_completion_message["reasoning_content"] = reasoning_content
+
+ functions, tools = self._transform_parts(
+ parts=candidate["content"]["parts"],
+ index=candidate.get("index", idx),
+ is_function_call=litellm_params.get(
+ "litellm_param_is_function_call"
+ ),
+ )
+
+ if "logprobsResult" in candidate:
+ chat_completion_logprobs = self._transform_logprobs(
+ logprobs_result=candidate["logprobsResult"]
+ )
+
+ if tools:
+ chat_completion_message["tool_calls"] = tools
+
+ if functions is not None:
+ chat_completion_message["function_call"] = functions
+
+ choice = litellm.Choices(
+ finish_reason=candidate.get("finishReason", "stop"),
+ index=candidate.get("index", idx),
+ message=chat_completion_message, # type: ignore
+ logprobs=chat_completion_logprobs,
+ enhancements=None,
+ )
+
+ model_response.choices.append(choice)
+
+ return grounding_metadata, safety_ratings, citation_metadata
+
def transform_response(
self,
model: str,
@@ -682,90 +937,38 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
completion_response=completion_response,
)
- model_response.choices = [] # type: ignore
+ model_response.choices = []
try:
- ## CHECK IF GROUNDING METADATA IN REQUEST
- grounding_metadata: List[dict] = []
- safety_ratings: List = []
- citation_metadata: List = []
- ## GET TEXT ##
- chat_completion_message: ChatCompletionResponseMessage = {
- "role": "assistant"
- }
- chat_completion_logprobs: Optional[ChoiceLogprobs] = None
- tools: Optional[List[ChatCompletionToolCallChunk]] = []
- functions: Optional[ChatCompletionToolCallFunctionChunk] = None
+ grounding_metadata, safety_ratings, citation_metadata = [], [], []
if _candidates:
- for idx, candidate in enumerate(_candidates):
- if "content" not in candidate:
- continue
-
- if "groundingMetadata" in candidate:
- grounding_metadata.append(candidate["groundingMetadata"]) # type: ignore
-
- if "safetyRatings" in candidate:
- safety_ratings.append(candidate["safetyRatings"])
-
- if "citationMetadata" in candidate:
- citation_metadata.append(candidate["citationMetadata"])
- if "parts" in candidate["content"]:
- chat_completion_message[
- "content"
- ] = VertexGeminiConfig().get_assistant_content_message(
- parts=candidate["content"]["parts"]
- )
-
- functions, tools = self._transform_parts(
- parts=candidate["content"]["parts"],
- index=candidate.get("index", idx),
- is_function_call=litellm_params.get(
- "litellm_param_is_function_call"
- ),
- )
-
- if "logprobsResult" in candidate:
- chat_completion_logprobs = self._transform_logprobs(
- logprobs_result=candidate["logprobsResult"]
- )
-
- if tools:
- chat_completion_message["tool_calls"] = tools
-
- if functions is not None:
- chat_completion_message["function_call"] = functions
- choice = litellm.Choices(
- finish_reason=candidate.get("finishReason", "stop"),
- index=candidate.get("index", idx),
- message=chat_completion_message, # type: ignore
- logprobs=chat_completion_logprobs,
- enhancements=None,
- )
-
- model_response.choices.append(choice)
+ (
+ grounding_metadata,
+ safety_ratings,
+ citation_metadata,
+ ) = self._process_candidates(
+ _candidates, model_response, litellm_params
+ )
usage = self._calculate_usage(completion_response=completion_response)
setattr(model_response, "usage", usage)
- ## ADD GROUNDING METADATA ##
+ ## ADD METADATA TO RESPONSE ##
setattr(model_response, "vertex_ai_grounding_metadata", grounding_metadata)
model_response._hidden_params[
"vertex_ai_grounding_metadata"
- ] = ( # older approach - maintaining to prevent regressions
- grounding_metadata
- )
+ ] = grounding_metadata
- ## ADD SAFETY RATINGS ##
setattr(model_response, "vertex_ai_safety_results", safety_ratings)
- model_response._hidden_params["vertex_ai_safety_results"] = (
- safety_ratings # older approach - maintaining to prevent regressions
- )
+ model_response._hidden_params[
+ "vertex_ai_safety_results"
+ ] = safety_ratings # older approach - maintaining to prevent regressions
## ADD CITATION METADATA ##
setattr(model_response, "vertex_ai_citation_metadata", citation_metadata)
- model_response._hidden_params["vertex_ai_citation_metadata"] = (
- citation_metadata # older approach - maintaining to prevent regressions
- )
+ model_response._hidden_params[
+ "vertex_ai_citation_metadata"
+ ] = citation_metadata # older approach - maintaining to prevent regressions
except Exception as e:
raise VertexAIError(
@@ -808,6 +1011,7 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
model: str,
messages: List[AllMessageValues],
optional_params: Dict,
+ litellm_params: Dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> Dict:
@@ -925,7 +1129,7 @@ class VertexLLM(VertexBase):
logging_obj,
stream,
optional_params: dict,
- litellm_params=None,
+ litellm_params: dict,
logger_fn=None,
api_base: Optional[str] = None,
client: Optional[AsyncHTTPHandler] = None,
@@ -966,6 +1170,7 @@ class VertexLLM(VertexBase):
model=model,
messages=messages,
optional_params=optional_params,
+ litellm_params=litellm_params,
)
## LOGGING
@@ -973,7 +1178,7 @@ class VertexLLM(VertexBase):
input=messages,
api_key="",
additional_args={
- "complete_input_dict": data,
+ "complete_input_dict": request_body,
"api_base": api_base,
"headers": headers,
},
@@ -1052,6 +1257,7 @@ class VertexLLM(VertexBase):
model=model,
messages=messages,
optional_params=optional_params,
+ litellm_params=litellm_params,
)
request_body = await async_transform_request_body(**data) # type: ignore
@@ -1225,6 +1431,7 @@ class VertexLLM(VertexBase):
model=model,
messages=messages,
optional_params=optional_params,
+ litellm_params=litellm_params,
)
## TRANSFORMATION ##
diff --git a/litellm/llms/vertex_ai/gemini_embeddings/batch_embed_content_handler.py b/litellm/llms/vertex_ai/gemini_embeddings/batch_embed_content_handler.py
index 0fe5145a14..ecfe2ee8b4 100644
--- a/litellm/llms/vertex_ai/gemini_embeddings/batch_embed_content_handler.py
+++ b/litellm/llms/vertex_ai/gemini_embeddings/batch_embed_content_handler.py
@@ -47,7 +47,6 @@ class GoogleBatchEmbeddings(VertexLLM):
timeout=300,
client=None,
) -> EmbeddingResponse:
-
_auth_header, vertex_project = self._ensure_access_token(
credentials=vertex_credentials,
project_id=vertex_project,
diff --git a/litellm/llms/vertex_ai/gemini_embeddings/batch_embed_content_transformation.py b/litellm/llms/vertex_ai/gemini_embeddings/batch_embed_content_transformation.py
index 592dac5846..2c0f5dad22 100644
--- a/litellm/llms/vertex_ai/gemini_embeddings/batch_embed_content_transformation.py
+++ b/litellm/llms/vertex_ai/gemini_embeddings/batch_embed_content_transformation.py
@@ -52,7 +52,6 @@ def process_response(
model: str,
_predictions: VertexAIBatchEmbeddingsResponseObject,
) -> EmbeddingResponse:
-
openai_embeddings: List[Embedding] = []
for embedding in _predictions["embeddings"]:
openai_embedding = Embedding(
diff --git a/litellm/llms/vertex_ai/image_generation/image_generation_handler.py b/litellm/llms/vertex_ai/image_generation/image_generation_handler.py
index 1d5322c08d..e83f4b6f03 100644
--- a/litellm/llms/vertex_ai/image_generation/image_generation_handler.py
+++ b/litellm/llms/vertex_ai/image_generation/image_generation_handler.py
@@ -43,22 +43,23 @@ class VertexImageGeneration(VertexLLM):
def image_generation(
self,
prompt: str,
+ api_base: Optional[str],
vertex_project: Optional[str],
vertex_location: Optional[str],
vertex_credentials: Optional[VERTEX_CREDENTIALS_TYPES],
model_response: ImageResponse,
logging_obj: Any,
- model: Optional[
- str
- ] = "imagegeneration", # vertex ai uses imagegeneration as the default model
+ model: str = "imagegeneration", # vertex ai uses imagegeneration as the default model
client: Optional[Any] = None,
optional_params: Optional[dict] = None,
timeout: Optional[int] = None,
aimg_generation=False,
+ extra_headers: Optional[dict] = None,
) -> ImageResponse:
if aimg_generation is True:
return self.aimage_generation( # type: ignore
prompt=prompt,
+ api_base=api_base,
vertex_project=vertex_project,
vertex_location=vertex_location,
vertex_credentials=vertex_credentials,
@@ -83,13 +84,27 @@ class VertexImageGeneration(VertexLLM):
else:
sync_handler = client # type: ignore
- url = f"https://{vertex_location}-aiplatform.googleapis.com/v1/projects/{vertex_project}/locations/{vertex_location}/publishers/google/models/{model}:predict"
+ # url = f"https://{vertex_location}-aiplatform.googleapis.com/v1/projects/{vertex_project}/locations/{vertex_location}/publishers/google/models/{model}:predict"
+ auth_header: Optional[str] = None
auth_header, _ = self._ensure_access_token(
credentials=vertex_credentials,
project_id=vertex_project,
custom_llm_provider="vertex_ai",
)
+ auth_header, api_base = self._get_token_and_url(
+ model=model,
+ gemini_api_key=None,
+ auth_header=auth_header,
+ vertex_project=vertex_project,
+ vertex_location=vertex_location,
+ vertex_credentials=vertex_credentials,
+ stream=False,
+ custom_llm_provider="vertex_ai",
+ api_base=api_base,
+ should_use_v1beta1_features=False,
+ mode="image_generation",
+ )
optional_params = optional_params or {
"sampleCount": 1
} # default optional params
@@ -99,31 +114,21 @@ class VertexImageGeneration(VertexLLM):
"parameters": optional_params,
}
- request_str = f"\n curl -X POST \\\n -H \"Authorization: Bearer {auth_header[:10] + 'XXXXXXXXXX'}\" \\\n -H \"Content-Type: application/json; charset=utf-8\" \\\n -d {request_data} \\\n \"{url}\""
- logging_obj.pre_call(
- input=prompt,
- api_key=None,
- additional_args={
- "complete_input_dict": optional_params,
- "request_str": request_str,
- },
- )
+ headers = self.set_headers(auth_header=auth_header, extra_headers=extra_headers)
logging_obj.pre_call(
input=prompt,
- api_key=None,
+ api_key="",
additional_args={
"complete_input_dict": optional_params,
- "request_str": request_str,
+ "api_base": api_base,
+ "headers": headers,
},
)
response = sync_handler.post(
- url=url,
- headers={
- "Content-Type": "application/json; charset=utf-8",
- "Authorization": f"Bearer {auth_header}",
- },
+ url=api_base,
+ headers=headers,
data=json.dumps(request_data),
)
@@ -138,17 +143,17 @@ class VertexImageGeneration(VertexLLM):
async def aimage_generation(
self,
prompt: str,
+ api_base: Optional[str],
vertex_project: Optional[str],
vertex_location: Optional[str],
vertex_credentials: Optional[VERTEX_CREDENTIALS_TYPES],
model_response: litellm.ImageResponse,
logging_obj: Any,
- model: Optional[
- str
- ] = "imagegeneration", # vertex ai uses imagegeneration as the default model
+ model: str = "imagegeneration", # vertex ai uses imagegeneration as the default model
client: Optional[AsyncHTTPHandler] = None,
optional_params: Optional[dict] = None,
timeout: Optional[int] = None,
+ extra_headers: Optional[dict] = None,
):
response = None
if client is None:
@@ -169,7 +174,6 @@ class VertexImageGeneration(VertexLLM):
# make POST request to
# https://us-central1-aiplatform.googleapis.com/v1/projects/PROJECT_ID/locations/us-central1/publishers/google/models/imagegeneration:predict
- url = f"https://{vertex_location}-aiplatform.googleapis.com/v1/projects/{vertex_project}/locations/{vertex_location}/publishers/google/models/{model}:predict"
"""
Docs link: https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/imagegeneration?project=adroit-crow-413218
@@ -188,11 +192,25 @@ class VertexImageGeneration(VertexLLM):
} \
"https://us-central1-aiplatform.googleapis.com/v1/projects/PROJECT_ID/locations/us-central1/publishers/google/models/imagegeneration:predict"
"""
+ auth_header: Optional[str] = None
auth_header, _ = self._ensure_access_token(
credentials=vertex_credentials,
project_id=vertex_project,
custom_llm_provider="vertex_ai",
)
+ auth_header, api_base = self._get_token_and_url(
+ model=model,
+ gemini_api_key=None,
+ auth_header=auth_header,
+ vertex_project=vertex_project,
+ vertex_location=vertex_location,
+ vertex_credentials=vertex_credentials,
+ stream=False,
+ custom_llm_provider="vertex_ai",
+ api_base=api_base,
+ should_use_v1beta1_features=False,
+ mode="image_generation",
+ )
optional_params = optional_params or {
"sampleCount": 1
} # default optional params
@@ -202,22 +220,21 @@ class VertexImageGeneration(VertexLLM):
"parameters": optional_params,
}
- request_str = f"\n curl -X POST \\\n -H \"Authorization: Bearer {auth_header[:10] + 'XXXXXXXXXX'}\" \\\n -H \"Content-Type: application/json; charset=utf-8\" \\\n -d {request_data} \\\n \"{url}\""
+ headers = self.set_headers(auth_header=auth_header, extra_headers=extra_headers)
+
logging_obj.pre_call(
input=prompt,
- api_key=None,
+ api_key="",
additional_args={
"complete_input_dict": optional_params,
- "request_str": request_str,
+ "api_base": api_base,
+ "headers": headers,
},
)
response = await self.async_handler.post(
- url=url,
- headers={
- "Content-Type": "application/json; charset=utf-8",
- "Authorization": f"Bearer {auth_header}",
- },
+ url=api_base,
+ headers=headers,
data=json.dumps(request_data),
)
diff --git a/litellm/llms/vertex_ai/multimodal_embeddings/embedding_handler.py b/litellm/llms/vertex_ai/multimodal_embeddings/embedding_handler.py
index f63d1ce11e..8aebd83cc4 100644
--- a/litellm/llms/vertex_ai/multimodal_embeddings/embedding_handler.py
+++ b/litellm/llms/vertex_ai/multimodal_embeddings/embedding_handler.py
@@ -1,5 +1,5 @@
import json
-from typing import List, Literal, Optional, Union
+from typing import Literal, Optional, Union
import httpx
@@ -14,15 +14,11 @@ from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
VertexAIError,
VertexLLM,
)
-from litellm.types.llms.vertex_ai import (
- Instance,
- InstanceImage,
- InstanceVideo,
- MultimodalPredictions,
- VertexMultimodalEmbeddingRequest,
-)
-from litellm.types.utils import Embedding, EmbeddingResponse
-from litellm.utils import is_base64_encoded
+from litellm.types.utils import EmbeddingResponse
+
+from .transformation import VertexAIMultimodalEmbeddingConfig
+
+vertex_multimodal_embedding_handler = VertexAIMultimodalEmbeddingConfig()
class VertexMultimodalEmbedding(VertexLLM):
@@ -41,9 +37,11 @@ class VertexMultimodalEmbedding(VertexLLM):
model_response: EmbeddingResponse,
custom_llm_provider: Literal["gemini", "vertex_ai"],
optional_params: dict,
+ litellm_params: dict,
logging_obj: LiteLLMLoggingObj,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
+ headers: dict = {},
encoding=None,
vertex_project=None,
vertex_location=None,
@@ -52,7 +50,6 @@ class VertexMultimodalEmbedding(VertexLLM):
timeout=300,
client=None,
) -> EmbeddingResponse:
-
_auth_header, vertex_project = self._ensure_access_token(
credentials=vertex_credentials,
project_id=vertex_project,
@@ -86,31 +83,19 @@ class VertexMultimodalEmbedding(VertexLLM):
else:
sync_handler = client # type: ignore
- optional_params = optional_params or {}
+ request_data = vertex_multimodal_embedding_handler.transform_embedding_request(
+ model, input, optional_params, headers
+ )
- request_data = VertexMultimodalEmbeddingRequest()
-
- if "instances" in optional_params:
- request_data["instances"] = optional_params["instances"]
- elif isinstance(input, list):
- vertex_instances: List[Instance] = self.process_openai_embedding_input(
- _input=input
- )
- request_data["instances"] = vertex_instances
-
- else:
- # construct instances
- vertex_request_instance = Instance(**optional_params)
-
- if isinstance(input, str):
- vertex_request_instance = self._process_input_element(input)
-
- request_data["instances"] = [vertex_request_instance]
-
- headers = {
- "Content-Type": "application/json; charset=utf-8",
- "Authorization": f"Bearer {auth_header}",
- }
+ headers = vertex_multimodal_embedding_handler.validate_environment(
+ headers=headers,
+ model=model,
+ messages=[],
+ optional_params=optional_params,
+ api_key=auth_header,
+ api_base=api_base,
+ litellm_params=litellm_params,
+ )
## LOGGING
logging_obj.pre_call(
@@ -132,6 +117,10 @@ class VertexMultimodalEmbedding(VertexLLM):
headers=headers,
client=client,
model_response=model_response,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
+ logging_obj=logging_obj,
+ api_key=api_key,
)
response = sync_handler.post(
@@ -140,34 +129,30 @@ class VertexMultimodalEmbedding(VertexLLM):
data=json.dumps(request_data),
)
- if response.status_code != 200:
- raise Exception(f"Error: {response.status_code} {response.text}")
-
- _json_response = response.json()
- if "predictions" not in _json_response:
- raise litellm.InternalServerError(
- message=f"embedding response does not contain 'predictions', got {_json_response}",
- llm_provider="vertex_ai",
- model=model,
- )
- _predictions = _json_response["predictions"]
- vertex_predictions = MultimodalPredictions(predictions=_predictions)
- model_response.data = self.transform_embedding_response_to_openai(
- predictions=vertex_predictions
+ return vertex_multimodal_embedding_handler.transform_embedding_response(
+ model=model,
+ raw_response=response,
+ model_response=model_response,
+ logging_obj=logging_obj,
+ api_key=api_key,
+ request_data=request_data,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
)
- model_response.model = model
-
- return model_response
async def async_multimodal_embedding(
self,
model: str,
api_base: str,
- data: VertexMultimodalEmbeddingRequest,
+ optional_params: dict,
+ litellm_params: dict,
+ data: dict,
model_response: litellm.EmbeddingResponse,
timeout: Optional[Union[float, httpx.Timeout]],
+ logging_obj: LiteLLMLoggingObj,
headers={},
client: Optional[AsyncHTTPHandler] = None,
+ api_key: Optional[str] = None,
) -> litellm.EmbeddingResponse:
if client is None:
_params = {}
@@ -191,104 +176,13 @@ class VertexMultimodalEmbedding(VertexLLM):
except httpx.TimeoutException:
raise VertexAIError(status_code=408, message="Timeout error occurred.")
- _json_response = response.json()
- if "predictions" not in _json_response:
- raise litellm.InternalServerError(
- message=f"embedding response does not contain 'predictions', got {_json_response}",
- llm_provider="vertex_ai",
- model=model,
- )
- _predictions = _json_response["predictions"]
-
- vertex_predictions = MultimodalPredictions(predictions=_predictions)
- model_response.data = self.transform_embedding_response_to_openai(
- predictions=vertex_predictions
+ return vertex_multimodal_embedding_handler.transform_embedding_response(
+ model=model,
+ raw_response=response,
+ model_response=model_response,
+ logging_obj=logging_obj,
+ api_key=api_key,
+ request_data=data,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
)
- model_response.model = model
-
- return model_response
-
- def _process_input_element(self, input_element: str) -> Instance:
- """
- Process the input element for multimodal embedding requests. checks if the if the input is gcs uri, base64 encoded image or plain text.
-
- Args:
- input_element (str): The input element to process.
-
- Returns:
- Dict[str, Any]: A dictionary representing the processed input element.
- """
- if len(input_element) == 0:
- return Instance(text=input_element)
- elif "gs://" in input_element:
- if "mp4" in input_element:
- return Instance(video=InstanceVideo(gcsUri=input_element))
- else:
- return Instance(image=InstanceImage(gcsUri=input_element))
- elif is_base64_encoded(s=input_element):
- return Instance(image=InstanceImage(bytesBase64Encoded=input_element))
- else:
- return Instance(text=input_element)
-
- def process_openai_embedding_input(
- self, _input: Union[list, str]
- ) -> List[Instance]:
- """
- Process the input for multimodal embedding requests.
-
- Args:
- _input (Union[list, str]): The input data to process.
-
- Returns:
- List[Instance]: A list of processed VertexAI Instance objects.
- """
-
- _input_list = None
- if not isinstance(_input, list):
- _input_list = [_input]
- else:
- _input_list = _input
-
- processed_instances = []
- for element in _input_list:
- if isinstance(element, str):
- instance = Instance(**self._process_input_element(element))
- elif isinstance(element, dict):
- instance = Instance(**element)
- else:
- raise ValueError(f"Unsupported input type: {type(element)}")
- processed_instances.append(instance)
-
- return processed_instances
-
- def transform_embedding_response_to_openai(
- self, predictions: MultimodalPredictions
- ) -> List[Embedding]:
-
- openai_embeddings: List[Embedding] = []
- if "predictions" in predictions:
- for idx, _prediction in enumerate(predictions["predictions"]):
- if _prediction:
- if "textEmbedding" in _prediction:
- openai_embedding_object = Embedding(
- embedding=_prediction["textEmbedding"],
- index=idx,
- object="embedding",
- )
- openai_embeddings.append(openai_embedding_object)
- elif "imageEmbedding" in _prediction:
- openai_embedding_object = Embedding(
- embedding=_prediction["imageEmbedding"],
- index=idx,
- object="embedding",
- )
- openai_embeddings.append(openai_embedding_object)
- elif "videoEmbeddings" in _prediction:
- for video_embedding in _prediction["videoEmbeddings"]:
- openai_embedding_object = Embedding(
- embedding=video_embedding["embedding"],
- index=idx,
- object="embedding",
- )
- openai_embeddings.append(openai_embedding_object)
- return openai_embeddings
diff --git a/litellm/llms/vertex_ai/multimodal_embeddings/transformation.py b/litellm/llms/vertex_ai/multimodal_embeddings/transformation.py
new file mode 100644
index 0000000000..5bf02ad765
--- /dev/null
+++ b/litellm/llms/vertex_ai/multimodal_embeddings/transformation.py
@@ -0,0 +1,297 @@
+from typing import List, Optional, Union, cast
+
+from httpx import Headers, Response
+
+from litellm.exceptions import InternalServerError
+from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.llms.base_llm.embedding.transformation import LiteLLMLoggingObj
+from litellm.types.llms.openai import AllEmbeddingInputValues, AllMessageValues
+from litellm.types.llms.vertex_ai import (
+ Instance,
+ InstanceImage,
+ InstanceVideo,
+ MultimodalPredictions,
+ VertexMultimodalEmbeddingRequest,
+)
+from litellm.types.utils import (
+ Embedding,
+ EmbeddingResponse,
+ PromptTokensDetailsWrapper,
+ Usage,
+)
+from litellm.utils import _count_characters, is_base64_encoded
+
+from ...base_llm.embedding.transformation import BaseEmbeddingConfig
+from ..common_utils import VertexAIError
+
+
+class VertexAIMultimodalEmbeddingConfig(BaseEmbeddingConfig):
+ def get_supported_openai_params(self, model: str) -> list:
+ return ["dimensions"]
+
+ def map_openai_params(
+ self,
+ non_default_params: dict,
+ optional_params: dict,
+ model: str,
+ drop_params: bool,
+ ) -> dict:
+ for param, value in non_default_params.items():
+ if param == "dimensions":
+ optional_params["outputDimensionality"] = value
+ return optional_params
+
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ default_headers = {
+ "Content-Type": "application/json; charset=utf-8",
+ "Authorization": f"Bearer {api_key}",
+ }
+ headers.update(default_headers)
+ return headers
+
+ def _process_input_element(self, input_element: str) -> Instance:
+ """
+ Process the input element for multimodal embedding requests. Checks whether the input is a GCS URI, a base64-encoded image, or plain text.
+
+ Args:
+ input_element (str): The input element to process.
+
+ Returns:
+ Instance: The processed input element as a VertexAI Instance.
+ """
+ if len(input_element) == 0:
+ return Instance(text=input_element)
+ elif "gs://" in input_element:
+ if "mp4" in input_element:
+ return Instance(video=InstanceVideo(gcsUri=input_element))
+ else:
+ return Instance(image=InstanceImage(gcsUri=input_element))
+ elif is_base64_encoded(s=input_element):
+ return Instance(
+ image=InstanceImage(
+ bytesBase64Encoded=(
+ input_element.split(",")[1]
+ if "," in input_element
+ else input_element
+ )
+ )
+ )
+ else:
+ return Instance(text=input_element)
+
+ def process_openai_embedding_input(
+ self, _input: Union[list, str]
+ ) -> List[Instance]:
+ """
+ Process the input for multimodal embedding requests.
+
+ Args:
+ _input (Union[list, str]): The input data to process.
+
+ Returns:
+ List[Instance]: A list of processed VertexAI Instance objects.
+ """
+ _input_list = [_input] if not isinstance(_input, list) else _input
+ processed_instances = []
+
+ i = 0
+ while i < len(_input_list):
+ current = _input_list[i]
+
+ # Look ahead for potential media elements
+ next_elem = _input_list[i + 1] if i + 1 < len(_input_list) else None
+
+ # If current is a text and next is a GCS URI, or current is a GCS URI
+ if isinstance(current, str):
+ instance_args: Instance = {}
+
+ # Process current element
+ if "gs://" not in current:
+ instance_args["text"] = current
+ elif "mp4" in current:
+ instance_args["video"] = InstanceVideo(gcsUri=current)
+ else:
+ instance_args["image"] = InstanceImage(gcsUri=current)
+
+ # Check next element if it's a GCS URI
+ if next_elem and isinstance(next_elem, str) and "gs://" in next_elem:
+ if "mp4" in next_elem:
+ instance_args["video"] = InstanceVideo(gcsUri=next_elem)
+ else:
+ instance_args["image"] = InstanceImage(gcsUri=next_elem)
+ i += 2 # Skip next element since we processed it
+ else:
+ i += 1 # Move to next element
+
+ processed_instances.append(instance_args)
+ continue
+
+ # Handle dict or other types
+ if isinstance(current, dict):
+ instance = Instance(**current)
+ processed_instances.append(instance)
+ else:
+ raise ValueError(f"Unsupported input type: {type(current)}")
+ i += 1
+
+ return processed_instances
+
+ def transform_embedding_request(
+ self,
+ model: str,
+ input: AllEmbeddingInputValues,
+ optional_params: dict,
+ headers: dict,
+ ) -> dict:
+ optional_params = optional_params or {}
+
+ request_data = VertexMultimodalEmbeddingRequest(instances=[])
+
+ if "instances" in optional_params:
+ request_data["instances"] = optional_params["instances"]
+ elif isinstance(input, list):
+ vertex_instances: List[Instance] = self.process_openai_embedding_input(
+ _input=input
+ )
+ request_data["instances"] = vertex_instances
+
+ else:
+ # construct instances
+ vertex_request_instance = Instance(**optional_params)
+
+ if isinstance(input, str):
+ vertex_request_instance = self._process_input_element(input)
+
+ request_data["instances"] = [vertex_request_instance]
+
+ return cast(dict, request_data)
+
+ def transform_embedding_response(
+ self,
+ model: str,
+ raw_response: Response,
+ model_response: EmbeddingResponse,
+ logging_obj: LiteLLMLoggingObj,
+ api_key: Optional[str],
+ request_data: dict,
+ optional_params: dict,
+ litellm_params: dict,
+ ) -> EmbeddingResponse:
+ if raw_response.status_code != 200:
+ raise Exception(f"Error: {raw_response.status_code} {raw_response.text}")
+
+ _json_response = raw_response.json()
+ if "predictions" not in _json_response:
+ raise InternalServerError(
+ message=f"embedding response does not contain 'predictions', got {_json_response}",
+ llm_provider="vertex_ai",
+ model=model,
+ )
+ _predictions = _json_response["predictions"]
+ vertex_predictions = MultimodalPredictions(predictions=_predictions)
+ model_response.data = self.transform_embedding_response_to_openai(
+ predictions=vertex_predictions
+ )
+ model_response.model = model
+
+ model_response.usage = self.calculate_usage(
+ request_data=cast(VertexMultimodalEmbeddingRequest, request_data),
+ vertex_predictions=vertex_predictions,
+ )
+
+ return model_response
+
+ def calculate_usage(
+ self,
+ request_data: VertexMultimodalEmbeddingRequest,
+ vertex_predictions: MultimodalPredictions,
+ ) -> Usage:
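+ # Illustrative (hypothetical request): one text instance plus one image instance with no video predictions
+ # -> character_count from _count_characters over the concatenated text, image_count=1,
+ # video_length_seconds=0, and prompt/completion/total tokens reported as 0.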
+ ## Calculate text embeddings usage
+ prompt: Optional[str] = None
+ character_count: Optional[int] = None
+
+ for instance in request_data["instances"]:
+ text = instance.get("text")
+ if text:
+ if prompt is None:
+ prompt = text
+ else:
+ prompt += text
+
+ if prompt is not None:
+ character_count = _count_characters(prompt)
+
+ ## Calculate image embeddings usage
+ image_count = 0
+ for instance in request_data["instances"]:
+ if instance.get("image"):
+ image_count += 1
+
+ ## Calculate video embeddings usage
+ video_length_seconds = 0
+ for prediction in vertex_predictions["predictions"]:
+ video_embeddings = prediction.get("videoEmbeddings")
+ if video_embeddings:
+ for embedding in video_embeddings:
+ duration = embedding["endOffsetSec"] - embedding["startOffsetSec"]
+ video_length_seconds += duration
+
+ prompt_tokens_details = PromptTokensDetailsWrapper(
+ character_count=character_count,
+ image_count=image_count,
+ video_length_seconds=video_length_seconds,
+ )
+
+ return Usage(
+ prompt_tokens=0,
+ completion_tokens=0,
+ total_tokens=0,
+ prompt_tokens_details=prompt_tokens_details,
+ )
+
+ def transform_embedding_response_to_openai(
+ self, predictions: MultimodalPredictions
+ ) -> List[Embedding]:
+ openai_embeddings: List[Embedding] = []
+ if "predictions" in predictions:
+ for idx, _prediction in enumerate(predictions["predictions"]):
+ if _prediction:
+ if "textEmbedding" in _prediction:
+ openai_embedding_object = Embedding(
+ embedding=_prediction["textEmbedding"],
+ index=idx,
+ object="embedding",
+ )
+ openai_embeddings.append(openai_embedding_object)
+ elif "imageEmbedding" in _prediction:
+ openai_embedding_object = Embedding(
+ embedding=_prediction["imageEmbedding"],
+ index=idx,
+ object="embedding",
+ )
+ openai_embeddings.append(openai_embedding_object)
+ elif "videoEmbeddings" in _prediction:
+ for video_embedding in _prediction["videoEmbeddings"]:
+ openai_embedding_object = Embedding(
+ embedding=video_embedding["embedding"],
+ index=idx,
+ object="embedding",
+ )
+ openai_embeddings.append(openai_embedding_object)
+ return openai_embeddings
+
+ def get_error_class(
+ self, error_message: str, status_code: int, headers: Union[dict, Headers]
+ ) -> BaseLLMException:
+ return VertexAIError(
+ status_code=status_code, message=error_message, headers=headers
+ )
diff --git a/litellm/llms/vertex_ai/vertex_ai_non_gemini.py b/litellm/llms/vertex_ai/vertex_ai_non_gemini.py
index 744e1eb317..df267d9623 100644
--- a/litellm/llms/vertex_ai/vertex_ai_non_gemini.py
+++ b/litellm/llms/vertex_ai/vertex_ai_non_gemini.py
@@ -323,7 +323,6 @@ def completion( # noqa: PLR0915
)
completion_response = chat.send_message(prompt, **optional_params).text
elif mode == "text":
-
if fake_stream is not True and stream is True:
request_str += (
f"llm_model.predict_streaming({prompt}, **{optional_params})\n"
@@ -506,7 +505,6 @@ async def async_completion( # noqa: PLR0915
Add support for acompletion calls for gemini-pro
"""
try:
-
response_obj = None
completion_response = None
if mode == "chat":
diff --git a/litellm/llms/vertex_ai/vertex_ai_partner_models/ai21/transformation.py b/litellm/llms/vertex_ai/vertex_ai_partner_models/ai21/transformation.py
index d87b2e0311..8ffc00cc95 100644
--- a/litellm/llms/vertex_ai/vertex_ai_partner_models/ai21/transformation.py
+++ b/litellm/llms/vertex_ai/vertex_ai_partner_models/ai21/transformation.py
@@ -2,9 +2,10 @@ import types
from typing import Optional
import litellm
+from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig
-class VertexAIAi21Config:
+class VertexAIAi21Config(OpenAIGPTConfig):
"""
Reference: https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/ai21
@@ -40,9 +41,6 @@ class VertexAIAi21Config:
and v is not None
}
- def get_supported_openai_params(self):
- return litellm.OpenAIConfig().get_supported_openai_params(model="gpt-3.5-turbo")
-
def map_openai_params(
self,
non_default_params: dict,
diff --git a/litellm/llms/vertex_ai/vertex_ai_partner_models/main.py b/litellm/llms/vertex_ai/vertex_ai_partner_models/main.py
index fb2393631b..b8d2658f80 100644
--- a/litellm/llms/vertex_ai/vertex_ai_partner_models/main.py
+++ b/litellm/llms/vertex_ai/vertex_ai_partner_models/main.py
@@ -110,7 +110,6 @@ class VertexAIPartnerModels(VertexBase):
message="""Upgrade vertex ai. Run `pip install "google-cloud-aiplatform>=1.38"`""",
)
try:
-
vertex_httpx_logic = VertexLLM()
access_token, project_id = vertex_httpx_logic._ensure_access_token(
diff --git a/litellm/llms/vertex_ai/vertex_embeddings/embedding_handler.py b/litellm/llms/vertex_ai/vertex_embeddings/embedding_handler.py
index 3ef40703e8..1167ca285f 100644
--- a/litellm/llms/vertex_ai/vertex_embeddings/embedding_handler.py
+++ b/litellm/llms/vertex_ai/vertex_embeddings/embedding_handler.py
@@ -86,10 +86,8 @@ class VertexEmbedding(VertexBase):
mode="embedding",
)
headers = self.set_headers(auth_header=auth_header, extra_headers=extra_headers)
- vertex_request: VertexEmbeddingRequest = (
- litellm.vertexAITextEmbeddingConfig.transform_openai_request_to_vertex_embedding_request(
- input=input, optional_params=optional_params, model=model
- )
+ vertex_request: VertexEmbeddingRequest = litellm.vertexAITextEmbeddingConfig.transform_openai_request_to_vertex_embedding_request(
+ input=input, optional_params=optional_params, model=model
)
_client_params = {}
@@ -111,7 +109,7 @@ class VertexEmbedding(VertexBase):
)
try:
- response = client.post(api_base, headers=headers, json=vertex_request) # type: ignore
+ response = client.post(url=api_base, headers=headers, json=vertex_request) # type: ignore
response.raise_for_status()
except httpx.HTTPStatusError as err:
error_code = err.response.status_code
@@ -178,10 +176,8 @@ class VertexEmbedding(VertexBase):
mode="embedding",
)
headers = self.set_headers(auth_header=auth_header, extra_headers=extra_headers)
- vertex_request: VertexEmbeddingRequest = (
- litellm.vertexAITextEmbeddingConfig.transform_openai_request_to_vertex_embedding_request(
- input=input, optional_params=optional_params, model=model
- )
+ vertex_request: VertexEmbeddingRequest = litellm.vertexAITextEmbeddingConfig.transform_openai_request_to_vertex_embedding_request(
+ input=input, optional_params=optional_params, model=model
)
_async_client_params = {}
diff --git a/litellm/llms/vertex_ai/vertex_embeddings/transformation.py b/litellm/llms/vertex_ai/vertex_embeddings/transformation.py
index 0e9c073f8d..97af558041 100644
--- a/litellm/llms/vertex_ai/vertex_embeddings/transformation.py
+++ b/litellm/llms/vertex_ai/vertex_embeddings/transformation.py
@@ -79,7 +79,7 @@ class VertexAITextEmbeddingConfig(BaseModel):
):
for param, value in non_default_params.items():
if param == "dimensions":
- optional_params["output_dimensionality"] = value
+ optional_params["outputDimensionality"] = value
if "input_type" in kwargs:
optional_params["task_type"] = kwargs.pop("input_type")
@@ -212,7 +212,6 @@ class VertexAITextEmbeddingConfig(BaseModel):
embedding_response = []
input_tokens: int = 0
for idx, element in enumerate(_predictions):
-
embedding = element["embeddings"]
embedding_response.append(
{
diff --git a/litellm/llms/vertex_ai/vertex_llm_base.py b/litellm/llms/vertex_ai/vertex_llm_base.py
index 8286cb515f..8f3037c791 100644
--- a/litellm/llms/vertex_ai/vertex_llm_base.py
+++ b/litellm/llms/vertex_ai/vertex_llm_base.py
@@ -6,11 +6,10 @@ Handles Authentication and generating request urls for Vertex AI and Google AI S
import json
import os
-from typing import TYPE_CHECKING, Any, Literal, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Dict, Literal, Optional, Tuple
from litellm._logging import verbose_logger
from litellm.litellm_core_utils.asyncify import asyncify
-from litellm.llms.base import BaseLLM
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
from litellm.types.llms.vertex_ai import VERTEX_CREDENTIALS_TYPES
@@ -22,12 +21,16 @@ else:
GoogleCredentialsObject = Any
-class VertexBase(BaseLLM):
+class VertexBase:
def __init__(self) -> None:
super().__init__()
self.access_token: Optional[str] = None
self.refresh_token: Optional[str] = None
self._credentials: Optional[GoogleCredentialsObject] = None
+ self._credentials_project_mapping: Dict[
+ Tuple[Optional[VERTEX_CREDENTIALS_TYPES], Optional[str]],
+ GoogleCredentialsObject,
+ ] = {}
self.project_id: Optional[str] = None
self.async_handler: Optional[AsyncHTTPHandler] = None
@@ -128,32 +131,11 @@ class VertexBase(BaseLLM):
"""
if custom_llm_provider == "gemini":
return "", ""
- if self.access_token is not None:
- if project_id is not None:
- return self.access_token, project_id
- elif self.project_id is not None:
- return self.access_token, self.project_id
-
- if not self._credentials:
- self._credentials, cred_project_id = self.load_auth(
- credentials=credentials, project_id=project_id
- )
- if not self.project_id:
- self.project_id = project_id or cred_project_id
else:
- if self._credentials.expired or not self._credentials.token:
- self.refresh_auth(self._credentials)
-
- if not self.project_id:
- self.project_id = self._credentials.quota_project_id
-
- if not self.project_id:
- raise ValueError("Could not resolve project_id")
-
- if not self._credentials or not self._credentials.token:
- raise RuntimeError("Could not resolve API token from the environment")
-
- return self._credentials.token, project_id or self.project_id
+ return self.get_access_token(
+ credentials=credentials,
+ project_id=project_id,
+ )
def is_using_v1beta1_features(self, optional_params: dict) -> bool:
"""
@@ -259,6 +241,101 @@ class VertexBase(BaseLLM):
url=url,
)
+ def get_access_token(
+ self,
+ credentials: Optional[VERTEX_CREDENTIALS_TYPES],
+ project_id: Optional[str],
+ ) -> Tuple[str, str]:
+ """
+ Get access token and project id
+
+ 1. Check if credentials are already in self._credentials_project_mapping
+ 2. If not, load credentials and add to self._credentials_project_mapping
+ 3. Check if loaded credentials have expired
+ 4. If expired, refresh credentials
+ 5. Return access token and project id
+ """
+
+ # Convert dict credentials to string for caching
+ cache_credentials = (
+ json.dumps(credentials) if isinstance(credentials, dict) else credentials
+ )
+ credential_cache_key = (cache_credentials, project_id)
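+ # Illustrative note: dict credentials are serialized with json.dumps, so the key is
+ # (serialized_credentials_or_path_or_None, project_id); string paths and None are used as-is.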
+ _credentials: Optional[GoogleCredentialsObject] = None
+
+ verbose_logger.debug(
+ f"Checking cached credentials for project_id: {project_id}"
+ )
+
+ if credential_cache_key in self._credentials_project_mapping:
+ verbose_logger.debug(
+ f"Cached credentials found for project_id: {project_id}."
+ )
+ _credentials = self._credentials_project_mapping[credential_cache_key]
+ verbose_logger.debug("Using cached credentials")
+ credential_project_id = _credentials.quota_project_id or getattr(
+ _credentials, "project_id", None
+ )
+
+ else:
+ verbose_logger.debug(
+ f"Credential cache key not found for project_id: {project_id}, loading new credentials"
+ )
+
+ try:
+ _credentials, credential_project_id = self.load_auth(
+ credentials=credentials, project_id=project_id
+ )
+ except Exception as e:
+ verbose_logger.exception(
+ "Failed to load vertex credentials. Check to see if credentials containing partial/invalid information."
+ )
+ raise e
+
+ if _credentials is None:
+ raise ValueError(
+ "Could not resolve credentials - either dynamically or from environment, for project_id: {}".format(
+ project_id
+ )
+ )
+
+ self._credentials_project_mapping[credential_cache_key] = _credentials
+
+ ## VALIDATE CREDENTIALS
+ verbose_logger.debug(f"Validating credentials for project_id: {project_id}")
+ if (
+ project_id is not None
+ and credential_project_id
+ and credential_project_id != project_id
+ ):
+ raise ValueError(
+ "Could not resolve project_id. Credential project_id: {} does not match requested project_id: {}".format(
+ credential_project_id, project_id
+ )
+ )
+ elif (
+ project_id is None
+ and credential_project_id is not None
+ and isinstance(credential_project_id, str)
+ ):
+ project_id = credential_project_id
+
+ if _credentials.expired:
+ self.refresh_auth(_credentials)
+
+ ## VALIDATION STEP
+ if _credentials.token is None or not isinstance(_credentials.token, str):
+ raise ValueError(
+ "Could not resolve credentials token. Got None or non-string token - {}".format(
+ _credentials.token
+ )
+ )
+
+ if project_id is None:
+ raise ValueError("Could not resolve project_id")
+
+ return _credentials.token, project_id
+
async def _ensure_access_token_async(
self,
credentials: Optional[VERTEX_CREDENTIALS_TYPES],
@@ -272,38 +349,14 @@ class VertexBase(BaseLLM):
"""
if custom_llm_provider == "gemini":
return "", ""
- if self.access_token is not None:
- if project_id is not None:
- return self.access_token, project_id
- elif self.project_id is not None:
- return self.access_token, self.project_id
-
- if not self._credentials:
- try:
- self._credentials, cred_project_id = await asyncify(self.load_auth)(
- credentials=credentials, project_id=project_id
- )
- except Exception:
- verbose_logger.exception(
- "Failed to load vertex credentials. Check to see if credentials containing partial/invalid information."
- )
- raise
- if not self.project_id:
- self.project_id = project_id or cred_project_id
else:
- if self._credentials.expired or not self._credentials.token:
- await asyncify(self.refresh_auth)(self._credentials)
-
- if not self.project_id:
- self.project_id = self._credentials.quota_project_id
-
- if not self.project_id:
- raise ValueError("Could not resolve project_id")
-
- if not self._credentials or not self._credentials.token:
- raise RuntimeError("Could not resolve API token from the environment")
-
- return self._credentials.token, project_id or self.project_id
+ try:
+ return await asyncify(self.get_access_token)(
+ credentials=credentials,
+ project_id=project_id,
+ )
+ except Exception as e:
+ raise e
def set_headers(
self, auth_header: Optional[str], extra_headers: Optional[dict]
diff --git a/litellm/llms/vertex_ai/vertex_model_garden/main.py b/litellm/llms/vertex_ai/vertex_model_garden/main.py
index 7b54d4e34b..1c57096734 100644
--- a/litellm/llms/vertex_ai/vertex_model_garden/main.py
+++ b/litellm/llms/vertex_ai/vertex_model_garden/main.py
@@ -76,7 +76,6 @@ class VertexAIModelGardenModels(VertexBase):
VertexLLM,
)
except Exception as e:
-
raise VertexAIError(
status_code=400,
message=f"""vertexai import failed please run `pip install -U "google-cloud-aiplatform>=1.38"`. Got error: {e}""",
diff --git a/litellm/llms/vllm/common_utils.py b/litellm/llms/vllm/common_utils.py
new file mode 100644
index 0000000000..8dca3e1de2
--- /dev/null
+++ b/litellm/llms/vllm/common_utils.py
@@ -0,0 +1,75 @@
+from typing import List, Optional, Union
+
+import httpx
+
+import litellm
+from litellm.llms.base_llm.base_utils import BaseLLMModelInfo
+from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import AllMessageValues
+from litellm.utils import _add_path_to_api_base
+
+
+class VLLMError(BaseLLMException):
+ pass
+
+
+class VLLMModelInfo(BaseLLMModelInfo):
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ """Google AI Studio sends api key in query params"""
+ return headers
+
+ @staticmethod
+ def get_api_base(api_base: Optional[str] = None) -> Optional[str]:
+ api_base = api_base or get_secret_str("VLLM_API_BASE")
+ if api_base is None:
+ raise ValueError(
+ "VLLM_API_BASE is not set. Please set the environment variable, to use VLLM's pass-through - `{LITELLM_API_BASE}/vllm/{endpoint}`."
+ )
+ return api_base
+
+ @staticmethod
+ def get_api_key(api_key: Optional[str] = None) -> Optional[str]:
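+ # vLLM deployments typically don't require an API key, so none is resolved from the environment.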
+ return None
+
+ @staticmethod
+ def get_base_model(model: str) -> Optional[str]:
+ return model
+
+ def get_models(
+ self, api_key: Optional[str] = None, api_base: Optional[str] = None
+ ) -> List[str]:
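+ # List models by calling the vLLM server's OpenAI-compatible /v1/models endpoint and returning the model ids.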
+ api_base = VLLMModelInfo.get_api_base(api_base)
+ api_key = VLLMModelInfo.get_api_key(api_key)
+ endpoint = "/v1/models"
+ if api_base is None:
+ raise ValueError(
+ "VLLM_API_BASE is not set. Please set the environment variable to query vLLM's `/models` endpoint."
+ )
+
+ url = _add_path_to_api_base(api_base, endpoint)
+ response = litellm.module_level_client.get(
+ url=url,
+ )
+
+ response.raise_for_status()
+
+ models = response.json()["data"]
+
+ return [model["id"] for model in models]
+
+ def get_error_class(
+ self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
+ ) -> BaseLLMException:
+ return VLLMError(
+ status_code=status_code, message=error_message, headers=headers
+ )
diff --git a/litellm/llms/voyage/embedding/transformation.py b/litellm/llms/voyage/embedding/transformation.py
index 51abc9e43a..91811e0392 100644
--- a/litellm/llms/voyage/embedding/transformation.py
+++ b/litellm/llms/voyage/embedding/transformation.py
@@ -41,6 +41,7 @@ class VoyageEmbeddingConfig(BaseEmbeddingConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
@@ -82,6 +83,7 @@ class VoyageEmbeddingConfig(BaseEmbeddingConfig):
model: str,
messages: List[AllMessageValues],
optional_params: dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
@@ -132,7 +134,7 @@ class VoyageEmbeddingConfig(BaseEmbeddingConfig):
model_response.object = raw_response_json.get("object")
usage = Usage(
- prompt_tokens=raw_response_json.get("usage", {}).get("prompt_tokens", 0),
+ prompt_tokens=raw_response_json.get("usage", {}).get("total_tokens", 0),
total_tokens=raw_response_json.get("usage", {}).get("total_tokens", 0),
)
model_response.usage = usage
diff --git a/litellm/llms/watsonx/chat/handler.py b/litellm/llms/watsonx/chat/handler.py
index 8ea19d413e..45378c5529 100644
--- a/litellm/llms/watsonx/chat/handler.py
+++ b/litellm/llms/watsonx/chat/handler.py
@@ -49,6 +49,7 @@ class WatsonXChatHandler(OpenAILikeChatHandler):
messages=messages,
optional_params=optional_params,
api_key=api_key,
+ litellm_params=litellm_params,
)
## UPDATE PAYLOAD (optional params)
@@ -61,6 +62,7 @@ class WatsonXChatHandler(OpenAILikeChatHandler):
## GET API URL
api_base = watsonx_chat_transformation.get_complete_url(
api_base=api_base,
+ api_key=api_key,
model=model,
optional_params=optional_params,
litellm_params=litellm_params,
diff --git a/litellm/llms/watsonx/chat/transformation.py b/litellm/llms/watsonx/chat/transformation.py
index f253da6f5b..3c2d1c6f0b 100644
--- a/litellm/llms/watsonx/chat/transformation.py
+++ b/litellm/llms/watsonx/chat/transformation.py
@@ -15,7 +15,6 @@ from ..common_utils import IBMWatsonXMixin
class IBMWatsonXChatConfig(IBMWatsonXMixin, OpenAIGPTConfig):
-
def get_supported_openai_params(self, model: str) -> List:
return [
"temperature", # equivalent to temperature
@@ -81,6 +80,7 @@ class IBMWatsonXChatConfig(IBMWatsonXMixin, OpenAIGPTConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
diff --git a/litellm/llms/watsonx/common_utils.py b/litellm/llms/watsonx/common_utils.py
index 4916cd1c75..d6f296c608 100644
--- a/litellm/llms/watsonx/common_utils.py
+++ b/litellm/llms/watsonx/common_utils.py
@@ -38,7 +38,7 @@ def generate_iam_token(api_key=None, **params) -> str:
headers = {}
headers["Content-Type"] = "application/x-www-form-urlencoded"
if api_key is None:
- api_key = get_secret_str("WX_API_KEY") or get_secret_str("WATSONX_API_KEY")
+ api_key = get_secret_str("WX_API_KEY") or get_secret_str("WATSONX_API_KEY") or get_secret_str("WATSONX_APIKEY")
if api_key is None:
raise ValueError("API key is required")
headers["Accept"] = "application/json"
@@ -165,6 +165,7 @@ class IBMWatsonXMixin:
model: str,
messages: List[AllMessageValues],
optional_params: Dict,
+ litellm_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> Dict:
diff --git a/litellm/llms/watsonx/completion/transformation.py b/litellm/llms/watsonx/completion/transformation.py
index f414354e2a..d45704840f 100644
--- a/litellm/llms/watsonx/completion/transformation.py
+++ b/litellm/llms/watsonx/completion/transformation.py
@@ -316,6 +316,7 @@ class IBMWatsonXAIConfig(IBMWatsonXMixin, BaseConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
diff --git a/litellm/llms/watsonx/embed/transformation.py b/litellm/llms/watsonx/embed/transformation.py
index 359137ee5e..21f508da01 100644
--- a/litellm/llms/watsonx/embed/transformation.py
+++ b/litellm/llms/watsonx/embed/transformation.py
@@ -52,6 +52,7 @@ class IBMWatsonXEmbeddingConfig(IBMWatsonXMixin, BaseEmbeddingConfig):
def get_complete_url(
self,
api_base: Optional[str],
+ api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
diff --git a/litellm/llms/xai/chat/transformation.py b/litellm/llms/xai/chat/transformation.py
index 734c6eb2e0..804abe30f0 100644
--- a/litellm/llms/xai/chat/transformation.py
+++ b/litellm/llms/xai/chat/transformation.py
@@ -1,6 +1,12 @@
-from typing import Optional, Tuple
+from typing import List, Optional, Tuple
+import litellm
+from litellm._logging import verbose_logger
+from litellm.litellm_core_utils.prompt_templates.common_utils import (
+ strip_name_from_messages,
+)
from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import AllMessageValues
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
@@ -8,6 +14,10 @@ XAI_API_BASE = "https://api.x.ai/v1"
class XAIChatConfig(OpenAIGPTConfig):
+ @property
+ def custom_llm_provider(self) -> Optional[str]:
+ return "xai"
+
def _get_openai_compatible_provider_info(
self, api_base: Optional[str], api_key: Optional[str]
) -> Tuple[Optional[str], Optional[str]]:
@@ -16,7 +26,7 @@ class XAIChatConfig(OpenAIGPTConfig):
return api_base, dynamic_api_key
def get_supported_openai_params(self, model: str) -> list:
- return [
+ base_openai_params = [
"frequency_penalty",
"logit_bias",
"logprobs",
@@ -35,6 +45,15 @@ class XAIChatConfig(OpenAIGPTConfig):
"top_p",
"user",
]
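+ # Expose 'reasoning_effort' only for models flagged as supporting reasoning.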
+ try:
+ if litellm.supports_reasoning(
+ model=model, custom_llm_provider=self.custom_llm_provider
+ ):
+ base_openai_params.append("reasoning_effort")
+ except Exception as e:
+ verbose_logger.debug(f"Error checking if model supports reasoning: {e}")
+
+ return base_openai_params
def map_openai_params(
self,
@@ -51,3 +70,21 @@ class XAIChatConfig(OpenAIGPTConfig):
if value is not None:
optional_params[param] = value
return optional_params
+
+ def transform_request(
+ self,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ headers: dict,
+ ) -> dict:
+ """
+ Handle https://github.com/BerriAI/litellm/issues/9720
+
+ Filter out 'name' from messages
+ """
+ messages = strip_name_from_messages(messages)
+ return super().transform_request(
+ model, messages, optional_params, litellm_params, headers
+ )
diff --git a/litellm/llms/xai/common_utils.py b/litellm/llms/xai/common_utils.py
new file mode 100644
index 0000000000..a26dc1e043
--- /dev/null
+++ b/litellm/llms/xai/common_utils.py
@@ -0,0 +1,71 @@
+from typing import List, Optional
+
+import httpx
+
+import litellm
+from litellm.llms.base_llm.base_utils import BaseLLMModelInfo
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import AllMessageValues
+
+
+class XAIModelInfo(BaseLLMModelInfo):
+ def validate_environment(
+ self,
+ headers: dict,
+ model: str,
+ messages: List[AllMessageValues],
+ optional_params: dict,
+ litellm_params: dict,
+ api_key: Optional[str] = None,
+ api_base: Optional[str] = None,
+ ) -> dict:
+ if api_key is not None:
+ headers["Authorization"] = f"Bearer {api_key}"
+
+ # Ensure Content-Type is set to application/json
+ if "content-type" not in headers and "Content-Type" not in headers:
+ headers["Content-Type"] = "application/json"
+
+ return headers
+
+ @staticmethod
+ def get_api_base(api_base: Optional[str] = None) -> Optional[str]:
+ return api_base or get_secret_str("XAI_API_BASE") or "https://api.x.ai"
+
+ @staticmethod
+ def get_api_key(api_key: Optional[str] = None) -> Optional[str]:
+ return api_key or get_secret_str("XAI_API_KEY")
+
+ @staticmethod
+ def get_base_model(model: str) -> Optional[str]:
+ return model.replace("xai/", "")
+
+ def get_models(
+ self, api_key: Optional[str] = None, api_base: Optional[str] = None
+ ) -> List[str]:
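+ # List models from xAI's /v1/models endpoint and prefix each id with "xai/" for LiteLLM routing.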
+ api_base = self.get_api_base(api_base)
+ api_key = self.get_api_key(api_key)
+ if api_base is None or api_key is None:
+ raise ValueError(
+ "XAI_API_BASE or XAI_API_KEY is not set. Please set the environment variable, to query XAI's `/models` endpoint."
+ )
+ response = litellm.module_level_client.get(
+ url=f"{api_base}/v1/models",
+ headers={"Authorization": f"Bearer {api_key}"},
+ )
+
+ try:
+ response.raise_for_status()
+ except httpx.HTTPStatusError:
+ raise Exception(
+ f"Failed to fetch models from XAI. Status code: {response.status_code}, Response: {response.text}"
+ )
+
+ models = response.json()["data"]
+
+ litellm_model_names = []
+ for model in models:
+ stripped_model_name = model["id"]
+ litellm_model_name = "xai/" + stripped_model_name
+ litellm_model_names.append(litellm_model_name)
+ return litellm_model_names
diff --git a/litellm/main.py b/litellm/main.py
index 95ccbb8452..6c464d0d95 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -25,6 +25,7 @@ from functools import partial
from typing import (
Any,
Callable,
+ Coroutine,
Dict,
List,
Literal,
@@ -50,6 +51,10 @@ from litellm import ( # type: ignore
get_litellm_params,
get_optional_params,
)
+from litellm.constants import (
+ DEFAULT_MOCK_RESPONSE_COMPLETION_TOKEN_COUNT,
+ DEFAULT_MOCK_RESPONSE_PROMPT_TOKEN_COUNT,
+)
from litellm.exceptions import LiteLLMUnknownProvider
from litellm.integrations.custom_logger import CustomLogger
from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_for_health_check
@@ -105,7 +110,10 @@ from .litellm_core_utils.fallback_utils import (
async_completion_with_fallbacks,
completion_with_fallbacks,
)
-from .litellm_core_utils.prompt_templates.common_utils import get_completion_messages
+from .litellm_core_utils.prompt_templates.common_utils import (
+ get_completion_messages,
+ update_messages_with_model_file_ids,
+)
from .litellm_core_utils.prompt_templates.factory import (
custom_prompt,
function_call_prompt,
@@ -130,11 +138,10 @@ from .llms.cohere.embed import handler as cohere_embed
from .llms.custom_httpx.aiohttp_handler import BaseLLMAIOHTTPHandler
from .llms.custom_httpx.llm_http_handler import BaseLLMHTTPHandler
from .llms.custom_llm import CustomLLM, custom_chat_llm_router
-from .llms.databricks.chat.handler import DatabricksChatCompletion
from .llms.databricks.embed.handler import DatabricksEmbeddingHandler
from .llms.deprecated_providers import aleph_alpha, palm
from .llms.groq.chat.handler import GroqChatCompletion
-from .llms.huggingface.chat.handler import Huggingface
+from .llms.huggingface.embedding.handler import HuggingFaceEmbedding
from .llms.nlp_cloud.chat.handler import completion as nlp_cloud_chat_completion
from .llms.ollama.completion import handler as ollama
from .llms.oobabooga.chat import oobabooga
@@ -207,7 +214,6 @@ openai_chat_completions = OpenAIChatCompletion()
openai_text_completions = OpenAITextCompletion()
openai_audio_transcriptions = OpenAIAudioTranscription()
openai_image_variations = OpenAIImageVariationsHandler()
-databricks_chat_completions = DatabricksChatCompletion()
groq_chat_completions = GroqChatCompletion()
azure_ai_embedding = AzureAIEmbedding()
anthropic_chat_completions = AnthropicChatCompletion()
@@ -215,7 +221,7 @@ azure_chat_completions = AzureChatCompletion()
azure_o1_chat_completions = AzureOpenAIO1ChatCompletion()
azure_text_completions = AzureTextCompletion()
azure_audio_transcriptions = AzureAudioTranscription()
-huggingface = Huggingface()
+huggingface_embed = HuggingFaceEmbedding()
predibase_chat_completions = PredibaseChatCompletion()
codestral_text_completions = CodestralTextCompletion()
bedrock_converse_chat_completion = BedrockConverseLLM()
@@ -444,7 +450,7 @@ async def acompletion(
fallbacks = fallbacks or litellm.model_fallbacks
if fallbacks is not None:
response = await async_completion_with_fallbacks(
- **completion_kwargs, kwargs={"fallbacks": fallbacks}
+ **completion_kwargs, kwargs={"fallbacks": fallbacks, **kwargs}
)
if response is None:
raise Exception(
@@ -739,7 +745,12 @@ def mock_completion(
setattr(
model_response,
"usage",
- Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30),
+ Usage(
+ prompt_tokens=DEFAULT_MOCK_RESPONSE_PROMPT_TOKEN_COUNT,
+ completion_tokens=DEFAULT_MOCK_RESPONSE_COMPLETION_TOKEN_COUNT,
+ total_tokens=DEFAULT_MOCK_RESPONSE_PROMPT_TOKEN_COUNT
+ + DEFAULT_MOCK_RESPONSE_COMPLETION_TOKEN_COUNT,
+ ),
)
try:
@@ -943,16 +954,21 @@ def completion( # type: ignore # noqa: PLR0915
non_default_params = get_non_default_completion_params(kwargs=kwargs)
litellm_params = {} # used to prevent unbound var errors
## PROMPT MANAGEMENT HOOKS ##
-
- if isinstance(litellm_logging_obj, LiteLLMLoggingObj) and prompt_id is not None:
- model, messages, optional_params = (
- litellm_logging_obj.get_chat_completion_prompt(
- model=model,
- messages=messages,
- non_default_params=non_default_params,
- prompt_id=prompt_id,
- prompt_variables=prompt_variables,
- )
+ if isinstance(litellm_logging_obj, LiteLLMLoggingObj) and (
+ litellm_logging_obj.should_run_prompt_management_hooks(
+ prompt_id=prompt_id, non_default_params=non_default_params
+ )
+ ):
+ (
+ model,
+ messages,
+ optional_params,
+ ) = litellm_logging_obj.get_chat_completion_prompt(
+ model=model,
+ messages=messages,
+ non_default_params=non_default_params,
+ prompt_id=prompt_id,
+ prompt_variables=prompt_variables,
)
try:
@@ -1056,6 +1072,15 @@ def completion( # type: ignore # noqa: PLR0915
if eos_token:
custom_prompt_dict[model]["eos_token"] = eos_token
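+ # Rewrite file ids in messages to this deployment's ids when a model_file_id_mapping is provided.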
+ if kwargs.get("model_file_id_mapping"):
+ messages = update_messages_with_model_file_ids(
+ messages=messages,
+ model_id=kwargs.get("model_info", {}).get("id", None),
+ model_file_id_mapping=cast(
+ Dict[str, Dict[str, str]], kwargs.get("model_file_id_mapping")
+ ),
+ )
+
provider_config: Optional[BaseConfig] = None
if custom_llm_provider is not None and custom_llm_provider in [
provider.value for provider in LlmProviders
@@ -1112,6 +1137,7 @@ def completion( # type: ignore # noqa: PLR0915
messages=messages,
reasoning_effort=reasoning_effort,
thinking=thinking,
+ allowed_openai_params=kwargs.get("allowed_openai_params"),
**non_default_params,
)
@@ -1245,7 +1271,6 @@ def completion( # type: ignore # noqa: PLR0915
optional_params["max_retries"] = max_retries
if litellm.AzureOpenAIO1Config().is_o_series_model(model=model):
-
## LOAD CONFIG - if set
config = litellm.AzureOpenAIO1Config.get_config()
for k, v in config.items():
@@ -1410,6 +1435,7 @@ def completion( # type: ignore # noqa: PLR0915
custom_llm_provider=custom_llm_provider,
encoding=encoding,
stream=stream,
+ provider_config=provider_config,
)
except Exception as e:
## LOGGING - log the original exception returned
@@ -1571,6 +1597,37 @@ def completion( # type: ignore # noqa: PLR0915
additional_args={"headers": headers},
)
response = _response
+ elif custom_llm_provider == "fireworks_ai":
+ ## COMPLETION CALL
+ try:
+ response = base_llm_http_handler.completion(
+ model=model,
+ messages=messages,
+ headers=headers,
+ model_response=model_response,
+ api_key=api_key,
+ api_base=api_base,
+ acompletion=acompletion,
+ logging_obj=logging,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
+ timeout=timeout, # type: ignore
+ client=client,
+ custom_llm_provider=custom_llm_provider,
+ encoding=encoding,
+ stream=stream,
+ provider_config=provider_config,
+ )
+ except Exception as e:
+ ## LOGGING - log the original exception returned
+ logging.post_call(
+ input=messages,
+ api_key=api_key,
+ original_response=str(e),
+ additional_args={"headers": headers},
+ )
+ raise e
+
elif custom_llm_provider == "groq":
api_base = (
api_base # for deepinfra/perplexity/anyscale/groq/friendliai we check in get_llm_provider and pass in the api base from there
@@ -1597,24 +1654,22 @@ def completion( # type: ignore # noqa: PLR0915
): # completion(top_k=3) > openai_config(top_k=3) <- allows for dynamic variables to be passed in
optional_params[k] = v
- response = groq_chat_completions.completion(
+ response = base_llm_http_handler.completion(
model=model,
+ stream=stream,
messages=messages,
- headers=headers,
- model_response=model_response,
- print_verbose=print_verbose,
- api_key=api_key,
- api_base=api_base,
acompletion=acompletion,
- logging_obj=logging,
+ api_base=api_base,
+ model_response=model_response,
optional_params=optional_params,
litellm_params=litellm_params,
- logger_fn=logger_fn,
- timeout=timeout, # type: ignore
- custom_prompt_dict=custom_prompt_dict,
- client=client, # pass AsyncOpenAI, OpenAI client
custom_llm_provider=custom_llm_provider,
+ timeout=timeout,
+ headers=headers,
encoding=encoding,
+ api_key=api_key,
+ logging_obj=logging, # model call logging is handled inside the class, since we may need to modify I/O to fit the provider's requirements
+ client=client,
)
elif custom_llm_provider == "aiohttp_openai":
# NEW aiohttp provider for 10-100x higher RPS
@@ -2120,7 +2175,6 @@ def completion( # type: ignore # noqa: PLR0915
response = model_response
elif custom_llm_provider == "huggingface":
- custom_llm_provider = "huggingface"
huggingface_key = (
api_key
or litellm.huggingface_key
@@ -2129,40 +2183,23 @@ def completion( # type: ignore # noqa: PLR0915
or litellm.api_key
)
hf_headers = headers or litellm.headers
-
- custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
- model_response = huggingface.completion(
+ response = base_llm_http_handler.completion(
model=model,
messages=messages,
- api_base=api_base, # type: ignore
- headers=hf_headers or {},
+ headers=hf_headers,
model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
api_key=huggingface_key,
+ api_base=api_base,
acompletion=acompletion,
logging_obj=logging,
- custom_prompt_dict=custom_prompt_dict,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
timeout=timeout, # type: ignore
client=client,
+ custom_llm_provider=custom_llm_provider,
+ encoding=encoding,
+ stream=stream,
)
- if (
- "stream" in optional_params
- and optional_params["stream"] is True
- and acompletion is False
- ):
- # don't try to access stream object,
- response = CustomStreamWrapper(
- model_response,
- model,
- custom_llm_provider="huggingface",
- logging_obj=logging,
- )
- return response
- response = model_response
elif custom_llm_provider == "oobabooga":
custom_llm_provider = "oobabooga"
model_response = oobabooga.completion(
@@ -2207,24 +2244,22 @@ def completion( # type: ignore # noqa: PLR0915
## COMPLETION CALL
try:
- response = databricks_chat_completions.completion(
+ response = base_llm_http_handler.completion(
model=model,
+ stream=stream,
messages=messages,
- headers=headers,
- model_response=model_response,
- print_verbose=print_verbose,
- api_key=api_key,
- api_base=api_base,
acompletion=acompletion,
- logging_obj=logging,
+ api_base=api_base,
+ model_response=model_response,
optional_params=optional_params,
litellm_params=litellm_params,
- logger_fn=logger_fn,
- timeout=timeout, # type: ignore
- custom_prompt_dict=custom_prompt_dict,
- client=client, # pass AsyncOpenAI, OpenAI client
- encoding=encoding,
custom_llm_provider="databricks",
+ timeout=timeout,
+ headers=headers,
+ encoding=encoding,
+ api_key=api_key,
+ logging_obj=logging, # model call logging is handled inside the class, since we may need to modify I/O to fit the provider's requirements
+ client=client,
)
except Exception as e:
## LOGGING - log the original exception returned
@@ -2245,7 +2280,12 @@ def completion( # type: ignore # noqa: PLR0915
additional_args={"headers": headers},
)
elif custom_llm_provider == "openrouter":
- api_base = api_base or litellm.api_base or "https://openrouter.ai/api/v1"
+ api_base = (
+ api_base
+ or litellm.api_base
+ or get_secret_str("OPENROUTER_API_BASE")
+ or "https://openrouter.ai/api/v1"
+ )
api_key = (
api_key
@@ -2344,6 +2384,8 @@ def completion( # type: ignore # noqa: PLR0915
or litellm.api_key
)
+ api_base = api_base or litellm.api_base or get_secret("GEMINI_API_BASE")
+
new_params = deepcopy(optional_params)
response = vertex_chat_completion.completion( # type: ignore
model=model,
@@ -2386,6 +2428,8 @@ def completion( # type: ignore # noqa: PLR0915
or get_secret("VERTEXAI_CREDENTIALS")
)
+ api_base = api_base or litellm.api_base or get_secret("VERTEXAI_API_BASE")
+
new_params = deepcopy(optional_params)
if (
model.startswith("meta/")
@@ -2598,6 +2642,7 @@ def completion( # type: ignore # noqa: PLR0915
encoding=encoding,
logging_obj=logging,
acompletion=acompletion,
+ client=client,
)
## RESPONSE OBJECT
@@ -2643,9 +2688,9 @@ def completion( # type: ignore # noqa: PLR0915
"aws_region_name" not in optional_params
or optional_params["aws_region_name"] is None
):
- optional_params["aws_region_name"] = (
- aws_bedrock_client.meta.region_name
- )
+ optional_params[
+ "aws_region_name"
+ ] = aws_bedrock_client.meta.region_name
bedrock_route = BedrockModelInfo.get_bedrock_route(model)
if bedrock_route == "converse":
@@ -3054,7 +3099,7 @@ def completion( # type: ignore # noqa: PLR0915
"max_tokens": max_tokens,
"temperature": temperature,
"top_p": top_p,
- "top_k": kwargs.get("top_k", 40),
+ "top_k": kwargs.get("top_k"),
},
},
)
@@ -3288,7 +3333,7 @@ def embedding( # noqa: PLR0915
litellm_call_id=None,
logger_fn=None,
**kwargs,
-) -> EmbeddingResponse:
+) -> Union[EmbeddingResponse, Coroutine[Any, Any, EmbeddingResponse]]:
"""
Embedding function that calls an API to generate embeddings for the given input.
@@ -3409,7 +3454,9 @@ def embedding( # noqa: PLR0915
if mock_response is not None:
return mock_embedding(model=model, mock_response=mock_response)
try:
- response: Optional[EmbeddingResponse] = None
+ response: Optional[
+ Union[EmbeddingResponse, Coroutine[Any, Any, EmbeddingResponse]]
+ ] = None
if azure is True or custom_llm_provider == "azure":
# azure configs
@@ -3593,7 +3640,7 @@ def embedding( # noqa: PLR0915
or get_secret("HUGGINGFACE_API_KEY")
or litellm.api_key
) # type: ignore
- response = huggingface.embedding(
+ response = huggingface_embed.embedding(
model=model,
input=input,
encoding=encoding, # type: ignore
@@ -3604,6 +3651,7 @@ def embedding( # noqa: PLR0915
optional_params=optional_params,
client=client,
aembedding=aembedding,
+ litellm_params=litellm_params_dict,
)
elif custom_llm_provider == "bedrock":
if isinstance(input, str):
@@ -3649,6 +3697,8 @@ def embedding( # noqa: PLR0915
api_key or get_secret_str("GEMINI_API_KEY") or litellm.api_key
)
+ api_base = api_base or litellm.api_base or get_secret_str("GEMINI_API_BASE")
+
response = google_batch_embeddings.batch_embeddings( # type: ignore
model=model,
input=input,
@@ -3663,6 +3713,8 @@ def embedding( # noqa: PLR0915
print_verbose=print_verbose,
custom_llm_provider="gemini",
api_key=gemini_api_key,
+ api_base=api_base,
+ client=client,
)
elif custom_llm_provider == "vertex_ai":
@@ -3687,6 +3739,13 @@ def embedding( # noqa: PLR0915
or get_secret_str("VERTEX_CREDENTIALS")
)
+ api_base = (
+ api_base
+ or litellm.api_base
+ or get_secret_str("VERTEXAI_API_BASE")
+ or get_secret_str("VERTEX_API_BASE")
+ )
+
if (
"image" in optional_params
or "video" in optional_params
@@ -3700,6 +3759,7 @@ def embedding( # noqa: PLR0915
encoding=encoding,
logging_obj=logging,
optional_params=optional_params,
+ litellm_params=litellm_params_dict,
model_response=EmbeddingResponse(),
vertex_project=vertex_ai_project,
vertex_location=vertex_ai_location,
@@ -3707,6 +3767,8 @@ def embedding( # noqa: PLR0915
aembedding=aembedding,
print_verbose=print_verbose,
custom_llm_provider="vertex_ai",
+ client=client,
+ api_base=api_base,
)
else:
response = vertex_embedding.embedding(
@@ -3724,6 +3786,8 @@ def embedding( # noqa: PLR0915
aembedding=aembedding,
print_verbose=print_verbose,
api_key=api_key,
+ api_base=api_base,
+ client=client,
)
elif custom_llm_provider == "oobabooga":
response = oobabooga.embedding(
@@ -3821,6 +3885,21 @@ def embedding( # noqa: PLR0915
aembedding=aembedding,
litellm_params={},
)
+ elif custom_llm_provider == "infinity":
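+ # Infinity embeddings go through the shared base HTTP handler.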
+ response = base_llm_http_handler.embedding(
+ model=model,
+ input=input,
+ custom_llm_provider=custom_llm_provider,
+ api_base=api_base,
+ api_key=api_key,
+ logging_obj=logging,
+ timeout=timeout,
+ model_response=EmbeddingResponse(),
+ optional_params=optional_params,
+ client=client,
+ aembedding=aembedding,
+ litellm_params={},
+ )
elif custom_llm_provider == "watsonx":
credentials = IBMWatsonXMixin.get_watsonx_credentials(
optional_params=optional_params, api_key=api_key, api_base=api_base
@@ -3902,7 +3981,11 @@ def embedding( # noqa: PLR0915
raise LiteLLMUnknownProvider(
model=model, custom_llm_provider=custom_llm_provider
)
- if response is not None and hasattr(response, "_hidden_params"):
+ if (
+ response is not None
+ and hasattr(response, "_hidden_params")
+ and isinstance(response, EmbeddingResponse)
+ ):
response._hidden_params["custom_llm_provider"] = custom_llm_provider
if response is None:
@@ -4330,9 +4413,9 @@ def adapter_completion(
new_kwargs = translation_obj.translate_completion_input_params(kwargs=kwargs)
response: Union[ModelResponse, CustomStreamWrapper] = completion(**new_kwargs) # type: ignore
- translated_response: Optional[Union[BaseModel, AdapterCompletionStreamWrapper]] = (
- None
- )
+ translated_response: Optional[
+ Union[BaseModel, AdapterCompletionStreamWrapper]
+ ] = None
if isinstance(response, ModelResponse):
translated_response = translation_obj.translate_completion_output_params(
response=response
@@ -4404,13 +4487,16 @@ async def amoderation(
optional_params = GenericLiteLLMParams(**kwargs)
try:
- model, _custom_llm_provider, _dynamic_api_key, _dynamic_api_base = (
- litellm.get_llm_provider(
- model=model or "",
- custom_llm_provider=custom_llm_provider,
- api_base=optional_params.api_base,
- api_key=optional_params.api_key,
- )
+ (
+ model,
+ _custom_llm_provider,
+ _dynamic_api_key,
+ _dynamic_api_base,
+ ) = litellm.get_llm_provider(
+ model=model or "",
+ custom_llm_provider=custom_llm_provider,
+ api_base=optional_params.api_base,
+ api_key=optional_params.api_key,
)
except litellm.BadRequestError:
# `model` is optional field for moderation - get_llm_provider will throw BadRequestError if model is not set / not recognized
@@ -4682,6 +4768,14 @@ def image_generation( # noqa: PLR0915
or optional_params.pop("vertex_ai_credentials", None)
or get_secret_str("VERTEXAI_CREDENTIALS")
)
+
+ api_base = (
+ api_base
+ or litellm.api_base
+ or get_secret_str("VERTEXAI_API_BASE")
+ or get_secret_str("VERTEX_API_BASE")
+ )
+
model_response = vertex_image_generation.image_generation(
model=model,
prompt=prompt,
@@ -4693,6 +4787,8 @@ def image_generation( # noqa: PLR0915
vertex_location=vertex_ai_location,
vertex_credentials=vertex_credentials,
aimg_generation=aimg_generation,
+ api_base=api_base,
+ client=client,
)
elif (
custom_llm_provider in litellm._custom_providers
@@ -4945,6 +5041,10 @@ async def atranscription(*args, **kwargs) -> TranscriptionResponse:
else:
# Call the synchronous function using run_in_executor
response = await loop.run_in_executor(None, func_with_context)
+ if not isinstance(response, TranscriptionResponse):
+ raise ValueError(
+ f"Invalid response from transcription provider, expected TranscriptionResponse, but got {type(response)}"
+ )
return response
except Exception as e:
custom_llm_provider = custom_llm_provider or "openai"
@@ -4978,7 +5078,7 @@ def transcription(
max_retries: Optional[int] = None,
custom_llm_provider=None,
**kwargs,
-) -> TranscriptionResponse:
+) -> Union[TranscriptionResponse, Coroutine[Any, Any, TranscriptionResponse]]:
"""
Calls openai + azure whisper endpoints.
@@ -5047,7 +5147,15 @@ def transcription(
custom_llm_provider=custom_llm_provider,
)
- response: Optional[TranscriptionResponse] = None
+ response: Optional[
+ Union[TranscriptionResponse, Coroutine[Any, Any, TranscriptionResponse]]
+ ] = None
+
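+ # Resolve the provider-specific audio transcription config so the handlers below can apply provider transformations.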
+ provider_config = ProviderConfigManager.get_provider_audio_transcription_config(
+ model=model,
+ provider=LlmProviders(custom_llm_provider),
+ )
+
if custom_llm_provider == "azure":
# azure configs
api_base = api_base or litellm.api_base or get_secret_str("AZURE_API_BASE")
@@ -5114,12 +5222,15 @@ def transcription(
max_retries=max_retries,
api_base=api_base,
api_key=api_key,
+ provider_config=provider_config,
+ litellm_params=litellm_params_dict,
)
elif custom_llm_provider == "deepgram":
response = base_llm_http_handler.audio_transcriptions(
model=model,
audio_file=file,
optional_params=optional_params,
+ litellm_params=litellm_params_dict,
model_response=model_response,
atranscription=atranscription,
client=(
@@ -5138,6 +5249,7 @@ def transcription(
api_key=api_key,
custom_llm_provider="deepgram",
headers={},
+ provider_config=provider_config,
)
if response is None:
raise ValueError("Unmapped provider passed in. Unable to get the response.")
@@ -5347,7 +5459,6 @@ def speech( # noqa: PLR0915
litellm_params=litellm_params_dict,
)
elif custom_llm_provider == "vertex_ai" or custom_llm_provider == "vertex_ai_beta":
-
generic_optional_params = GenericLiteLLMParams(**kwargs)
api_base = generic_optional_params.api_base or ""
@@ -5402,7 +5513,6 @@ def speech( # noqa: PLR0915
async def ahealth_check_wildcard_models(
model: str, custom_llm_provider: str, model_params: dict
) -> dict:
-
# this is a wildcard model, we need to pick a random model from the provider
cheapest_models = pick_cheapest_chat_models_from_llm_provider(
custom_llm_provider=custom_llm_provider, n=3
@@ -5725,9 +5835,22 @@ def stream_chunk_builder( # noqa: PLR0915
]
if len(content_chunks) > 0:
- response["choices"][0]["message"]["content"] = (
- processor.get_combined_content(content_chunks)
- )
+ response["choices"][0]["message"][
+ "content"
+ ] = processor.get_combined_content(content_chunks)
+
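+ # Combine streamed reasoning deltas so the rebuilt response exposes 'reasoning_content' like a non-streaming call.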
+ reasoning_chunks = [
+ chunk
+ for chunk in chunks
+ if len(chunk["choices"]) > 0
+ and "reasoning_content" in chunk["choices"][0]["delta"]
+ and chunk["choices"][0]["delta"]["reasoning_content"] is not None
+ ]
+
+ if len(reasoning_chunks) > 0:
+ response["choices"][0]["message"][
+ "reasoning_content"
+ ] = processor.get_combined_reasoning_content(reasoning_chunks)
audio_chunks = [
chunk
@@ -5743,11 +5866,14 @@ def stream_chunk_builder( # noqa: PLR0915
completion_output = get_content_from_model_response(response)
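+ # Count reasoning tokens in the rebuilt response so usage can report them separately.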
+ reasoning_tokens = processor.count_reasoning_tokens(response)
+
usage = processor.calculate_usage(
chunks=chunks,
model=model,
completion_output=completion_output,
messages=messages,
+ reasoning_tokens=reasoning_tokens,
)
setattr(response, "usage", usage)
diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index fa9c7ffbd5..95543d09c2 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -5,6 +5,7 @@
"max_output_tokens": "max output tokens, if the provider specifies it. if not default to max_tokens",
"input_cost_per_token": 0.0000,
"output_cost_per_token": 0.000,
+ "output_cost_per_reasoning_token": 0.000,
"litellm_provider": "one of https://docs.litellm.ai/docs/providers",
"mode": "one of: chat, embedding, completion, image_generation, audio_transcription, audio_speech, image_generation, moderation, rerank",
"supports_function_calling": true,
@@ -15,6 +16,13 @@
"supports_prompt_caching": true,
"supports_response_schema": true,
"supports_system_messages": true,
+ "supports_reasoning": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 0.0000,
+ "search_context_size_medium": 0.0000,
+ "search_context_size_high": 0.0000
+ },
"deprecation_date": "date when the model becomes deprecated in the format YYYY-MM-DD"
},
"omni-moderation-latest": {
@@ -57,6 +65,168 @@
"supports_system_messages": true,
"supports_tool_choice": true
},
+ "gpt-4.1": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 2e-6,
+ "output_cost_per_token": 8e-6,
+ "input_cost_per_token_batches": 1e-6,
+ "output_cost_per_token_batches": 4e-6,
+ "cache_read_input_token_cost": 0.5e-6,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 30e-3,
+ "search_context_size_medium": 35e-3,
+ "search_context_size_high": 50e-3
+ }
+ },
+ "gpt-4.1-2025-04-14": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 2e-6,
+ "output_cost_per_token": 8e-6,
+ "input_cost_per_token_batches": 1e-6,
+ "output_cost_per_token_batches": 4e-6,
+ "cache_read_input_token_cost": 0.5e-6,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 30e-3,
+ "search_context_size_medium": 35e-3,
+ "search_context_size_high": 50e-3
+ }
+ },
+ "gpt-4.1-mini": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 0.4e-6,
+ "output_cost_per_token": 1.6e-6,
+ "input_cost_per_token_batches": 0.2e-6,
+ "output_cost_per_token_batches": 0.8e-6,
+ "cache_read_input_token_cost": 0.1e-6,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 25e-3,
+ "search_context_size_medium": 27.5e-3,
+ "search_context_size_high": 30e-3
+ }
+ },
+ "gpt-4.1-mini-2025-04-14": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 0.4e-6,
+ "output_cost_per_token": 1.6e-6,
+ "input_cost_per_token_batches": 0.2e-6,
+ "output_cost_per_token_batches": 0.8e-6,
+ "cache_read_input_token_cost": 0.1e-6,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 25e-3,
+ "search_context_size_medium": 27.5e-3,
+ "search_context_size_high": 30e-3
+ }
+ },
+ "gpt-4.1-nano": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 0.1e-6,
+ "output_cost_per_token": 0.4e-6,
+ "input_cost_per_token_batches": 0.05e-6,
+ "output_cost_per_token_batches": 0.2e-6,
+ "cache_read_input_token_cost": 0.025e-6,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
+ },
+ "gpt-4.1-nano-2025-04-14": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 0.1e-6,
+ "output_cost_per_token": 0.4e-6,
+ "input_cost_per_token_batches": 0.05e-6,
+ "output_cost_per_token_batches": 0.2e-6,
+ "cache_read_input_token_cost": 0.025e-6,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
+ },
"gpt-4o": {
"max_tokens": 16384,
"max_input_tokens": 128000,
@@ -74,7 +244,81 @@
"supports_vision": true,
"supports_prompt_caching": true,
"supports_system_messages": true,
- "supports_tool_choice": true
+ "supports_tool_choice": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 0.030,
+ "search_context_size_medium": 0.035,
+ "search_context_size_high": 0.050
+ }
+ },
+ "watsonx/ibm/granite-3-8b-instruct": {
+ "max_tokens": 8192,
+ "max_input_tokens": 8192,
+ "max_output_tokens": 1024,
+ "input_cost_per_token": 0.0002,
+ "output_cost_per_token": 0.0002,
+ "litellm_provider": "watsonx",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_tool_choice": true,
+ "supports_parallel_function_calling": false,
+ "supports_vision": false,
+ "supports_audio_input": false,
+ "supports_audio_output": false,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_system_messages": true
+ },
+ "gpt-4o-search-preview-2025-03-11": {
+ "max_tokens": 16384,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 16384,
+ "input_cost_per_token": 0.0000025,
+ "output_cost_per_token": 0.000010,
+ "input_cost_per_token_batches": 0.00000125,
+ "output_cost_per_token_batches": 0.00000500,
+ "cache_read_input_token_cost": 0.00000125,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 0.030,
+ "search_context_size_medium": 0.035,
+ "search_context_size_high": 0.050
+ }
+ },
+ "gpt-4o-search-preview": {
+ "max_tokens": 16384,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 16384,
+ "input_cost_per_token": 0.0000025,
+ "output_cost_per_token": 0.000010,
+ "input_cost_per_token_batches": 0.00000125,
+ "output_cost_per_token_batches": 0.00000500,
+ "cache_read_input_token_cost": 0.00000125,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 0.030,
+ "search_context_size_medium": 0.035,
+ "search_context_size_high": 0.050
+ }
},
"gpt-4.5-preview": {
"max_tokens": 16384,
@@ -199,7 +443,63 @@
"supports_vision": true,
"supports_prompt_caching": true,
"supports_system_messages": true,
- "supports_tool_choice": true
+ "supports_tool_choice": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 0.025,
+ "search_context_size_medium": 0.0275,
+ "search_context_size_high": 0.030
+ }
+ },
+ "gpt-4o-mini-search-preview-2025-03-11":{
+ "max_tokens": 16384,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 16384,
+ "input_cost_per_token": 0.00000015,
+ "output_cost_per_token": 0.00000060,
+ "input_cost_per_token_batches": 0.000000075,
+ "output_cost_per_token_batches": 0.00000030,
+ "cache_read_input_token_cost": 0.000000075,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 0.025,
+ "search_context_size_medium": 0.0275,
+ "search_context_size_high": 0.030
+ }
+ },
+ "gpt-4o-mini-search-preview": {
+ "max_tokens": 16384,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 16384,
+ "input_cost_per_token": 0.00000015,
+ "output_cost_per_token": 0.00000060,
+ "input_cost_per_token_batches": 0.000000075,
+ "output_cost_per_token_batches": 0.00000030,
+ "cache_read_input_token_cost": 0.000000075,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 0.025,
+ "search_context_size_medium": 0.0275,
+ "search_context_size_high": 0.030
+ }
},
"gpt-4o-mini-2024-07-18": {
"max_tokens": 16384,
@@ -218,7 +518,58 @@
"supports_vision": true,
"supports_prompt_caching": true,
"supports_system_messages": true,
- "supports_tool_choice": true
+ "supports_tool_choice": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 30.00,
+ "search_context_size_medium": 35.00,
+ "search_context_size_high": 50.00
+ }
+ },
+ "o1-pro": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 0.00015,
+ "output_cost_per_token": 0.0006,
+ "input_cost_per_token_batches": 0.000075,
+ "output_cost_per_token_batches": 0.0003,
+ "litellm_provider": "openai",
+ "mode": "responses",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": false,
+ "supports_reasoning": true,
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supported_endpoints": ["/v1/responses", "/v1/batch"]
+ },
+ "o1-pro-2025-03-19": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 0.00015,
+ "output_cost_per_token": 0.0006,
+ "input_cost_per_token_batches": 0.000075,
+ "output_cost_per_token_batches": 0.0003,
+ "litellm_provider": "openai",
+ "mode": "responses",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": false,
+ "supports_reasoning": true,
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supported_endpoints": ["/v1/responses", "/v1/batch"]
},
"o1": {
"max_tokens": 100000,
@@ -235,6 +586,7 @@
"supports_prompt_caching": true,
"supports_system_messages": true,
"supports_response_schema": true,
+ "supports_reasoning": true,
"supports_tool_choice": true
},
"o1-mini": {
@@ -249,6 +601,40 @@
"supports_vision": true,
"supports_prompt_caching": true
},
+ "o3": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1e-5,
+ "output_cost_per_token": 4e-5,
+ "cache_read_input_token_cost": 2.5e-6,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": false,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_reasoning": true,
+ "supports_tool_choice": true
+ },
+ "o3-2025-04-16": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1e-5,
+ "output_cost_per_token": 4e-5,
+ "cache_read_input_token_cost": 2.5e-6,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": false,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_reasoning": true,
+ "supports_tool_choice": true
+ },
"o3-mini": {
"max_tokens": 100000,
"max_input_tokens": 200000,
@@ -263,6 +649,7 @@
"supports_vision": false,
"supports_prompt_caching": true,
"supports_response_schema": true,
+ "supports_reasoning": true,
"supports_tool_choice": true
},
"o3-mini-2025-01-31": {
@@ -279,6 +666,41 @@
"supports_vision": false,
"supports_prompt_caching": true,
"supports_response_schema": true,
+ "supports_reasoning": true,
+ "supports_tool_choice": true
+ },
+ "o4-mini": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1.1e-6,
+ "output_cost_per_token": 4.4e-6,
+ "cache_read_input_token_cost": 2.75e-7,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": false,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_reasoning": true,
+ "supports_tool_choice": true
+ },
+ "o4-mini-2025-04-16": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1.1e-6,
+ "output_cost_per_token": 4.4e-6,
+ "cache_read_input_token_cost": 2.75e-7,
+ "litellm_provider": "openai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": false,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_reasoning": true,
"supports_tool_choice": true
},
"o1-mini-2024-09-12": {
@@ -291,6 +713,7 @@
"litellm_provider": "openai",
"mode": "chat",
"supports_vision": true,
+ "supports_reasoning": true,
"supports_prompt_caching": true
},
"o1-preview": {
@@ -303,6 +726,7 @@
"litellm_provider": "openai",
"mode": "chat",
"supports_vision": true,
+ "supports_reasoning": true,
"supports_prompt_caching": true
},
"o1-preview-2024-09-12": {
@@ -315,6 +739,7 @@
"litellm_provider": "openai",
"mode": "chat",
"supports_vision": true,
+ "supports_reasoning": true,
"supports_prompt_caching": true
},
"o1-2024-12-17": {
@@ -332,6 +757,7 @@
"supports_prompt_caching": true,
"supports_system_messages": true,
"supports_response_schema": true,
+ "supports_reasoning": true,
"supports_tool_choice": true
},
"chatgpt-4o-latest": {
@@ -383,7 +809,13 @@
"supports_vision": true,
"supports_prompt_caching": true,
"supports_system_messages": true,
- "supports_tool_choice": true
+ "supports_tool_choice": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 0.030,
+ "search_context_size_medium": 0.035,
+ "search_context_size_high": 0.050
+ }
},
"gpt-4o-2024-11-20": {
"max_tokens": 16384,
@@ -1005,21 +1437,328 @@
"output_cost_per_pixel": 0.0,
"litellm_provider": "openai"
},
+ "gpt-4o-transcribe": {
+ "mode": "audio_transcription",
+ "input_cost_per_token": 0.0000025,
+ "input_cost_per_audio_token": 0.000006,
+ "output_cost_per_token": 0.00001,
+ "litellm_provider": "openai",
+ "supported_endpoints": ["/v1/audio/transcriptions"]
+ },
+ "gpt-4o-mini-transcribe": {
+ "mode": "audio_transcription",
+ "input_cost_per_token": 0.00000125,
+ "input_cost_per_audio_token": 0.000003,
+ "output_cost_per_token": 0.000005,
+ "litellm_provider": "openai",
+ "supported_endpoints": ["/v1/audio/transcriptions"]
+ },
"whisper-1": {
"mode": "audio_transcription",
"input_cost_per_second": 0.0001,
"output_cost_per_second": 0.0001,
- "litellm_provider": "openai"
+ "litellm_provider": "openai",
+ "supported_endpoints": ["/v1/audio/transcriptions"]
},
"tts-1": {
"mode": "audio_speech",
"input_cost_per_character": 0.000015,
- "litellm_provider": "openai"
+ "litellm_provider": "openai",
+ "supported_endpoints": ["/v1/audio/speech"]
},
"tts-1-hd": {
"mode": "audio_speech",
"input_cost_per_character": 0.000030,
- "litellm_provider": "openai"
+ "litellm_provider": "openai",
+ "supported_endpoints": ["/v1/audio/speech"]
+ },
+ "azure/computer-use-preview": {
+ "max_tokens": 1024,
+ "max_input_tokens": 8192,
+ "max_output_tokens": 1024,
+ "input_cost_per_token": 0.000003,
+ "output_cost_per_token": 0.000012,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": false,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_reasoning": true
+ },
+ "azure/gpt-4o-audio-preview-2024-12-17": {
+ "max_tokens": 16384,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 16384,
+ "input_cost_per_token": 0.0000025,
+ "input_cost_per_audio_token": 0.00004,
+ "output_cost_per_token": 0.00001,
+ "output_cost_per_audio_token": 0.00008,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions"],
+ "supported_modalities": ["text", "audio"],
+ "supported_output_modalities": ["text", "audio"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": false,
+ "supports_vision": false,
+ "supports_prompt_caching": false,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true,
+ "supports_reasoning": false
+ },
+ "azure/gpt-4o-mini-audio-preview-2024-12-17": {
+ "max_tokens": 16384,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 16384,
+ "input_cost_per_token": 0.0000025,
+ "input_cost_per_audio_token": 0.00004,
+ "output_cost_per_token": 0.00001,
+ "output_cost_per_audio_token": 0.00008,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions"],
+ "supported_modalities": ["text", "audio"],
+ "supported_output_modalities": ["text", "audio"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": false,
+ "supports_vision": false,
+ "supports_prompt_caching": false,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true,
+ "supports_reasoning": false
+ },
+ "azure/gpt-4.1": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 2e-6,
+ "output_cost_per_token": 8e-6,
+ "input_cost_per_token_batches": 1e-6,
+ "output_cost_per_token_batches": 4e-6,
+ "cache_read_input_token_cost": 0.5e-6,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 30e-3,
+ "search_context_size_medium": 35e-3,
+ "search_context_size_high": 50e-3
+ }
+ },
+ "azure/gpt-4.1-2025-04-14": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 2e-6,
+ "output_cost_per_token": 8e-6,
+ "input_cost_per_token_batches": 1e-6,
+ "output_cost_per_token_batches": 4e-6,
+ "cache_read_input_token_cost": 0.5e-6,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 30e-3,
+ "search_context_size_medium": 35e-3,
+ "search_context_size_high": 50e-3
+ }
+ },
+ "azure/gpt-4.1-mini": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 0.4e-6,
+ "output_cost_per_token": 1.6e-6,
+ "input_cost_per_token_batches": 0.2e-6,
+ "output_cost_per_token_batches": 0.8e-6,
+ "cache_read_input_token_cost": 0.1e-6,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 25e-3,
+ "search_context_size_medium": 27.5e-3,
+ "search_context_size_high": 30e-3
+ }
+ },
+ "azure/gpt-4.1-mini-2025-04-14": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 0.4e-6,
+ "output_cost_per_token": 1.6e-6,
+ "input_cost_per_token_batches": 0.2e-6,
+ "output_cost_per_token_batches": 0.8e-6,
+ "cache_read_input_token_cost": 0.1e-6,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true,
+ "supports_web_search": true,
+ "search_context_cost_per_query": {
+ "search_context_size_low": 25e-3,
+ "search_context_size_medium": 27.5e-3,
+ "search_context_size_high": 30e-3
+ }
+ },
+ "azure/gpt-4.1-nano": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 0.1e-6,
+ "output_cost_per_token": 0.4e-6,
+ "input_cost_per_token_batches": 0.05e-6,
+ "output_cost_per_token_batches": 0.2e-6,
+ "cache_read_input_token_cost": 0.025e-6,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
+ },
+ "azure/gpt-4.1-nano-2025-04-14": {
+ "max_tokens": 32768,
+ "max_input_tokens": 1047576,
+ "max_output_tokens": 32768,
+ "input_cost_per_token": 0.1e-6,
+ "output_cost_per_token": 0.4e-6,
+ "input_cost_per_token_batches": 0.05e-6,
+ "output_cost_per_token_batches": 0.2e-6,
+ "cache_read_input_token_cost": 0.025e-6,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true,
+ "supports_native_streaming": true
+ },
+ "azure/o3": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1e-5,
+ "output_cost_per_token": 4e-5,
+ "cache_read_input_token_cost": 2.5e-6,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": false,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_reasoning": true,
+ "supports_tool_choice": true
+ },
+ "azure/o3-2025-04-16": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1e-5,
+ "output_cost_per_token": 4e-5,
+ "cache_read_input_token_cost": 2.5e-6,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": false,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_reasoning": true,
+ "supports_tool_choice": true
+ },
+ "azure/o4-mini": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1.1e-6,
+ "output_cost_per_token": 4.4e-6,
+ "cache_read_input_token_cost": 2.75e-7,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_endpoints": ["/v1/chat/completions", "/v1/batch", "/v1/responses"],
+ "supported_modalities": ["text", "image"],
+ "supported_output_modalities": ["text"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": false,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_reasoning": true,
+ "supports_tool_choice": true
},
"azure/gpt-4o-mini-realtime-preview-2024-12-17": {
"max_tokens": 4096,
@@ -1078,6 +1817,68 @@
"supports_system_messages": true,
"supports_tool_choice": true
},
+ "azure/gpt-4o-realtime-preview-2024-12-17": {
+ "max_tokens": 4096,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 4096,
+ "input_cost_per_token": 0.000005,
+ "input_cost_per_audio_token": 0.00004,
+ "cache_read_input_token_cost": 0.0000025,
+ "output_cost_per_token": 0.00002,
+ "output_cost_per_audio_token": 0.00008,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_modalities": ["text", "audio"],
+ "supported_output_modalities": ["text", "audio"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_audio_input": true,
+ "supports_audio_output": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true
+ },
+ "azure/us/gpt-4o-realtime-preview-2024-12-17": {
+ "max_tokens": 4096,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 4096,
+ "input_cost_per_token": 5.5e-6,
+ "input_cost_per_audio_token": 44e-6,
+ "cache_read_input_token_cost": 2.75e-6,
+ "cache_read_input_audio_token_cost": 2.5e-6,
+ "output_cost_per_token": 22e-6,
+ "output_cost_per_audio_token": 80e-6,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_modalities": ["text", "audio"],
+ "supported_output_modalities": ["text", "audio"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_audio_input": true,
+ "supports_audio_output": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true
+ },
+ "azure/eu/gpt-4o-realtime-preview-2024-12-17": {
+ "max_tokens": 4096,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 4096,
+ "input_cost_per_token": 5.5e-6,
+ "input_cost_per_audio_token": 44e-6,
+ "cache_read_input_token_cost": 2.75e-6,
+ "cache_read_input_audio_token_cost": 2.5e-6,
+ "output_cost_per_token": 22e-6,
+ "output_cost_per_audio_token": 80e-6,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supported_modalities": ["text", "audio"],
+ "supported_output_modalities": ["text", "audio"],
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_audio_input": true,
+ "supports_audio_output": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true
+ },
"azure/gpt-4o-realtime-preview-2024-10-01": {
"max_tokens": 4096,
"max_input_tokens": 128000,
@@ -1135,6 +1936,23 @@
"supports_system_messages": true,
"supports_tool_choice": true
},
+ "azure/o4-mini-2025-04-16": {
+ "max_tokens": 100000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 100000,
+ "input_cost_per_token": 1.1e-6,
+ "output_cost_per_token": 4.4e-6,
+ "cache_read_input_token_cost": 2.75e-7,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": false,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_response_schema": true,
+ "supports_reasoning": true,
+ "supports_tool_choice": true
+ },
"azure/o3-mini-2025-01-31": {
"max_tokens": 100000,
"max_input_tokens": 200000,
@@ -1144,6 +1962,7 @@
"cache_read_input_token_cost": 0.00000055,
"litellm_provider": "azure",
"mode": "chat",
+ "supports_reasoning": true,
"supports_vision": false,
"supports_prompt_caching": true,
"supports_tool_choice": true
@@ -1160,6 +1979,7 @@
"litellm_provider": "azure",
"mode": "chat",
"supports_vision": false,
+ "supports_reasoning": true,
"supports_prompt_caching": true,
"supports_tool_choice": true
},
@@ -1175,6 +1995,7 @@
"litellm_provider": "azure",
"mode": "chat",
"supports_vision": false,
+ "supports_reasoning": true,
"supports_prompt_caching": true,
"supports_tool_choice": true
},
@@ -1205,6 +2026,7 @@
"mode": "chat",
"supports_vision": false,
"supports_prompt_caching": true,
+ "supports_reasoning": true,
"supports_response_schema": true,
"supports_tool_choice": true
},
@@ -1220,6 +2042,7 @@
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_vision": false,
+ "supports_reasoning": true,
"supports_prompt_caching": true
},
"azure/o1-mini-2024-09-12": {
@@ -1234,6 +2057,7 @@
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_vision": false,
+ "supports_reasoning": true,
"supports_prompt_caching": true
},
"azure/us/o1-mini-2024-09-12": {
@@ -1280,6 +2104,7 @@
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_vision": true,
+ "supports_reasoning": true,
"supports_prompt_caching": true,
"supports_tool_choice": true
},
@@ -1295,6 +2120,7 @@
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_vision": true,
+ "supports_reasoning": true,
"supports_prompt_caching": true,
"supports_tool_choice": true
},
@@ -1340,6 +2166,7 @@
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_vision": false,
+ "supports_reasoning": true,
"supports_prompt_caching": true
},
"azure/o1-preview-2024-09-12": {
@@ -1354,6 +2181,7 @@
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_vision": false,
+ "supports_reasoning": true,
"supports_prompt_caching": true
},
"azure/us/o1-preview-2024-09-12": {
@@ -1384,17 +2212,53 @@
"supports_vision": false,
"supports_prompt_caching": true
},
- "azure/gpt-4o": {
- "max_tokens": 4096,
+ "azure/gpt-4.5-preview": {
+ "max_tokens": 16384,
"max_input_tokens": 128000,
- "max_output_tokens": 4096,
- "input_cost_per_token": 0.000005,
- "output_cost_per_token": 0.000015,
+ "max_output_tokens": 16384,
+ "input_cost_per_token": 0.000075,
+ "output_cost_per_token": 0.00015,
+ "input_cost_per_token_batches": 0.0000375,
+ "output_cost_per_token_batches": 0.000075,
+ "cache_read_input_token_cost": 0.0000375,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_system_messages": true,
+ "supports_tool_choice": true
+ },
+ "azure/gpt-4o": {
+ "max_tokens": 16384,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 16384,
+ "input_cost_per_token": 0.0000025,
+ "output_cost_per_token": 0.00001,
"cache_read_input_token_cost": 0.00000125,
"litellm_provider": "azure",
"mode": "chat",
"supports_function_calling": true,
"supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_tool_choice": true
+ },
+ "azure/global/gpt-4o-2024-11-20": {
+ "max_tokens": 16384,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 16384,
+ "input_cost_per_token": 0.0000025,
+ "output_cost_per_token": 0.00001,
+ "cache_read_input_token_cost": 0.00000125,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
"supports_vision": true,
"supports_prompt_caching": true,
"supports_tool_choice": true
@@ -1403,8 +2267,24 @@
"max_tokens": 16384,
"max_input_tokens": 128000,
"max_output_tokens": 16384,
- "input_cost_per_token": 0.00000275,
- "output_cost_per_token": 0.000011,
+ "input_cost_per_token": 0.0000025,
+ "output_cost_per_token": 0.00001,
+ "cache_read_input_token_cost": 0.00000125,
+ "litellm_provider": "azure",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_parallel_function_calling": true,
+ "supports_response_schema": true,
+ "supports_vision": true,
+ "supports_prompt_caching": true,
+ "supports_tool_choice": true
+ },
+ "azure/global/gpt-4o-2024-08-06": {
+ "max_tokens": 16384,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 16384,
+ "input_cost_per_token": 0.0000025,
+ "output_cost_per_token": 0.00001,
"cache_read_input_token_cost": 0.00000125,
"litellm_provider": "azure",
"mode": "chat",
@@ -1421,12 +2301,14 @@
"max_output_tokens": 16384,
"input_cost_per_token": 0.00000275,
"output_cost_per_token": 0.000011,
+ "cache_read_input_token_cost": 0.00000125,
"litellm_provider": "azure",
"mode": "chat",
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_response_schema": true,
"supports_vision": true,
+ "supports_prompt_caching": true,
"supports_tool_choice": true
},
"azure/us/gpt-4o-2024-11-20": {
@@ -1958,6 +2840,7 @@
"litellm_provider": "azure_ai",
"mode": "chat",
"supports_tool_choice": true,
+ "supports_reasoning": true,
"source": "https://techcommunity.microsoft.com/blog/machinelearningblog/deepseek-r1-improved-performance-higher-limits-and-transparent-pricing/4386367"
},
"azure_ai/deepseek-v3": {
@@ -2014,6 +2897,18 @@
"mode": "chat",
"supports_tool_choice": true
},
+ "azure_ai/mistral-small-2503": {
+ "max_tokens": 128000,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 128000,
+ "input_cost_per_token": 0.000001,
+ "output_cost_per_token": 0.000003,
+ "litellm_provider": "azure_ai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_tool_choice": true
+ },
"azure_ai/mistral-large-2407": {
"max_tokens": 4096,
"max_input_tokens": 128000,
@@ -2026,6 +2921,18 @@
"source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/000-000.mistral-ai-large-2407-offer?tab=Overview",
"supports_tool_choice": true
},
+ "azure_ai/mistral-large-latest": {
+ "max_tokens": 4096,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 4096,
+ "input_cost_per_token": 0.000002,
+ "output_cost_per_token": 0.000006,
+ "litellm_provider": "azure_ai",
+ "supports_function_calling": true,
+ "mode": "chat",
+ "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/000-000.mistral-ai-large-2407-offer?tab=Overview",
+ "supports_tool_choice": true
+ },
"azure_ai/ministral-3b": {
"max_tokens": 4096,
"max_input_tokens": 128000,
@@ -2123,25 +3030,26 @@
"max_tokens": 4096,
"max_input_tokens": 131072,
"max_output_tokens": 4096,
- "input_cost_per_token": 0,
- "output_cost_per_token": 0,
+ "input_cost_per_token": 0.000000075,
+ "output_cost_per_token": 0.0000003,
"litellm_provider": "azure_ai",
"mode": "chat",
"supports_function_calling": true,
- "source": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/models-featured#microsoft"
+ "source": "https://techcommunity.microsoft.com/blog/Azure-AI-Services-blog/announcing-new-phi-pricing-empowering-your-business-with-small-language-models/4395112"
},
"azure_ai/Phi-4-multimodal-instruct": {
"max_tokens": 4096,
"max_input_tokens": 131072,
"max_output_tokens": 4096,
- "input_cost_per_token": 0,
- "output_cost_per_token": 0,
+ "input_cost_per_token": 0.00000008,
+ "input_cost_per_audio_token": 0.000004,
+ "output_cost_per_token": 0.00000032,
"litellm_provider": "azure_ai",
"mode": "chat",
"supports_audio_input": true,
"supports_function_calling": true,
"supports_vision": true,
- "source": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/models-featured#microsoft"
+ "source": "https://techcommunity.microsoft.com/blog/Azure-AI-Services-blog/announcing-new-phi-pricing-empowering-your-business-with-small-language-models/4395112"
},
"azure_ai/Phi-4": {
"max_tokens": 16384,
@@ -2633,6 +3541,7 @@
"supports_function_calling": true,
"supports_assistant_prefill": true,
"supports_tool_choice": true,
+ "supports_reasoning": true,
"supports_prompt_caching": true
},
"deepseek/deepseek-chat": {
@@ -2746,6 +3655,87 @@
"supports_vision": true,
"supports_tool_choice": true
},
+ "xai/grok-3-beta": {
+ "max_tokens": 131072,
+ "max_input_tokens": 131072,
+ "max_output_tokens": 131072,
+ "input_cost_per_token": 0.000003,
+ "output_cost_per_token": 0.000015,
+ "litellm_provider": "xai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_tool_choice": true,
+ "supports_response_schema": false,
+ "source": "https://x.ai/api#pricing"
+ },
+ "xai/grok-3-fast-beta": {
+ "max_tokens": 131072,
+ "max_input_tokens": 131072,
+ "max_output_tokens": 131072,
+ "input_cost_per_token": 0.000005,
+ "output_cost_per_token": 0.000025,
+ "litellm_provider": "xai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_tool_choice": true,
+ "supports_response_schema": false,
+ "source": "https://x.ai/api#pricing"
+ },
+ "xai/grok-3-fast-latest": {
+ "max_tokens": 131072,
+ "max_input_tokens": 131072,
+ "max_output_tokens": 131072,
+ "input_cost_per_token": 0.000005,
+ "output_cost_per_token": 0.000025,
+ "litellm_provider": "xai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_tool_choice": true,
+ "supports_response_schema": false,
+ "source": "https://x.ai/api#pricing"
+ },
+ "xai/grok-3-mini-beta": {
+ "max_tokens": 131072,
+ "max_input_tokens": 131072,
+ "max_output_tokens": 131072,
+ "input_cost_per_token": 0.0000003,
+ "output_cost_per_token": 0.0000005,
+ "litellm_provider": "xai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_tool_choice": true,
+ "supports_reasoning": true,
+ "supports_response_schema": false,
+ "source": "https://x.ai/api#pricing"
+ },
+ "xai/grok-3-mini-fast-beta": {
+ "max_tokens": 131072,
+ "max_input_tokens": 131072,
+ "max_output_tokens": 131072,
+ "input_cost_per_token": 0.0000006,
+ "output_cost_per_token": 0.000004,
+ "litellm_provider": "xai",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_tool_choice": true,
+ "supports_reasoning": true,
+ "supports_response_schema": false,
+ "source": "https://x.ai/api#pricing"
+ },
+ "xai/grok-3-mini-fast-latest": {
+ "max_tokens": 131072,
+ "max_input_tokens": 131072,
+ "max_output_tokens": 131072,
+ "input_cost_per_token": 0.0000006,
+ "output_cost_per_token": 0.000004,
+ "litellm_provider": "xai",
+ "mode": "chat",
+ "supports_reasoning": true,
+ "supports_function_calling": true,
+ "supports_tool_choice": true,
+ "supports_response_schema": false,
+ "source": "https://x.ai/api#pricing"
+ },
"xai/grok-vision-beta": {
"max_tokens": 8192,
"max_input_tokens": 8192,
@@ -2816,6 +3806,7 @@
"mode": "chat",
"supports_system_messages": false,
"supports_function_calling": false,
+ "supports_reasoning": true,
"supports_response_schema": false,
"supports_tool_choice": true
},
@@ -3047,6 +4038,24 @@
"supports_response_schema": true,
"supports_tool_choice": true
},
+ "groq/whisper-large-v3": {
+ "mode": "audio_transcription",
+ "input_cost_per_second": 0.00003083,
+ "output_cost_per_second": 0,
+ "litellm_provider": "groq"
+ },
+ "groq/whisper-large-v3-turbo": {
+ "mode": "audio_transcription",
+ "input_cost_per_second": 0.00001111,
+ "output_cost_per_second": 0,
+ "litellm_provider": "groq"
+ },
+ "groq/distil-whisper-large-v3-en": {
+ "mode": "audio_transcription",
+ "input_cost_per_second": 0.00000556,
+ "output_cost_per_second": 0,
+ "litellm_provider": "groq"
+ },
"cerebras/llama3.1-8b": {
"max_tokens": 128000,
"max_input_tokens": 128000,
@@ -3163,7 +4172,7 @@
"input_cost_per_token": 0.0000008,
"output_cost_per_token": 0.000004,
"cache_creation_input_token_cost": 0.000001,
- "cache_read_input_token_cost": 0.0000008,
+ "cache_read_input_token_cost": 0.00000008,
"litellm_provider": "anthropic",
"mode": "chat",
"supports_function_calling": true,
@@ -3309,7 +4318,8 @@
"supports_prompt_caching": true,
"supports_response_schema": true,
"deprecation_date": "2025-06-01",
- "supports_tool_choice": true
+ "supports_tool_choice": true,
+ "supports_reasoning": true
},
"claude-3-7-sonnet-20250219": {
"max_tokens": 128000,
@@ -3329,7 +4339,8 @@
"supports_prompt_caching": true,
"supports_response_schema": true,
"deprecation_date": "2026-02-01",
- "supports_tool_choice": true
+ "supports_tool_choice": true,
+ "supports_reasoning": true
},
"claude-3-5-sonnet-20241022": {
"max_tokens": 8192,
@@ -4197,30 +5208,20 @@
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models",
"supports_tool_choice": true
},
- "gemini-2.0-pro-exp-02-05": {
- "max_tokens": 8192,
- "max_input_tokens": 2097152,
- "max_output_tokens": 8192,
+ "gemini-2.5-pro-exp-03-25": {
+ "max_tokens": 65536,
+ "max_input_tokens": 1048576,
+ "max_output_tokens": 65536,
"max_images_per_prompt": 3000,
"max_videos_per_prompt": 10,
"max_video_length": 1,
"max_audio_length_hours": 8.4,
"max_audio_per_prompt": 1,
"max_pdf_size_mb": 30,
- "input_cost_per_image": 0,
- "input_cost_per_video_per_second": 0,
- "input_cost_per_audio_per_second": 0,
- "input_cost_per_token": 0,
- "input_cost_per_character": 0,
- "input_cost_per_token_above_128k_tokens": 0,
- "input_cost_per_character_above_128k_tokens": 0,
- "input_cost_per_image_above_128k_tokens": 0,
- "input_cost_per_video_per_second_above_128k_tokens": 0,
- "input_cost_per_audio_per_second_above_128k_tokens": 0,
- "output_cost_per_token": 0,
- "output_cost_per_character": 0,
- "output_cost_per_token_above_128k_tokens": 0,
- "output_cost_per_character_above_128k_tokens": 0,
+ "input_cost_per_token": 0.00000125,
+ "input_cost_per_token_above_200k_tokens": 0.0000025,
+ "output_cost_per_token": 0.00001,
+ "output_cost_per_token_above_200k_tokens": 0.000015,
"litellm_provider": "vertex_ai-language-models",
"mode": "chat",
"supports_system_messages": true,
@@ -4231,6 +5232,38 @@
"supports_pdf_input": true,
"supports_response_schema": true,
"supports_tool_choice": true,
+ "supported_endpoints": ["/v1/chat/completions", "/v1/completions"],
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
+ "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
+ },
+ "gemini-2.0-pro-exp-02-05": {
+ "max_tokens": 8192,
+ "max_input_tokens": 2097152,
+ "max_output_tokens": 8192,
+ "max_images_per_prompt": 3000,
+ "max_videos_per_prompt": 10,
+ "max_video_length": 1,
+ "max_audio_length_hours": 8.4,
+ "max_audio_per_prompt": 1,
+ "max_pdf_size_mb": 30,
+ "input_cost_per_token": 0.00000125,
+ "input_cost_per_token_above_200k_tokens": 0.0000025,
+ "output_cost_per_token": 0.00001,
+ "output_cost_per_token_above_200k_tokens": 0.000015,
+ "litellm_provider": "vertex_ai-language-models",
+ "mode": "chat",
+ "supports_system_messages": true,
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_audio_input": true,
+ "supports_video_input": true,
+ "supports_pdf_input": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true,
+ "supported_endpoints": ["/v1/chat/completions", "/v1/completions"],
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"gemini-2.0-flash-exp": {
@@ -4264,6 +5297,8 @@
"supports_vision": true,
"supports_response_schema": true,
"supports_audio_output": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text", "image"],
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing",
"supports_tool_choice": true
},
@@ -4288,6 +5323,8 @@
"supports_response_schema": true,
"supports_audio_output": true,
"supports_tool_choice": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text", "image"],
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"gemini-2.0-flash-thinking-exp": {
@@ -4321,6 +5358,8 @@
"supports_vision": true,
"supports_response_schema": true,
"supports_audio_output": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text", "image"],
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
"supports_tool_choice": true
},
@@ -4355,9 +5394,172 @@
"supports_vision": true,
"supports_response_schema": false,
"supports_audio_output": false,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text", "image"],
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
"supports_tool_choice": true
},
+ "gemini/gemini-2.5-flash-preview-04-17": {
+ "max_tokens": 65536,
+ "max_input_tokens": 1048576,
+ "max_output_tokens": 65536,
+ "max_images_per_prompt": 3000,
+ "max_videos_per_prompt": 10,
+ "max_video_length": 1,
+ "max_audio_length_hours": 8.4,
+ "max_audio_per_prompt": 1,
+ "max_pdf_size_mb": 30,
+ "input_cost_per_audio_token": 1e-6,
+ "input_cost_per_token": 0.15e-6,
+ "output_cost_per_token": 0.6e-6,
+ "output_cost_per_reasoning_token": 3.5e-6,
+ "litellm_provider": "gemini",
+ "mode": "chat",
+ "rpm": 10,
+ "tpm": 250000,
+ "supports_system_messages": true,
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_reasoning": true,
+ "supports_response_schema": true,
+ "supports_audio_output": false,
+ "supports_tool_choice": true,
+ "supported_endpoints": ["/v1/chat/completions", "/v1/completions"],
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
+ "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview"
+ },
+ "gemini-2.5-flash-preview-04-17": {
+ "max_tokens": 65536,
+ "max_input_tokens": 1048576,
+ "max_output_tokens": 65536,
+ "max_images_per_prompt": 3000,
+ "max_videos_per_prompt": 10,
+ "max_video_length": 1,
+ "max_audio_length_hours": 8.4,
+ "max_audio_per_prompt": 1,
+ "max_pdf_size_mb": 30,
+ "input_cost_per_audio_token": 1e-6,
+ "input_cost_per_token": 0.15e-6,
+ "output_cost_per_token": 0.6e-6,
+ "output_cost_per_reasoning_token": 3.5e-6,
+ "litellm_provider": "vertex_ai-language-models",
+ "mode": "chat",
+ "supports_reasoning": true,
+ "supports_system_messages": true,
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_response_schema": true,
+ "supports_audio_output": false,
+ "supports_tool_choice": true,
+ "supported_endpoints": ["/v1/chat/completions", "/v1/completions", "/v1/batch"],
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
+ "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview"
+ },
+ "gemini-2.0-flash": {
+ "max_tokens": 8192,
+ "max_input_tokens": 1048576,
+ "max_output_tokens": 8192,
+ "max_images_per_prompt": 3000,
+ "max_videos_per_prompt": 10,
+ "max_video_length": 1,
+ "max_audio_length_hours": 8.4,
+ "max_audio_per_prompt": 1,
+ "max_pdf_size_mb": 30,
+ "input_cost_per_audio_token": 0.0000007,
+ "input_cost_per_token": 0.0000001,
+ "output_cost_per_token": 0.0000004,
+ "litellm_provider": "vertex_ai-language-models",
+ "mode": "chat",
+ "supports_system_messages": true,
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_response_schema": true,
+ "supports_audio_output": true,
+ "supports_audio_input": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text", "image"],
+ "supports_tool_choice": true,
+ "source": "https://ai.google.dev/pricing#2_0flash"
+ },
+ "gemini-2.0-flash-lite": {
+ "max_input_tokens": 1048576,
+ "max_output_tokens": 8192,
+ "max_images_per_prompt": 3000,
+ "max_videos_per_prompt": 10,
+ "max_video_length": 1,
+ "max_audio_length_hours": 8.4,
+ "max_audio_per_prompt": 1,
+ "max_pdf_size_mb": 50,
+ "input_cost_per_audio_token": 0.000000075,
+ "input_cost_per_token": 0.000000075,
+ "output_cost_per_token": 0.0000003,
+ "litellm_provider": "vertex_ai-language-models",
+ "mode": "chat",
+ "supports_system_messages": true,
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_response_schema": true,
+ "supports_audio_output": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
+ "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
+ "supports_tool_choice": true
+ },
+ "gemini-2.0-flash-lite-001": {
+ "max_input_tokens": 1048576,
+ "max_output_tokens": 8192,
+ "max_images_per_prompt": 3000,
+ "max_videos_per_prompt": 10,
+ "max_video_length": 1,
+ "max_audio_length_hours": 8.4,
+ "max_audio_per_prompt": 1,
+ "max_pdf_size_mb": 50,
+ "input_cost_per_audio_token": 0.000000075,
+ "input_cost_per_token": 0.000000075,
+ "output_cost_per_token": 0.0000003,
+ "litellm_provider": "vertex_ai-language-models",
+ "mode": "chat",
+ "supports_system_messages": true,
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_response_schema": true,
+ "supports_audio_output": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
+ "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
+ "supports_tool_choice": true
+ },
+ "gemini-2.5-pro-preview-03-25": {
+ "max_tokens": 65536,
+ "max_input_tokens": 1048576,
+ "max_output_tokens": 65536,
+ "max_images_per_prompt": 3000,
+ "max_videos_per_prompt": 10,
+ "max_video_length": 1,
+ "max_audio_length_hours": 8.4,
+ "max_audio_per_prompt": 1,
+ "max_pdf_size_mb": 30,
+ "input_cost_per_audio_token": 0.00000125,
+ "input_cost_per_token": 0.00000125,
+ "input_cost_per_token_above_200k_tokens": 0.0000025,
+ "output_cost_per_token": 0.00001,
+ "output_cost_per_token_above_200k_tokens": 0.000015,
+ "litellm_provider": "vertex_ai-language-models",
+ "mode": "chat",
+ "supports_reasoning": true,
+ "supports_system_messages": true,
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_response_schema": true,
+ "supports_audio_output": false,
+ "supports_tool_choice": true,
+ "supported_endpoints": ["/v1/chat/completions", "/v1/completions", "/v1/batch"],
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
+ "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview"
+ },
"gemini/gemini-2.0-pro-exp-02-05": {
"max_tokens": 8192,
"max_input_tokens": 2097152,
@@ -4418,9 +5620,38 @@
"supports_vision": true,
"supports_response_schema": true,
"supports_audio_output": true,
+ "supports_audio_input": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text", "image"],
"supports_tool_choice": true,
"source": "https://ai.google.dev/pricing#2_0flash"
},
+ "gemini/gemini-2.0-flash-lite": {
+ "max_input_tokens": 1048576,
+ "max_output_tokens": 8192,
+ "max_images_per_prompt": 3000,
+ "max_videos_per_prompt": 10,
+ "max_video_length": 1,
+ "max_audio_length_hours": 8.4,
+ "max_audio_per_prompt": 1,
+ "max_pdf_size_mb": 50,
+ "input_cost_per_audio_token": 0.000000075,
+ "input_cost_per_token": 0.000000075,
+ "output_cost_per_token": 0.0000003,
+ "litellm_provider": "gemini",
+ "mode": "chat",
+ "tpm": 4000000,
+ "rpm": 4000,
+ "supports_system_messages": true,
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_response_schema": true,
+ "supports_audio_output": true,
+ "supports_tool_choice": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
+ "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.0-flash-lite"
+ },
"gemini/gemini-2.0-flash-001": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
@@ -4444,8 +5675,39 @@
"supports_response_schema": true,
"supports_audio_output": false,
"supports_tool_choice": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text", "image"],
"source": "https://ai.google.dev/pricing#2_0flash"
},
+ "gemini/gemini-2.5-pro-preview-03-25": {
+ "max_tokens": 65536,
+ "max_input_tokens": 1048576,
+ "max_output_tokens": 65536,
+ "max_images_per_prompt": 3000,
+ "max_videos_per_prompt": 10,
+ "max_video_length": 1,
+ "max_audio_length_hours": 8.4,
+ "max_audio_per_prompt": 1,
+ "max_pdf_size_mb": 30,
+ "input_cost_per_audio_token": 0.0000007,
+ "input_cost_per_token": 0.00000125,
+ "input_cost_per_token_above_200k_tokens": 0.0000025,
+ "output_cost_per_token": 0.00001,
+ "output_cost_per_token_above_200k_tokens": 0.000015,
+ "litellm_provider": "gemini",
+ "mode": "chat",
+ "rpm": 10000,
+ "tpm": 10000000,
+ "supports_system_messages": true,
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_response_schema": true,
+ "supports_audio_output": false,
+ "supports_tool_choice": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
+ "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview"
+ },
"gemini/gemini-2.0-flash-exp": {
"max_tokens": 8192,
"max_input_tokens": 1048576,
@@ -4479,6 +5741,8 @@
"supports_audio_output": true,
"tpm": 4000000,
"rpm": 10,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text", "image"],
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
"supports_tool_choice": true
},
@@ -4505,6 +5769,8 @@
"supports_response_schema": true,
"supports_audio_output": false,
"supports_tool_choice": true,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text"],
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash-lite"
},
"gemini/gemini-2.0-flash-thinking-exp": {
@@ -4540,6 +5806,8 @@
"supports_audio_output": true,
"tpm": 4000000,
"rpm": 10,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text", "image"],
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
"supports_tool_choice": true
},
@@ -4576,6 +5844,8 @@
"supports_audio_output": true,
"tpm": 4000000,
"rpm": 10,
+ "supported_modalities": ["text", "image", "audio", "video"],
+ "supported_output_modalities": ["text", "image"],
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
"supports_tool_choice": true
},
@@ -4735,6 +6005,7 @@
"supports_prompt_caching": true,
"supports_response_schema": true,
"deprecation_date": "2025-06-01",
+ "supports_reasoning": true,
"supports_tool_choice": true
},
"vertex_ai/claude-3-haiku": {
@@ -4916,6 +6187,29 @@
"supports_function_calling": true,
"supports_tool_choice": true
},
+ "vertex_ai/mistral-small-2503@001": {
+ "max_tokens": 8191,
+ "max_input_tokens": 32000,
+ "max_output_tokens": 8191,
+ "input_cost_per_token": 0.000001,
+ "output_cost_per_token": 0.000003,
+ "litellm_provider": "vertex_ai-mistral_models",
+ "supports_function_calling": true,
+ "mode": "chat",
+ "supports_tool_choice": true
+ },
+ "vertex_ai/mistral-small-2503": {
+ "max_tokens": 128000,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 128000,
+ "input_cost_per_token": 0.000001,
+ "output_cost_per_token": 0.000003,
+ "litellm_provider": "vertex_ai-mistral_models",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_vision": true,
+ "supports_tool_choice": true
+ },
"vertex_ai/jamba-1.5-mini@001": {
"max_tokens": 256000,
"max_input_tokens": 256000,
@@ -5067,6 +6361,51 @@
"mode": "embedding",
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models"
},
+ "multimodalembedding": {
+ "max_tokens": 2048,
+ "max_input_tokens": 2048,
+ "output_vector_size": 768,
+ "input_cost_per_character": 0.0000002,
+ "input_cost_per_image": 0.0001,
+ "input_cost_per_video_per_second": 0.0005,
+ "input_cost_per_video_per_second_above_8s_interval": 0.0010,
+ "input_cost_per_video_per_second_above_15s_interval": 0.0020,
+ "input_cost_per_token": 0.0000008,
+ "output_cost_per_token": 0,
+ "litellm_provider": "vertex_ai-embedding-models",
+ "mode": "embedding",
+ "supported_endpoints": ["/v1/embeddings"],
+ "supported_modalities": ["text", "image", "video"],
+ "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models"
+ },
+ "multimodalembedding@001": {
+ "max_tokens": 2048,
+ "max_input_tokens": 2048,
+ "output_vector_size": 768,
+ "input_cost_per_character": 0.0000002,
+ "input_cost_per_image": 0.0001,
+ "input_cost_per_video_per_second": 0.0005,
+ "input_cost_per_video_per_second_above_8s_interval": 0.0010,
+ "input_cost_per_video_per_second_above_15s_interval": 0.0020,
+ "input_cost_per_token": 0.0000008,
+ "output_cost_per_token": 0,
+ "litellm_provider": "vertex_ai-embedding-models",
+ "mode": "embedding",
+ "supported_endpoints": ["/v1/embeddings"],
+ "supported_modalities": ["text", "image", "video"],
+ "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models"
+ },
+ "text-embedding-large-exp-03-07": {
+ "max_tokens": 8192,
+ "max_input_tokens": 8192,
+ "output_vector_size": 3072,
+ "input_cost_per_character": 0.000000025,
+ "input_cost_per_token": 0.0000001,
+ "output_cost_per_token": 0,
+ "litellm_provider": "vertex_ai-embedding-models",
+ "mode": "embedding",
+ "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models"
+ },
"textembedding-gecko": {
"max_tokens": 3072,
"max_input_tokens": 3072,
@@ -5984,6 +7323,7 @@
"mode": "chat",
"supports_function_calling": true,
"supports_assistant_prefill": true,
+ "supports_reasoning": true,
"supports_tool_choice": true,
"supports_prompt_caching": true
},
@@ -6159,6 +7499,7 @@
"mode": "chat",
"supports_function_calling": true,
"supports_vision": true,
+ "supports_reasoning": true,
"tool_use_system_prompt_tokens": 159,
"supports_assistant_prefill": true,
"supports_tool_choice": true
@@ -6174,6 +7515,7 @@
"mode": "chat",
"supports_function_calling": true,
"supports_vision": true,
+ "supports_reasoning": true,
"tool_use_system_prompt_tokens": 159,
"supports_tool_choice": true
},
@@ -6196,6 +7538,14 @@
"mode": "chat",
"supports_tool_choice": true
},
+ "mistralai/mistral-small-3.1-24b-instruct": {
+ "max_tokens": 32000,
+ "input_cost_per_token": 0.0000001,
+ "output_cost_per_token": 0.0000003,
+ "litellm_provider": "openrouter",
+ "mode": "chat",
+ "supports_tool_choice": true
+ },
"openrouter/cognitivecomputations/dolphin-mixtral-8x7b": {
"max_tokens": 32769,
"input_cost_per_token": 0.0000005,
@@ -6324,12 +7674,40 @@
"supports_vision": false,
"supports_tool_choice": true
},
+ "openrouter/openai/o3-mini": {
+ "max_tokens": 65536,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 65536,
+ "input_cost_per_token": 0.0000011,
+ "output_cost_per_token": 0.0000044,
+ "litellm_provider": "openrouter",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_reasoning": true,
+ "supports_parallel_function_calling": true,
+ "supports_vision": false,
+ "supports_tool_choice": true
+ },
+ "openrouter/openai/o3-mini-high": {
+ "max_tokens": 65536,
+ "max_input_tokens": 128000,
+ "max_output_tokens": 65536,
+ "input_cost_per_token": 0.0000011,
+ "output_cost_per_token": 0.0000044,
+ "litellm_provider": "openrouter",
+ "mode": "chat",
+ "supports_function_calling": true,
+ "supports_reasoning": true,
+ "supports_parallel_function_calling": true,
+ "supports_vision": false,
+ "supports_tool_choice": true
+ },
"openrouter/openai/gpt-4o": {
"max_tokens": 4096,
"max_input_tokens": 128000,
"max_output_tokens": 4096,
- "input_cost_per_token": 0.000005,
- "output_cost_per_token": 0.000015,
+ "input_cost_per_token": 0.0000025,
+ "output_cost_per_token": 0.000010,
"litellm_provider": "openrouter",
"mode": "chat",
"supports_function_calling": true,
@@ -7137,6 +8515,7 @@
"supports_assistant_prefill": true,
"supports_prompt_caching": true,
"supports_response_schema": true,
+ "supports_reasoning": true,
"supports_tool_choice": true
},
"anthropic.claude-3-5-sonnet-20241022-v2:0": {
@@ -7254,7 +8633,8 @@
"supports_assistant_prefill": true,
"supports_prompt_caching": true,
"supports_response_schema": true,
- "supports_tool_choice": true
+ "supports_tool_choice": true,
+ "supports_reasoning": true
},
"us.anthropic.claude-3-haiku-20240307-v1:0": {
"max_tokens": 4096,
@@ -7914,7 +9294,8 @@
"input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.0000020,
"litellm_provider": "bedrock",
- "mode": "chat"
+ "mode": "chat",
+ "supports_tool_choice": true
},
"bedrock/*/1-month-commitment/cohere.command-text-v14": {
"max_tokens": 4096,
@@ -7923,7 +9304,8 @@
"input_cost_per_second": 0.011,
"output_cost_per_second": 0.011,
"litellm_provider": "bedrock",
- "mode": "chat"
+ "mode": "chat",
+ "supports_tool_choice": true
},
"bedrock/*/6-month-commitment/cohere.command-text-v14": {
"max_tokens": 4096,
@@ -7932,7 +9314,8 @@
"input_cost_per_second": 0.0066027,
"output_cost_per_second": 0.0066027,
"litellm_provider": "bedrock",
- "mode": "chat"
+ "mode": "chat",
+ "supports_tool_choice": true
},
"cohere.command-light-text-v14": {
"max_tokens": 4096,
@@ -7941,7 +9324,8 @@
"input_cost_per_token": 0.0000003,
"output_cost_per_token": 0.0000006,
"litellm_provider": "bedrock",
- "mode": "chat"
+ "mode": "chat",
+ "supports_tool_choice": true
},
"bedrock/*/1-month-commitment/cohere.command-light-text-v14": {
"max_tokens": 4096,
@@ -7950,7 +9334,8 @@
"input_cost_per_second": 0.001902,
"output_cost_per_second": 0.001902,
"litellm_provider": "bedrock",
- "mode": "chat"
+ "mode": "chat",
+ "supports_tool_choice": true
},
"bedrock/*/6-month-commitment/cohere.command-light-text-v14": {
"max_tokens": 4096,
@@ -7959,7 +9344,8 @@
"input_cost_per_second": 0.0011416,
"output_cost_per_second": 0.0011416,
"litellm_provider": "bedrock",
- "mode": "chat"
+ "mode": "chat",
+ "supports_tool_choice": true
},
"cohere.command-r-plus-v1:0": {
"max_tokens": 4096,
@@ -7968,7 +9354,8 @@
"input_cost_per_token": 0.0000030,
"output_cost_per_token": 0.000015,
"litellm_provider": "bedrock",
- "mode": "chat"
+ "mode": "chat",
+ "supports_tool_choice": true
},
"cohere.command-r-v1:0": {
"max_tokens": 4096,
@@ -7977,7 +9364,8 @@
"input_cost_per_token": 0.0000005,
"output_cost_per_token": 0.0000015,
"litellm_provider": "bedrock",
- "mode": "chat"
+ "mode": "chat",
+ "supports_tool_choice": true
},
"cohere.embed-english-v3": {
"max_tokens": 512,
@@ -8005,6 +9393,7 @@
"output_cost_per_token": 0.0000054,
"litellm_provider": "bedrock_converse",
"mode": "chat",
+ "supports_reasoning": true,
"supports_function_calling": false,
"supports_tool_choice": false
@@ -9817,6 +11206,23 @@
"litellm_provider": "voyage",
"mode": "rerank"
},
+ "databricks/databricks-claude-3-7-sonnet": {
+ "max_tokens": 200000,
+ "max_input_tokens": 200000,
+ "max_output_tokens": 128000,
+ "input_cost_per_token": 0.0000025,
+ "input_dbu_cost_per_token": 0.00003571,
+ "output_cost_per_token": 0.00017857,
+ "output_db_cost_per_token": 0.000214286,
+ "litellm_provider": "databricks",
+ "mode": "chat",
+ "source": "https://www.databricks.com/product/pricing/foundation-model-serving",
+ "metadata": {"notes": "Input/output cost per token is dbu cost * $0.070, based on databricks Claude 3.7 conversion. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation."},
+ "supports_assistant_prefill": true,
+ "supports_function_calling": true,
+ "supports_tool_choice": true,
+ "supports_reasoning": true
+ },
"databricks/databricks-meta-llama-3-1-405b-instruct": {
"max_tokens": 128000,
"max_input_tokens": 128000,
@@ -9845,7 +11251,7 @@
"metadata": {"notes": "Input/output cost per token is dbu cost * $0.070, based on databricks Llama 3.1 70B conversion. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation."},
"supports_tool_choice": true
},
- "databricks/meta-llama-3.3-70b-instruct": {
+ "databricks/databricks-meta-llama-3-3-70b-instruct": {
"max_tokens": 128000,
"max_input_tokens": 128000,
"max_output_tokens": 128000,
@@ -10073,6 +11479,7 @@
"max_input_tokens": 32768,
"max_output_tokens": 8192,
"litellm_provider": "snowflake",
+ "supports_reasoning": true,
"mode": "chat"
},
"snowflake/snowflake-arctic": {
diff --git a/litellm/openai-responses-starter-app b/litellm/openai-responses-starter-app
new file mode 160000
index 0000000000..bf0485467c
--- /dev/null
+++ b/litellm/openai-responses-starter-app
@@ -0,0 +1 @@
+Subproject commit bf0485467c343957ba5c217db777f407b2e65453
diff --git a/litellm/proxy/_experimental/mcp_server/mcp_server_manager.py b/litellm/proxy/_experimental/mcp_server/mcp_server_manager.py
new file mode 100644
index 0000000000..9becb80758
--- /dev/null
+++ b/litellm/proxy/_experimental/mcp_server/mcp_server_manager.py
@@ -0,0 +1,153 @@
+"""
+MCP Client Manager
+
+This class is responsible for managing MCP SSE clients.
+
+It is used by the LiteLLM proxy server to manage configured MCP SSE servers and route tool calls to them.
+"""
+
+import asyncio
+import json
+from typing import Any, Dict, List, Optional
+
+from mcp import ClientSession
+from mcp.client.sse import sse_client
+from mcp.types import Tool as MCPTool
+
+from litellm._logging import verbose_logger
+from litellm.types.mcp_server.mcp_server_manager import MCPInfo, MCPSSEServer
+
+
+class MCPServerManager:
+ def __init__(self):
+ self.mcp_servers: List[MCPSSEServer] = []
+ """
+ eg.
+ [
+ {
+ "name": "zapier_mcp_server",
+ "url": "https://actions.zapier.com/mcp/sk-ak-2ew3bofIeQIkNoeKIdXrF1Hhhp/sse"
+ },
+ {
+ "name": "google_drive_mcp_server",
+ "url": "https://actions.zapier.com/mcp/sk-ak-2ew3bofIeQIkNoeKIdXrF1Hhhp/sse"
+ }
+ ]
+ """
+
+ self.tool_name_to_mcp_server_name_mapping: Dict[str, str] = {}
+ """
+ {
+ "gmail_send_email": "zapier_mcp_server",
+ }
+ """
+
+ def load_servers_from_config(self, mcp_servers_config: Dict[str, Any]):
+ """
+ Load the MCP Servers from the config
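+
+        Expected config shape (inferred from the key lookups below):
+            {"<server_name>": {"url": "<sse_url>", "mcp_info": {...}}}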
+ """
+ for server_name, server_config in mcp_servers_config.items():
+ _mcp_info: dict = server_config.get("mcp_info", None) or {}
+ mcp_info = MCPInfo(**_mcp_info)
+ mcp_info["server_name"] = server_name
+ self.mcp_servers.append(
+ MCPSSEServer(
+ name=server_name,
+ url=server_config["url"],
+ mcp_info=mcp_info,
+ )
+ )
+ verbose_logger.debug(
+ f"Loaded MCP Servers: {json.dumps(self.mcp_servers, indent=4, default=str)}"
+ )
+
+ self.initialize_tool_name_to_mcp_server_name_mapping()
+
+ async def list_tools(self) -> List[MCPTool]:
+ """
+ List all tools available across all MCP Servers.
+
+ Returns:
+ List[MCPTool]: Combined list of tools from all servers
+ """
+ list_tools_result: List[MCPTool] = []
+ verbose_logger.debug("SSE SERVER MANAGER LISTING TOOLS")
+
+ for server in self.mcp_servers:
+ tools = await self._get_tools_from_server(server)
+ list_tools_result.extend(tools)
+
+ return list_tools_result
+
+ async def _get_tools_from_server(self, server: MCPSSEServer) -> List[MCPTool]:
+ """
+ Helper method to get tools from a single MCP server.
+
+ Args:
+ server (MCPSSEServer): The server to query tools from
+
+ Returns:
+ List[MCPTool]: List of tools available on the server
+ """
+ verbose_logger.debug(f"Connecting to url: {server.url}")
+
+ async with sse_client(url=server.url) as (read, write):
+ async with ClientSession(read, write) as session:
+ await session.initialize()
+
+ tools_result = await session.list_tools()
+ verbose_logger.debug(f"Tools from {server.name}: {tools_result}")
+
+ # Update tool to server mapping
+ for tool in tools_result.tools:
+ self.tool_name_to_mcp_server_name_mapping[tool.name] = server.name
+
+ return tools_result.tools
+
+ def initialize_tool_name_to_mcp_server_name_mapping(self):
+ """
+ On startup, initialize the tool name to MCP server name mapping
+ """
+ try:
+ if asyncio.get_running_loop():
+ asyncio.create_task(
+ self._initialize_tool_name_to_mcp_server_name_mapping()
+ )
+ except RuntimeError as e: # no running event loop
+ verbose_logger.exception(
+ f"No running event loop - skipping tool name to MCP server name mapping initialization: {str(e)}"
+ )
+
+ async def _initialize_tool_name_to_mcp_server_name_mapping(self):
+ """
+ Call list_tools for each server and update the tool name to MCP server name mapping
+ """
+ for server in self.mcp_servers:
+ tools = await self._get_tools_from_server(server)
+ for tool in tools:
+ self.tool_name_to_mcp_server_name_mapping[tool.name] = server.name
+
+ async def call_tool(self, name: str, arguments: Dict[str, Any]):
+ """
+ Call a tool with the given name and arguments
+ """
+ mcp_server = self._get_mcp_server_from_tool_name(name)
+ if mcp_server is None:
+ raise ValueError(f"Tool {name} not found")
+ async with sse_client(url=mcp_server.url) as (read, write):
+ async with ClientSession(read, write) as session:
+ await session.initialize()
+ return await session.call_tool(name, arguments)
+
+ def _get_mcp_server_from_tool_name(self, tool_name: str) -> Optional[MCPSSEServer]:
+ """
+ Get the MCP Server from the tool name
+ """
+ if tool_name in self.tool_name_to_mcp_server_name_mapping:
+ for server in self.mcp_servers:
+ if server.name == self.tool_name_to_mcp_server_name_mapping[tool_name]:
+ return server
+ return None
+
+
+global_mcp_server_manager: MCPServerManager = MCPServerManager()
diff --git a/litellm/proxy/_experimental/mcp_server/server.py b/litellm/proxy/_experimental/mcp_server/server.py
new file mode 100644
index 0000000000..fe1eccb048
--- /dev/null
+++ b/litellm/proxy/_experimental/mcp_server/server.py
@@ -0,0 +1,309 @@
+"""
+LiteLLM MCP Server Routes
+"""
+
+import asyncio
+from typing import Any, Dict, List, Optional, Union
+
+from anyio import BrokenResourceError
+from fastapi import APIRouter, Depends, HTTPException, Request
+from fastapi.responses import StreamingResponse
+from pydantic import ConfigDict, ValidationError
+
+from litellm._logging import verbose_logger
+from litellm.constants import MCP_TOOL_NAME_PREFIX
+from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+from litellm.proxy._types import UserAPIKeyAuth
+from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
+from litellm.types.mcp_server.mcp_server_manager import MCPInfo
+from litellm.types.utils import StandardLoggingMCPToolCall
+from litellm.utils import client
+
+# Check if MCP is available
+# "mcp" requires python 3.10 or higher, but several litellm users use python 3.8
+# We're making this conditional import to avoid breaking users who use python 3.8.
+try:
+ from mcp.server import Server
+
+ MCP_AVAILABLE = True
+except ImportError as e:
+ verbose_logger.debug(f"MCP module not found: {e}")
+ MCP_AVAILABLE = False
+ router = APIRouter(
+ prefix="/mcp",
+ tags=["mcp"],
+ )
+
+
+if MCP_AVAILABLE:
+ from mcp.server import NotificationOptions, Server
+ from mcp.server.models import InitializationOptions
+ from mcp.types import EmbeddedResource as MCPEmbeddedResource
+ from mcp.types import ImageContent as MCPImageContent
+ from mcp.types import TextContent as MCPTextContent
+ from mcp.types import Tool as MCPTool
+
+ from .mcp_server_manager import global_mcp_server_manager
+ from .sse_transport import SseServerTransport
+ from .tool_registry import global_mcp_tool_registry
+
+ ######################################################
+ ############ MCP Tools List REST API Response Object #
+ # Defined here because we don't want to add `mcp` as a
+ # required dependency for `litellm` pip package
+ ######################################################
+ class ListMCPToolsRestAPIResponseObject(MCPTool):
+ """
+ Object returned by the /tools/list REST API route.
+ """
+
+ mcp_info: Optional[MCPInfo] = None
+ model_config = ConfigDict(arbitrary_types_allowed=True)
+
+ ########################################################
+ ############ Initialize the MCP Server #################
+ ########################################################
+ router = APIRouter(
+ prefix="/mcp",
+ tags=["mcp"],
+ )
+ server: Server = Server("litellm-mcp-server")
+ sse: SseServerTransport = SseServerTransport("/mcp/sse/messages")
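+    # The endpoint passed here must line up with the POST route defined below
+    # ("/sse/messages" under the router's "/mcp" prefix); connected clients are
+    # directed to POST their messages to that URL.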
+
+ ########################################################
+ ############### MCP Server Routes #######################
+ ########################################################
+ @server.list_tools()
+ async def list_tools() -> list[MCPTool]:
+ """
+ List all available tools
+ """
+ return await _list_mcp_tools()
+
+ async def _list_mcp_tools() -> List[MCPTool]:
+ """
+ List all available tools
+ """
+ tools = []
+ for tool in global_mcp_tool_registry.list_tools():
+ tools.append(
+ MCPTool(
+ name=tool.name,
+ description=tool.description,
+ inputSchema=tool.input_schema,
+ )
+ )
+ verbose_logger.debug(
+ "GLOBAL MCP TOOLS: %s", global_mcp_tool_registry.list_tools()
+ )
+ sse_tools: List[MCPTool] = await global_mcp_server_manager.list_tools()
+ verbose_logger.debug("SSE TOOLS: %s", sse_tools)
+ if sse_tools is not None:
+ tools.extend(sse_tools)
+ return tools
+
+ @server.call_tool()
+ async def mcp_server_tool_call(
+ name: str, arguments: Dict[str, Any] | None
+ ) -> List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]:
+ """
+ Call a specific tool with the provided arguments
+
+ Args:
+ name (str): Name of the tool to call
+ arguments (Dict[str, Any] | None): Arguments to pass to the tool
+
+ Returns:
+ List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]: Tool execution results
+
+ Raises:
+ HTTPException: If tool not found or arguments missing
+ """
+ # Validate arguments
+ response = await call_mcp_tool(
+ name=name,
+ arguments=arguments,
+ )
+ return response
+
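+    # The `client` decorator runs this call through LiteLLM's standard logging setup;
+    # the `litellm_logging_obj` pulled from kwargs below is used to attach the
+    # MCP tool-call metadata to the logged request.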
+ @client
+ async def call_mcp_tool(
+ name: str, arguments: Optional[Dict[str, Any]] = None, **kwargs: Any
+ ) -> List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]:
+ """
+ Call a specific tool with the provided arguments
+ """
+ if arguments is None:
+ raise HTTPException(
+ status_code=400, detail="Request arguments are required"
+ )
+
+ standard_logging_mcp_tool_call: StandardLoggingMCPToolCall = (
+ _get_standard_logging_mcp_tool_call(
+ name=name,
+ arguments=arguments,
+ )
+ )
+ litellm_logging_obj: Optional[LiteLLMLoggingObj] = kwargs.get(
+ "litellm_logging_obj", None
+ )
+ if litellm_logging_obj:
+ litellm_logging_obj.model_call_details["mcp_tool_call_metadata"] = (
+ standard_logging_mcp_tool_call
+ )
+ litellm_logging_obj.model_call_details["model"] = (
+ f"{MCP_TOOL_NAME_PREFIX}: {standard_logging_mcp_tool_call.get('name') or ''}"
+ )
+ litellm_logging_obj.model_call_details["custom_llm_provider"] = (
+ standard_logging_mcp_tool_call.get("mcp_server_name")
+ )
+
+ # Try managed server tool first
+ if name in global_mcp_server_manager.tool_name_to_mcp_server_name_mapping:
+ return await _handle_managed_mcp_tool(name, arguments)
+
+ # Fall back to local tool registry
+ return await _handle_local_mcp_tool(name, arguments)
+
+ def _get_standard_logging_mcp_tool_call(
+ name: str,
+ arguments: Dict[str, Any],
+ ) -> StandardLoggingMCPToolCall:
+ mcp_server = global_mcp_server_manager._get_mcp_server_from_tool_name(name)
+ if mcp_server:
+ mcp_info = mcp_server.mcp_info or {}
+ return StandardLoggingMCPToolCall(
+ name=name,
+ arguments=arguments,
+ mcp_server_name=mcp_info.get("server_name"),
+ mcp_server_logo_url=mcp_info.get("logo_url"),
+ )
+ else:
+ return StandardLoggingMCPToolCall(
+ name=name,
+ arguments=arguments,
+ )
+
+ async def _handle_managed_mcp_tool(
+ name: str, arguments: Dict[str, Any]
+ ) -> List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]:
+ """Handle tool execution for managed server tools"""
+ call_tool_result = await global_mcp_server_manager.call_tool(
+ name=name,
+ arguments=arguments,
+ )
+ verbose_logger.debug("CALL TOOL RESULT: %s", call_tool_result)
+ return call_tool_result.content
+
+ async def _handle_local_mcp_tool(
+ name: str, arguments: Dict[str, Any]
+ ) -> List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]:
+ """Handle tool execution for local registry tools"""
+ tool = global_mcp_tool_registry.get_tool(name)
+ if not tool:
+ raise HTTPException(status_code=404, detail=f"Tool '{name}' not found")
+
+ try:
+ result = tool.handler(**arguments)
+ return [MCPTextContent(text=str(result), type="text")]
+ except Exception as e:
+ return [MCPTextContent(text=f"Error: {str(e)}", type="text")]
+
+ @router.get("/", response_class=StreamingResponse)
+ async def handle_sse(request: Request):
+ verbose_logger.info("new incoming SSE connection established")
+ async with sse.connect_sse(request) as streams:
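+            # streams is the (read_stream, write_stream) pair yielded by the SSE
+            # transport: the MCP server reads client messages from the first and
+            # writes its responses/events to the second.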
+ try:
+ await server.run(streams[0], streams[1], options)
+ except BrokenResourceError:
+ pass
+ except asyncio.CancelledError:
+ pass
+ except ValidationError:
+ pass
+ except Exception:
+ raise
+ await request.close()
+
+ @router.post("/sse/messages")
+ async def handle_messages(request: Request):
+ verbose_logger.info("incoming SSE message received")
+ await sse.handle_post_message(request.scope, request.receive, request._send)
+ await request.close()
+
+ ########################################################
+ ############ MCP Server REST API Routes #################
+ ########################################################
+ @router.get("/tools/list", dependencies=[Depends(user_api_key_auth)])
+ async def list_tool_rest_api() -> List[ListMCPToolsRestAPIResponseObject]:
+ """
+ List all available tools with information about the server they belong to.
+
+ Example response:
+ Tools:
+ [
+ {
+ "name": "create_zap",
+ "description": "Create a new zap",
+ "inputSchema": "tool_input_schema",
+ "mcp_info": {
+ "server_name": "zapier",
+ "logo_url": "https://www.zapier.com/logo.png",
+ }
+ },
+ {
+ "name": "fetch_data",
+ "description": "Fetch data from a URL",
+ "inputSchema": "tool_input_schema",
+ "mcp_info": {
+ "server_name": "fetch",
+ "logo_url": "https://www.fetch.com/logo.png",
+ }
+ }
+ ]
+ """
+ list_tools_result: List[ListMCPToolsRestAPIResponseObject] = []
+ for server in global_mcp_server_manager.mcp_servers:
+ try:
+ tools = await global_mcp_server_manager._get_tools_from_server(server)
+ for tool in tools:
+ list_tools_result.append(
+ ListMCPToolsRestAPIResponseObject(
+ name=tool.name,
+ description=tool.description,
+ inputSchema=tool.inputSchema,
+ mcp_info=server.mcp_info,
+ )
+ )
+ except Exception as e:
+ verbose_logger.exception(f"Error getting tools from {server.name}: {e}")
+ continue
+ return list_tools_result
+
+ @router.post("/tools/call", dependencies=[Depends(user_api_key_auth)])
+ async def call_tool_rest_api(
+ request: Request,
+ user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
+ ):
+ """
+ REST API to call a specific MCP tool with the provided arguments
+ """
+ from litellm.proxy.proxy_server import add_litellm_data_to_request, proxy_config
+
+ data = await request.json()
+ data = await add_litellm_data_to_request(
+ data=data,
+ request=request,
+ user_api_key_dict=user_api_key_dict,
+ proxy_config=proxy_config,
+ )
+ return await call_mcp_tool(**data)
+
+ options = InitializationOptions(
+ server_name="litellm-mcp-server",
+ server_version="0.1.0",
+ capabilities=server.get_capabilities(
+ notification_options=NotificationOptions(),
+ experimental_capabilities={},
+ ),
+ )
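
Note: the REST routes above sit behind user_api_key_auth and forward into call_mcp_tool. The following is a minimal client sketch, assuming the router is mounted at /mcp on a locally running proxy and that a tool named "create_zap" is registered; the base URL, API key, and tool name are illustrative placeholders, not values taken from this diff.

import asyncio

import httpx

PROXY_MCP_BASE = "http://localhost:4000/mcp"  # assumed mount point, not shown in this hunk
API_KEY = "sk-1234"  # placeholder key that user_api_key_auth accepts in your deployment


async def main() -> None:
    headers = {"Authorization": f"Bearer {API_KEY}"}
    async with httpx.AsyncClient() as client:
        # GET /tools/list -> List[ListMCPToolsRestAPIResponseObject]
        tools = await client.get(f"{PROXY_MCP_BASE}/tools/list", headers=headers)
        print(tools.json())

        # POST /tools/call -> body is unpacked into call_mcp_tool(name=..., arguments=...)
        result = await client.post(
            f"{PROXY_MCP_BASE}/tools/call",
            headers=headers,
            json={"name": "create_zap", "arguments": {"title": "demo zap"}},
        )
        print(result.json())


asyncio.run(main())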
diff --git a/litellm/proxy/_experimental/mcp_server/sse_transport.py b/litellm/proxy/_experimental/mcp_server/sse_transport.py
new file mode 100644
index 0000000000..63ffd403c6
--- /dev/null
+++ b/litellm/proxy/_experimental/mcp_server/sse_transport.py
@@ -0,0 +1,150 @@
+"""
+This is a modification of code from: https://github.com/SecretiveShell/MCP-Bridge/blob/master/mcp_bridge/mcp_server/sse_transport.py
+
+Credit to the maintainers of SecretiveShell for their SSE Transport implementation
+
+"""
+
+from contextlib import asynccontextmanager
+from typing import Any
+from urllib.parse import quote
+from uuid import UUID, uuid4
+
+import anyio
+import mcp.types as types
+from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
+from fastapi.requests import Request
+from fastapi.responses import Response
+from pydantic import ValidationError
+from sse_starlette import EventSourceResponse
+from starlette.types import Receive, Scope, Send
+
+from litellm._logging import verbose_logger
+
+
+class SseServerTransport:
+ """
+ SSE server transport for MCP. This class provides _two_ ASGI applications,
+ suitable to be used with a framework like Starlette and a server like Hypercorn:
+
+ 1. connect_sse() is an ASGI application which receives incoming GET requests,
+ and sets up a new SSE stream to send server messages to the client.
+ 2. handle_post_message() is an ASGI application which receives incoming POST
+ requests, which should contain client messages that link to a
+ previously-established SSE session.
+ """
+
+ _endpoint: str
+ _read_stream_writers: dict[
+ UUID, MemoryObjectSendStream[types.JSONRPCMessage | Exception]
+ ]
+
+ def __init__(self, endpoint: str) -> None:
+ """
+ Creates a new SSE server transport, which will direct the client to POST
+ messages to the relative or absolute URL given.
+ """
+
+ super().__init__()
+ self._endpoint = endpoint
+ self._read_stream_writers = {}
+ verbose_logger.debug(
+ f"SseServerTransport initialized with endpoint: {endpoint}"
+ )
+
+ @asynccontextmanager
+ async def connect_sse(self, request: Request):
+ if request.scope["type"] != "http":
+ verbose_logger.error("connect_sse received non-HTTP request")
+ raise ValueError("connect_sse can only handle HTTP requests")
+
+ verbose_logger.debug("Setting up SSE connection")
+ read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception]
+ read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception]
+
+ write_stream: MemoryObjectSendStream[types.JSONRPCMessage]
+ write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage]
+
+ read_stream_writer, read_stream = anyio.create_memory_object_stream(0)
+ write_stream, write_stream_reader = anyio.create_memory_object_stream(0)
+
+ session_id = uuid4()
+ session_uri = f"{quote(self._endpoint)}?session_id={session_id.hex}"
+ self._read_stream_writers[session_id] = read_stream_writer
+ verbose_logger.debug(f"Created new session with ID: {session_id}")
+
+ sse_stream_writer: MemoryObjectSendStream[dict[str, Any]]
+ sse_stream_reader: MemoryObjectReceiveStream[dict[str, Any]]
+ sse_stream_writer, sse_stream_reader = anyio.create_memory_object_stream(
+ 0, dict[str, Any]
+ )
+
+ async def sse_writer():
+ verbose_logger.debug("Starting SSE writer")
+ async with sse_stream_writer, write_stream_reader:
+ await sse_stream_writer.send({"event": "endpoint", "data": session_uri})
+ verbose_logger.debug(f"Sent endpoint event: {session_uri}")
+
+ async for message in write_stream_reader:
+ verbose_logger.debug(f"Sending message via SSE: {message}")
+ await sse_stream_writer.send(
+ {
+ "event": "message",
+ "data": message.model_dump_json(
+ by_alias=True, exclude_none=True
+ ),
+ }
+ )
+
+ async with anyio.create_task_group() as tg:
+ response = EventSourceResponse(
+ content=sse_stream_reader, data_sender_callable=sse_writer
+ )
+ verbose_logger.debug("Starting SSE response task")
+ tg.start_soon(response, request.scope, request.receive, request._send)
+
+ verbose_logger.debug("Yielding read and write streams")
+ yield (read_stream, write_stream)
+
+ async def handle_post_message(
+ self, scope: Scope, receive: Receive, send: Send
+ ) -> Response:
+ verbose_logger.debug("Handling POST message")
+ request = Request(scope, receive)
+
+ session_id_param = request.query_params.get("session_id")
+ if session_id_param is None:
+ verbose_logger.warning("Received request without session_id")
+ response = Response("session_id is required", status_code=400)
+ return response
+
+ try:
+ session_id = UUID(hex=session_id_param)
+ verbose_logger.debug(f"Parsed session ID: {session_id}")
+ except ValueError:
+ verbose_logger.warning(f"Received invalid session ID: {session_id_param}")
+ response = Response("Invalid session ID", status_code=400)
+ return response
+
+ writer = self._read_stream_writers.get(session_id)
+ if not writer:
+ verbose_logger.warning(f"Could not find session for ID: {session_id}")
+ response = Response("Could not find session", status_code=404)
+ return response
+
+ json = await request.json()
+ verbose_logger.debug(f"Received JSON: {json}")
+
+ try:
+ message = types.JSONRPCMessage.model_validate(json)
+ verbose_logger.debug(f"Validated client message: {message}")
+ except ValidationError as err:
+ verbose_logger.error(f"Failed to parse message: {err}")
+ response = Response("Could not parse message", status_code=400)
+ await writer.send(err)
+ return response
+
+ verbose_logger.debug(f"Sending message to writer: {message}")
+ response = Response("Accepted", status_code=202)
+ await writer.send(message)
+ return response
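
For orientation, the transport above implements the MCP SSE handshake: connect_sse streams an initial "endpoint" event carrying a per-session POST URL, and handle_post_message accepts JSON-RPC messages at that URL. Below is a minimal client-side sketch of that handshake, assuming the stream is mounted at /mcp/ on a local proxy; the paths are illustrative, and the real endpoint string is whatever the transport is constructed with elsewhere in this PR.

import httpx


def read_endpoint_event(base_url: str = "http://localhost:4000/mcp/") -> str:
    """Open the SSE stream and return the session-specific POST URL it advertises.

    Illustration only: a real client keeps the stream open to receive
    "message" events; returning here closes the connection and the session.
    """
    event = None
    with httpx.stream("GET", base_url, timeout=None) as resp:
        for line in resp.iter_lines():
            if line.startswith("event:"):
                event = line.split(":", 1)[1].strip()
            elif line.startswith("data:") and event == "endpoint":
                # e.g. "/mcp/sse/messages?session_id=<hex session id>"
                return line.split(":", 1)[1].strip()
    raise RuntimeError("SSE stream closed before an endpoint event was received")


# JSON-RPC requests are then POSTed to that URI; handle_post_message validates
# them with types.JSONRPCMessage and answers 202 Accepted on success.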
diff --git a/litellm/proxy/_experimental/mcp_server/tool_registry.py b/litellm/proxy/_experimental/mcp_server/tool_registry.py
new file mode 100644
index 0000000000..c08b797968
--- /dev/null
+++ b/litellm/proxy/_experimental/mcp_server/tool_registry.py
@@ -0,0 +1,103 @@
+import json
+from typing import Any, Callable, Dict, List, Optional
+
+from litellm._logging import verbose_logger
+from litellm.proxy.types_utils.utils import get_instance_fn
+from litellm.types.mcp_server.tool_registry import MCPTool
+
+
+class MCPToolRegistry:
+ """
+ A registry for managing MCP tools
+ """
+
+ def __init__(self):
+ # Registry to store all registered tools
+ self.tools: Dict[str, MCPTool] = {}
+
+ def register_tool(
+ self,
+ name: str,
+ description: str,
+ input_schema: Dict[str, Any],
+ handler: Callable,
+ ) -> None:
+ """
+ Register a new tool in the registry
+ """
+ self.tools[name] = MCPTool(
+ name=name,
+ description=description,
+ input_schema=input_schema,
+ handler=handler,
+ )
+ verbose_logger.debug(f"Registered tool: {name}")
+
+ def get_tool(self, name: str) -> Optional[MCPTool]:
+ """
+ Get a tool from the registry by name
+ """
+ return self.tools.get(name)
+
+ def list_tools(self) -> List[MCPTool]:
+ """
+ List all registered tools
+ """
+ return list(self.tools.values())
+
+ def load_tools_from_config(
+ self, mcp_tools_config: Optional[List[Dict[str, Any]]] = None
+ ) -> None:
+ """
+ Load and register tools from the proxy config
+
+ Args:
+ mcp_tools_config: The mcp_tools config from the proxy config
+ """
+ if mcp_tools_config is None:
+ raise ValueError(
+ "mcp_tools_config is required, please set `mcp_tools` in your proxy config"
+ )
+
+ for tool_config in mcp_tools_config:
+ if not isinstance(tool_config, dict):
+ raise ValueError("mcp_tools_config must be a list of dictionaries")
+
+ name = tool_config.get("name")
+ description = tool_config.get("description")
+ input_schema = tool_config.get("input_schema", {})
+ handler_name = tool_config.get("handler")
+
+ if not all([name, description, handler_name]):
+ continue
+
+ # Try to resolve the handler
+ # First check if it's a module path (e.g., "module.submodule.function")
+ if handler_name is None:
+ raise ValueError(f"handler is required for tool {name}")
+ handler = get_instance_fn(handler_name)
+
+ if handler is None:
+ verbose_logger.warning(
+ f"Warning: Could not find handler {handler_name} for tool {name}"
+ )
+ continue
+
+ # Register the tool
+ if name is None:
+ raise ValueError(f"name is required for tool {name}")
+ if description is None:
+ raise ValueError(f"description is required for tool {name}")
+
+ self.register_tool(
+ name=name,
+ description=description,
+ input_schema=input_schema,
+ handler=handler,
+ )
+ verbose_logger.debug(
+ "all registered tools: %s", json.dumps(self.tools, indent=4, default=str)
+ )
+
+
+global_mcp_tool_registry = MCPToolRegistry()
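
The registry above is populated from the proxy's mcp_tools config via load_tools_from_config. A minimal sketch of what that call expects follows, using a hypothetical handler path ("my_tools.echo") that get_instance_fn must be able to import from your environment; if the handler cannot be resolved, the tool is skipped with a warning.

from litellm.proxy._experimental.mcp_server.tool_registry import (
    global_mcp_tool_registry,
)

mcp_tools_config = [
    {
        "name": "echo",
        "description": "Echo back the provided text",
        "input_schema": {
            "type": "object",
            "properties": {"text": {"type": "string"}},
            "required": ["text"],
        },
        # Hypothetical dotted path; must resolve to a callable in your code base.
        "handler": "my_tools.echo",
    }
]

global_mcp_tool_registry.load_tools_from_config(mcp_tools_config)
print(global_mcp_tool_registry.list_tools())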
diff --git a/litellm/proxy/_experimental/out/_next/static/53gCyCPJv5kOanryQCHap/_buildManifest.js b/litellm/proxy/_experimental/out/_next/static/FPIQgzUY81b7nl8zNun4_/_buildManifest.js
similarity index 100%
rename from litellm/proxy/_experimental/out/_next/static/53gCyCPJv5kOanryQCHap/_buildManifest.js
rename to litellm/proxy/_experimental/out/_next/static/FPIQgzUY81b7nl8zNun4_/_buildManifest.js
diff --git a/litellm/proxy/_experimental/out/_next/static/53gCyCPJv5kOanryQCHap/_ssgManifest.js b/litellm/proxy/_experimental/out/_next/static/FPIQgzUY81b7nl8zNun4_/_ssgManifest.js
similarity index 100%
rename from litellm/proxy/_experimental/out/_next/static/53gCyCPJv5kOanryQCHap/_ssgManifest.js
rename to litellm/proxy/_experimental/out/_next/static/FPIQgzUY81b7nl8zNun4_/_ssgManifest.js
diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/117-883150efc583d711.js b/litellm/proxy/_experimental/out/_next/static/chunks/117-87ec698bfca6820e.js
similarity index 99%
rename from litellm/proxy/_experimental/out/_next/static/chunks/117-883150efc583d711.js
rename to litellm/proxy/_experimental/out/_next/static/chunks/117-87ec698bfca6820e.js
index afb2644d48..31fd397e11 100644
--- a/litellm/proxy/_experimental/out/_next/static/chunks/117-883150efc583d711.js
+++ b/litellm/proxy/_experimental/out/_next/static/chunks/117-87ec698bfca6820e.js
@@ -1,2 +1,2 @@
-(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[117],{65157:function(e,t){"use strict";function n(){return""}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"getDeploymentIdQueryOrEmptyString",{enumerable:!0,get:function(){return n}})},91572:function(){"trimStart"in String.prototype||(String.prototype.trimStart=String.prototype.trimLeft),"trimEnd"in String.prototype||(String.prototype.trimEnd=String.prototype.trimRight),"description"in Symbol.prototype||Object.defineProperty(Symbol.prototype,"description",{configurable:!0,get:function(){var e=/\((.*)\)/.exec(this.toString());return e?e[1]:void 0}}),Array.prototype.flat||(Array.prototype.flat=function(e,t){return t=this.concat.apply([],this),e>1&&t.some(Array.isArray)?t.flat(e-1):t},Array.prototype.flatMap=function(e,t){return this.map(e,t).flat()}),Promise.prototype.finally||(Promise.prototype.finally=function(e){if("function"!=typeof e)return this.then(e,e);var t=this.constructor||Promise;return this.then(function(n){return t.resolve(e()).then(function(){return n})},function(n){return t.resolve(e()).then(function(){throw n})})}),Object.fromEntries||(Object.fromEntries=function(e){return Array.from(e).reduce(function(e,t){return e[t[0]]=t[1],e},{})}),Array.prototype.at||(Array.prototype.at=function(e){var t=Math.trunc(e)||0;if(t<0&&(t+=this.length),!(t<0||t>=this.length))return this[t]}),Object.hasOwn||(Object.hasOwn=function(e,t){if(null==e)throw TypeError("Cannot convert undefined or null to object");return Object.prototype.hasOwnProperty.call(Object(e),t)}),"canParse"in URL||(URL.canParse=function(e,t){try{return new URL(e,t),!0}catch(e){return!1}})},1634:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"addBasePath",{enumerable:!0,get:function(){return u}});let r=n(68498),o=n(33068);function u(e,t){return(0,o.normalizePathTrailingSlash)((0,r.addPathPrefix)(e,"/ui"))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},75266:function(e,t){"use strict";function n(e){var t,n;t=self.__next_s,n=()=>{e()},t&&t.length?t.reduce((e,t)=>{let[n,r]=t;return e.then(()=>new Promise((e,t)=>{let o=document.createElement("script");if(r)for(let e in r)"children"!==e&&o.setAttribute(e,r[e]);n?(o.src=n,o.onload=()=>e(),o.onerror=t):r&&(o.innerHTML=r.children,setTimeout(e)),document.head.appendChild(o)}))},Promise.resolve()).catch(e=>{console.error(e)}).then(()=>{n()}):n()}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"appBootstrap",{enumerable:!0,get:function(){return n}}),window.next={version:"14.2.21",appDir:!0},("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},83079:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"callServer",{enumerable:!0,get:function(){return o}});let r=n(12846);async function o(e,t){let n=(0,r.getServerActionDispatcher)();if(!n)throw Error("Invariant: missing action dispatcher.");return new Promise((r,o)=>{n({actionId:e,actionArgs:t,resolve:r,reject:o})})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},92304:function(e,t,n){"use strict";let r,o;Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"hydrate",{enumerable:!0,get:function(){return x}});let u=n(47043),l=n(53099),a=n(57437);n(91572);let i=u._(n(34040)),c=l._(n(2265)),s=n(6671),f=n(48701),d=u._(n(61404)),p=n(83079),h=n(89721),y=n(2103);n(70647);let _=window.console.error;window.console.error=function(){for(var e=arguments.length,t=Array(e),n=0;n{if((0,h.isNextRouterError)(e.error)){e.preventDefault();return}});let v=document,b=new TextEncoder,g=!1,m=!1,R=null;function P(e){if(0===e[0])r=[];else if(1===e[0]){if(!r)throw Error("Unexpected server data: missing bootstrap script.");o?o.enqueue(b.encode(e[1])):r.push(e[1])}else 2===e[0]&&(R=e[1])}let j=function(){o&&!m&&(o.close(),m=!0,r=void 0),g=!0};"loading"===document.readyState?document.addEventListener("DOMContentLoaded",j,!1):j();let O=self.__next_f=self.__next_f||[];O.forEach(P),O.push=P;let S=new ReadableStream({start(e){r&&(r.forEach(t=>{e.enqueue(b.encode(t))}),g&&!m&&(e.close(),m=!0,r=void 0)),o=e}}),E=(0,s.createFromReadableStream)(S,{callServer:p.callServer});function w(){return(0,c.use)(E)}let T=c.default.StrictMode;function M(e){let{children:t}=e;return t}function x(){let e=(0,y.createMutableActionQueue)(),t=(0,a.jsx)(T,{children:(0,a.jsx)(f.HeadManagerContext.Provider,{value:{appDir:!0},children:(0,a.jsx)(y.ActionQueueContext.Provider,{value:e,children:(0,a.jsx)(M,{children:(0,a.jsx)(w,{})})})})}),n=window.__next_root_layout_missing_tags,r=!!(null==n?void 0:n.length),o={onRecoverableError:d.default};"__next_error__"===document.documentElement.id||r?i.default.createRoot(v,o).render(t):c.default.startTransition(()=>i.default.hydrateRoot(v,t,{...o,formState:R}))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},54278:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),n(19506),(0,n(75266).appBootstrap)(()=>{let{hydrate:e}=n(92304);n(12846),n(4707),e()}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},19506:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),n(65157);{let e=n.u;n.u=function(){for(var t=arguments.length,n=Array(t),r=0;r(l(function(){var e;let t=document.getElementsByName(u)[0];if(null==t?void 0:null==(e=t.shadowRoot)?void 0:e.childNodes[0])return t.shadowRoot.childNodes[0];{let e=document.createElement(u);e.style.cssText="position:absolute";let t=document.createElement("div");return t.ariaLive="assertive",t.id="__next-route-announcer__",t.role="alert",t.style.cssText="position:absolute;border:0;height:1px;margin:-1px;padding:0;width:1px;clip:rect(0 0 0 0);overflow:hidden;white-space:nowrap;word-wrap:normal",e.attachShadow({mode:"open"}).appendChild(t),document.body.appendChild(e),t}}()),()=>{let e=document.getElementsByTagName(u)[0];(null==e?void 0:e.isConnected)&&document.body.removeChild(e)}),[]);let[a,i]=(0,r.useState)(""),c=(0,r.useRef)();return(0,r.useEffect)(()=>{let e="";if(document.title)e=document.title;else{let t=document.querySelector("h1");t&&(e=t.innerText||t.textContent||"")}void 
0!==c.current&&c.current!==e&&i(e),c.current=e},[t]),n?(0,o.createPortal)(a,n):null}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},6866:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ACTION:function(){return r},FLIGHT_PARAMETERS:function(){return i},NEXT_DID_POSTPONE_HEADER:function(){return s},NEXT_ROUTER_PREFETCH_HEADER:function(){return u},NEXT_ROUTER_STATE_TREE:function(){return o},NEXT_RSC_UNION_QUERY:function(){return c},NEXT_URL:function(){return l},RSC_CONTENT_TYPE_HEADER:function(){return a},RSC_HEADER:function(){return n}});let n="RSC",r="Next-Action",o="Next-Router-State-Tree",u="Next-Router-Prefetch",l="Next-Url",a="text/x-component",i=[[n],[o],[u]],c="_rsc",s="x-nextjs-postponed";("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},12846:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{createEmptyCacheNode:function(){return C},default:function(){return I},getServerActionDispatcher:function(){return E},urlToUrlWithoutFlightMarker:function(){return T}});let r=n(53099),o=n(57437),u=r._(n(2265)),l=n(61956),a=n(24673),i=n(33456),c=n(79060),s=n(47744),f=n(61060),d=n(82952),p=n(86146),h=n(1634),y=n(6495),_=n(4123),v=n(39320),b=n(38137),g=n(6866),m=n(35076),R=n(11283),P=n(84541),j="undefined"==typeof window,O=j?null:new Map,S=null;function E(){return S}let w={};function T(e){let t=new URL(e,location.origin);if(t.searchParams.delete(g.NEXT_RSC_UNION_QUERY),t.pathname.endsWith(".txt")){let{pathname:e}=t,n=e.endsWith("/index.txt")?10:4;t.pathname=e.slice(0,-n)}return t}function M(e){return e.origin!==window.location.origin}function x(e){let{appRouterState:t,sync:n}=e;return(0,u.useInsertionEffect)(()=>{let{tree:e,pushRef:r,canonicalUrl:o}=t,u={...r.preserveCustomHistoryState?window.history.state:{},__NA:!0,__PRIVATE_NEXTJS_INTERNALS_TREE:e};r.pendingPush&&(0,i.createHrefFromUrl)(new URL(window.location.href))!==o?(r.pendingPush=!1,window.history.pushState(u,"",o)):window.history.replaceState(u,"",o),n(t)},[t,n]),null}function C(){return{lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:null}}function A(e){null==e&&(e={});let t=window.history.state,n=null==t?void 0:t.__NA;n&&(e.__NA=n);let r=null==t?void 0:t.__PRIVATE_NEXTJS_INTERNALS_TREE;return r&&(e.__PRIVATE_NEXTJS_INTERNALS_TREE=r),e}function N(e){let{headCacheNode:t}=e,n=null!==t?t.head:null,r=null!==t?t.prefetchHead:null,o=null!==r?r:n;return(0,u.useDeferredValue)(n,o)}function D(e){let t,{buildId:n,initialHead:r,initialTree:i,urlParts:f,initialSeedData:g,couldBeIntercepted:E,assetPrefix:T,missingSlots:C}=e,D=(0,u.useMemo)(()=>(0,d.createInitialRouterState)({buildId:n,initialSeedData:g,urlParts:f,initialTree:i,initialParallelRoutes:O,location:j?null:window.location,initialHead:r,couldBeIntercepted:E}),[n,g,f,i,r,E]),[I,U,k]=(0,s.useReducerWithReduxDevtools)(D);(0,u.useEffect)(()=>{O=null},[]);let{canonicalUrl:F}=(0,s.useUnwrapState)(I),{searchParams:L,pathname:H}=(0,u.useMemo)(()=>{let e=new 
URL(F,"undefined"==typeof window?"http://n":window.location.href);return{searchParams:e.searchParams,pathname:(0,R.hasBasePath)(e.pathname)?(0,m.removeBasePath)(e.pathname):e.pathname}},[F]),$=(0,u.useCallback)(e=>{let{previousTree:t,serverResponse:n}=e;(0,u.startTransition)(()=>{U({type:a.ACTION_SERVER_PATCH,previousTree:t,serverResponse:n})})},[U]),G=(0,u.useCallback)((e,t,n)=>{let r=new URL((0,h.addBasePath)(e),location.href);return U({type:a.ACTION_NAVIGATE,url:r,isExternalUrl:M(r),locationSearch:location.search,shouldScroll:null==n||n,navigateType:t})},[U]);S=(0,u.useCallback)(e=>{(0,u.startTransition)(()=>{U({...e,type:a.ACTION_SERVER_ACTION})})},[U]);let z=(0,u.useMemo)(()=>({back:()=>window.history.back(),forward:()=>window.history.forward(),prefetch:(e,t)=>{let n;if(!(0,p.isBot)(window.navigator.userAgent)){try{n=new URL((0,h.addBasePath)(e),window.location.href)}catch(t){throw Error("Cannot prefetch '"+e+"' because it cannot be converted to a URL.")}M(n)||(0,u.startTransition)(()=>{var e;U({type:a.ACTION_PREFETCH,url:n,kind:null!=(e=null==t?void 0:t.kind)?e:a.PrefetchKind.FULL})})}},replace:(e,t)=>{void 0===t&&(t={}),(0,u.startTransition)(()=>{var n;G(e,"replace",null==(n=t.scroll)||n)})},push:(e,t)=>{void 0===t&&(t={}),(0,u.startTransition)(()=>{var n;G(e,"push",null==(n=t.scroll)||n)})},refresh:()=>{(0,u.startTransition)(()=>{U({type:a.ACTION_REFRESH,origin:window.location.origin})})},fastRefresh:()=>{throw Error("fastRefresh can only be used in development mode. Please use refresh instead.")}}),[U,G]);(0,u.useEffect)(()=>{window.next&&(window.next.router=z)},[z]),(0,u.useEffect)(()=>{function e(e){var t;e.persisted&&(null==(t=window.history.state)?void 0:t.__PRIVATE_NEXTJS_INTERNALS_TREE)&&(w.pendingMpaPath=void 0,U({type:a.ACTION_RESTORE,url:new URL(window.location.href),tree:window.history.state.__PRIVATE_NEXTJS_INTERNALS_TREE}))}return window.addEventListener("pageshow",e),()=>{window.removeEventListener("pageshow",e)}},[U]);let{pushRef:B}=(0,s.useUnwrapState)(I);if(B.mpaNavigation){if(w.pendingMpaPath!==F){let e=window.location;B.pendingPush?e.assign(F):e.replace(F),w.pendingMpaPath=F}(0,u.use)(b.unresolvedThenable)}(0,u.useEffect)(()=>{let e=window.history.pushState.bind(window.history),t=window.history.replaceState.bind(window.history),n=e=>{var t;let n=window.location.href,r=null==(t=window.history.state)?void 0:t.__PRIVATE_NEXTJS_INTERNALS_TREE;(0,u.startTransition)(()=>{U({type:a.ACTION_RESTORE,url:new URL(null!=e?e:n,n),tree:r})})};window.history.pushState=function(t,r,o){return(null==t?void 0:t.__NA)||(null==t?void 0:t._N)||(t=A(t),o&&n(o)),e(t,r,o)},window.history.replaceState=function(e,r,o){return(null==e?void 0:e.__NA)||(null==e?void 0:e._N)||(e=A(e),o&&n(o)),t(e,r,o)};let r=e=>{let{state:t}=e;if(t){if(!t.__NA){window.location.reload();return}(0,u.startTransition)(()=>{U({type:a.ACTION_RESTORE,url:new URL(window.location.href),tree:t.__PRIVATE_NEXTJS_INTERNALS_TREE})})}};return window.addEventListener("popstate",r),()=>{window.history.pushState=e,window.history.replaceState=t,window.removeEventListener("popstate",r)}},[U]);let{cache:W,tree:K,nextUrl:V,focusAndScrollRef:Y}=(0,s.useUnwrapState)(I),X=(0,u.useMemo)(()=>(0,v.findHeadInCache)(W,K[1]),[W,K]),q=(0,u.useMemo)(()=>(function e(t,n){for(let r of(void 0===n&&(n={}),Object.values(t[1]))){let t=r[0],o=Array.isArray(t),u=o?t[1]:t;!u||u.startsWith(P.PAGE_SEGMENT_KEY)||(o&&("c"===t[2]||"oc"===t[2])?n[t[0]]=t[1].split("/"):o&&(n[t[0]]=t[1]),n=e(r,n))}return 
n})(K),[K]);if(null!==X){let[e,n]=X;t=(0,o.jsx)(N,{headCacheNode:e},n)}else t=null;let J=(0,o.jsxs)(_.RedirectBoundary,{children:[t,W.rsc,(0,o.jsx)(y.AppRouterAnnouncer,{tree:K})]});return(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)(x,{appRouterState:(0,s.useUnwrapState)(I),sync:k}),(0,o.jsx)(c.PathParamsContext.Provider,{value:q,children:(0,o.jsx)(c.PathnameContext.Provider,{value:H,children:(0,o.jsx)(c.SearchParamsContext.Provider,{value:L,children:(0,o.jsx)(l.GlobalLayoutRouterContext.Provider,{value:{buildId:n,changeByServerResponse:$,tree:K,focusAndScrollRef:Y,nextUrl:V},children:(0,o.jsx)(l.AppRouterContext.Provider,{value:z,children:(0,o.jsx)(l.LayoutRouterContext.Provider,{value:{childNodes:W.parallelRoutes,tree:K,url:F,loading:W.loading},children:J})})})})})})]})}function I(e){let{globalErrorComponent:t,...n}=e;return(0,o.jsx)(f.ErrorBoundary,{errorComponent:t,children:(0,o.jsx)(D,{...n})})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},96149:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"bailoutToClientRendering",{enumerable:!0,get:function(){return u}});let r=n(18993),o=n(51845);function u(e){let t=o.staticGenerationAsyncStorage.getStore();if((null==t||!t.forceStatic)&&(null==t?void 0:t.isStaticGeneration))throw new r.BailoutToCSRError(e)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},19107:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"ClientPageRoot",{enumerable:!0,get:function(){return u}});let r=n(57437),o=n(54535);function u(e){let{Component:t,props:n}=e;return n.searchParams=(0,o.createDynamicallyTrackedSearchParams)(n.searchParams||{}),(0,r.jsx)(t,{...n})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},61060:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ErrorBoundary:function(){return h},ErrorBoundaryHandler:function(){return f},GlobalError:function(){return d},default:function(){return p}});let r=n(47043),o=n(57437),u=r._(n(2265)),l=n(35475),a=n(89721),i=n(51845),c={error:{fontFamily:'system-ui,"Segoe UI",Roboto,Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji"',height:"100vh",textAlign:"center",display:"flex",flexDirection:"column",alignItems:"center",justifyContent:"center"},text:{fontSize:"14px",fontWeight:400,lineHeight:"28px",margin:"0 8px"}};function s(e){let{error:t}=e,n=i.staticGenerationAsyncStorage.getStore();if((null==n?void 0:n.isRevalidate)||(null==n?void 0:n.isStaticGeneration))throw console.error(t),t;return null}class f extends u.default.Component{static getDerivedStateFromError(e){if((0,a.isNextRouterError)(e))throw e;return{error:e}}static getDerivedStateFromProps(e,t){return e.pathname!==t.previousPathname&&t.error?{error:null,previousPathname:e.pathname}:{error:t.error,previousPathname:e.pathname}}render(){return 
this.state.error?(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)(s,{error:this.state.error}),this.props.errorStyles,this.props.errorScripts,(0,o.jsx)(this.props.errorComponent,{error:this.state.error,reset:this.reset})]}):this.props.children}constructor(e){super(e),this.reset=()=>{this.setState({error:null})},this.state={error:null,previousPathname:this.props.pathname}}}function d(e){let{error:t}=e,n=null==t?void 0:t.digest;return(0,o.jsxs)("html",{id:"__next_error__",children:[(0,o.jsx)("head",{}),(0,o.jsxs)("body",{children:[(0,o.jsx)(s,{error:t}),(0,o.jsx)("div",{style:c.error,children:(0,o.jsxs)("div",{children:[(0,o.jsx)("h2",{style:c.text,children:"Application error: a "+(n?"server":"client")+"-side exception has occurred (see the "+(n?"server logs":"browser console")+" for more information)."}),n?(0,o.jsx)("p",{style:c.text,children:"Digest: "+n}):null]})})]})]})}let p=d;function h(e){let{errorComponent:t,errorStyles:n,errorScripts:r,children:u}=e,a=(0,l.usePathname)();return t?(0,o.jsx)(f,{pathname:a,errorComponent:t,errorStyles:n,errorScripts:r,children:u}):(0,o.jsx)(o.Fragment,{children:u})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},46177:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{DynamicServerError:function(){return r},isDynamicServerError:function(){return o}});let n="DYNAMIC_SERVER_USAGE";class r extends Error{constructor(e){super("Dynamic server usage: "+e),this.description=e,this.digest=n}}function o(e){return"object"==typeof e&&null!==e&&"digest"in e&&"string"==typeof e.digest&&e.digest===n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},89721:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"isNextRouterError",{enumerable:!0,get:function(){return u}});let r=n(98200),o=n(88968);function u(e){return e&&e.digest&&((0,o.isRedirectError)(e)||(0,r.isNotFoundError)(e))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4707:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return S}});let r=n(47043),o=n(53099),u=n(57437),l=o._(n(2265)),a=r._(n(54887)),i=n(61956),c=n(44848),s=n(38137),f=n(61060),d=n(76015),p=n(7092),h=n(4123),y=n(80),_=n(73171),v=n(78505),b=n(28077),g=["bottom","height","left","right","top","width","x","y"];function m(e,t){let n=e.getBoundingClientRect();return n.top>=0&&n.top<=t}class R extends l.default.Component{componentDidMount(){this.handlePotentialScroll()}componentDidUpdate(){this.props.focusAndScrollRef.apply&&this.handlePotentialScroll()}render(){return this.props.children}constructor(...e){super(...e),this.handlePotentialScroll=()=>{let{focusAndScrollRef:e,segmentPath:t}=this.props;if(e.apply){var n;if(0!==e.segmentPaths.length&&!e.segmentPaths.some(e=>t.every((t,n)=>(0,d.matchSegment)(t,e[n]))))return;let 
r=null,o=e.hashFragment;if(o&&(r="top"===o?document.body:null!=(n=document.getElementById(o))?n:document.getElementsByName(o)[0]),r||(r="undefined"==typeof window?null:a.default.findDOMNode(this)),!(r instanceof Element))return;for(;!(r instanceof HTMLElement)||function(e){if(["sticky","fixed"].includes(getComputedStyle(e).position))return!0;let t=e.getBoundingClientRect();return g.every(e=>0===t[e])}(r);){if(null===r.nextElementSibling)return;r=r.nextElementSibling}e.apply=!1,e.hashFragment=null,e.segmentPaths=[],(0,p.handleSmoothScroll)(()=>{if(o){r.scrollIntoView();return}let e=document.documentElement,t=e.clientHeight;!m(r,t)&&(e.scrollTop=0,m(r,t)||r.scrollIntoView())},{dontForceLayout:!0,onlyHashChange:e.onlyHashChange}),e.onlyHashChange=!1,r.focus()}}}}function P(e){let{segmentPath:t,children:n}=e,r=(0,l.useContext)(i.GlobalLayoutRouterContext);if(!r)throw Error("invariant global layout router not mounted");return(0,u.jsx)(R,{segmentPath:t,focusAndScrollRef:r.focusAndScrollRef,children:n})}function j(e){let{parallelRouterKey:t,url:n,childNodes:r,segmentPath:o,tree:a,cacheKey:f}=e,p=(0,l.useContext)(i.GlobalLayoutRouterContext);if(!p)throw Error("invariant global layout router not mounted");let{buildId:h,changeByServerResponse:y,tree:_}=p,v=r.get(f);if(void 0===v){let e={lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:null};v=e,r.set(f,e)}let g=null!==v.prefetchRsc?v.prefetchRsc:v.rsc,m=(0,l.useDeferredValue)(v.rsc,g),R="object"==typeof m&&null!==m&&"function"==typeof m.then?(0,l.use)(m):m;if(!R){let e=v.lazyData;if(null===e){let t=function e(t,n){if(t){let[r,o]=t,u=2===t.length;if((0,d.matchSegment)(n[0],r)&&n[1].hasOwnProperty(o)){if(u){let t=e(void 0,n[1][o]);return[n[0],{...n[1],[o]:[t[0],t[1],t[2],"refetch"]}]}return[n[0],{...n[1],[o]:e(t.slice(2),n[1][o])}]}}return n}(["",...o],_),r=(0,b.hasInterceptionRouteInCurrentTree)(_);v.lazyData=e=(0,c.fetchServerResponse)(new URL(n,location.origin),t,r?p.nextUrl:null,h),v.lazyDataResolved=!1}let t=(0,l.use)(e);v.lazyDataResolved||(setTimeout(()=>{(0,l.startTransition)(()=>{y({previousTree:_,serverResponse:t})})}),v.lazyDataResolved=!0),(0,l.use)(s.unresolvedThenable)}return(0,u.jsx)(i.LayoutRouterContext.Provider,{value:{tree:a[1][t],childNodes:v.parallelRoutes,url:n,loading:v.loading},children:R})}function O(e){let{children:t,hasLoading:n,loading:r,loadingStyles:o,loadingScripts:a}=e;return n?(0,u.jsx)(l.Suspense,{fallback:(0,u.jsxs)(u.Fragment,{children:[o,a,r]}),children:t}):(0,u.jsx)(u.Fragment,{children:t})}function S(e){let{parallelRouterKey:t,segmentPath:n,error:r,errorStyles:o,errorScripts:a,templateStyles:c,templateScripts:s,template:d,notFound:p,notFoundStyles:b}=e,g=(0,l.useContext)(i.LayoutRouterContext);if(!g)throw Error("invariant expected layout router to be mounted");let{childNodes:m,tree:R,url:S,loading:E}=g,w=m.get(t);w||(w=new Map,m.set(t,w));let T=R[1][t][0],M=(0,_.getSegmentValue)(T),x=[T];return(0,u.jsx)(u.Fragment,{children:x.map(e=>{let l=(0,_.getSegmentValue)(e),g=(0,v.createRouterCacheKey)(e);return(0,u.jsxs)(i.TemplateContext.Provider,{value:(0,u.jsx)(P,{segmentPath:n,children:(0,u.jsx)(f.ErrorBoundary,{errorComponent:r,errorStyles:o,errorScripts:a,children:(0,u.jsx)(O,{hasLoading:!!E,loading:null==E?void 0:E[0],loadingStyles:null==E?void 0:E[1],loadingScripts:null==E?void 
0:E[2],children:(0,u.jsx)(y.NotFoundBoundary,{notFound:p,notFoundStyles:b,children:(0,u.jsx)(h.RedirectBoundary,{children:(0,u.jsx)(j,{parallelRouterKey:t,url:S,tree:R,childNodes:w,segmentPath:n,cacheKey:g,isActive:M===l})})})})})}),children:[c,s,d]},(0,v.createRouterCacheKey)(e,!0))})})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},76015:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{canSegmentBeOverridden:function(){return u},matchSegment:function(){return o}});let r=n(87417),o=(e,t)=>"string"==typeof e?"string"==typeof t&&e===t:"string"!=typeof t&&e[0]===t[0]&&e[1]===t[1],u=(e,t)=>{var n;return!Array.isArray(e)&&!!Array.isArray(t)&&(null==(n=(0,r.getSegmentParam)(e))?void 0:n.param)===t[0]};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},35475:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ReadonlyURLSearchParams:function(){return i.ReadonlyURLSearchParams},RedirectType:function(){return i.RedirectType},ServerInsertedHTMLContext:function(){return c.ServerInsertedHTMLContext},notFound:function(){return i.notFound},permanentRedirect:function(){return i.permanentRedirect},redirect:function(){return i.redirect},useParams:function(){return p},usePathname:function(){return f},useRouter:function(){return d},useSearchParams:function(){return s},useSelectedLayoutSegment:function(){return y},useSelectedLayoutSegments:function(){return h},useServerInsertedHTML:function(){return c.useServerInsertedHTML}});let r=n(2265),o=n(61956),u=n(79060),l=n(73171),a=n(84541),i=n(52646),c=n(55501);function s(){let e=(0,r.useContext)(u.SearchParamsContext),t=(0,r.useMemo)(()=>e?new i.ReadonlyURLSearchParams(e):null,[e]);if("undefined"==typeof window){let{bailoutToClientRendering:e}=n(96149);e("useSearchParams()")}return t}function f(){return(0,r.useContext)(u.PathnameContext)}function d(){let e=(0,r.useContext)(o.AppRouterContext);if(null===e)throw Error("invariant expected app router to be mounted");return e}function p(){return(0,r.useContext)(u.PathParamsContext)}function h(e){void 0===e&&(e="children");let t=(0,r.useContext)(o.LayoutRouterContext);return t?function e(t,n,r,o){let u;if(void 0===r&&(r=!0),void 0===o&&(o=[]),r)u=t[1][n];else{var i;let e=t[1];u=null!=(i=e.children)?i:Object.values(e)[0]}if(!u)return o;let c=u[0],s=(0,l.getSegmentValue)(c);return!s||s.startsWith(a.PAGE_SEGMENT_KEY)?o:(o.push(s),e(u,n,!1,o))}(t.tree,e):null}function y(e){void 0===e&&(e="children");let t=h(e);if(!t||0===t.length)return null;let n="children"===e?t[0]:t[t.length-1];return n===a.DEFAULT_SEGMENT_KEY?null:n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},52646:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ReadonlyURLSearchParams:function(){return l},RedirectType:function(){return 
r.RedirectType},notFound:function(){return o.notFound},permanentRedirect:function(){return r.permanentRedirect},redirect:function(){return r.redirect}});let r=n(88968),o=n(98200);class u extends Error{constructor(){super("Method unavailable on `ReadonlyURLSearchParams`. Read more: https://nextjs.org/docs/app/api-reference/functions/use-search-params#updating-searchparams")}}class l extends URLSearchParams{append(){throw new u}delete(){throw new u}set(){throw new u}sort(){throw new u}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},80:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"NotFoundBoundary",{enumerable:!0,get:function(){return s}});let r=n(53099),o=n(57437),u=r._(n(2265)),l=n(35475),a=n(98200);n(31765);let i=n(61956);class c extends u.default.Component{componentDidCatch(){}static getDerivedStateFromError(e){if((0,a.isNotFoundError)(e))return{notFoundTriggered:!0};throw e}static getDerivedStateFromProps(e,t){return e.pathname!==t.previousPathname&&t.notFoundTriggered?{notFoundTriggered:!1,previousPathname:e.pathname}:{notFoundTriggered:t.notFoundTriggered,previousPathname:e.pathname}}render(){return this.state.notFoundTriggered?(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)("meta",{name:"robots",content:"noindex"}),!1,this.props.notFoundStyles,this.props.notFound]}):this.props.children}constructor(e){super(e),this.state={notFoundTriggered:!!e.asNotFound,previousPathname:e.pathname}}}function s(e){let{notFound:t,notFoundStyles:n,asNotFound:r,children:a}=e,s=(0,l.usePathname)(),f=(0,u.useContext)(i.MissingSlotContext);return t?(0,o.jsx)(c,{pathname:s,notFound:t,notFoundStyles:n,asNotFound:r,missingSlots:f,children:a}):(0,o.jsx)(o.Fragment,{children:a})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},98200:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{isNotFoundError:function(){return o},notFound:function(){return r}});let n="NEXT_NOT_FOUND";function r(){let e=Error(n);throw e.digest=n,e}function o(e){return"object"==typeof e&&null!==e&&"digest"in e&&e.digest===n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},29744:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"PromiseQueue",{enumerable:!0,get:function(){return c}});let r=n(2522),o=n(90675);var u=o._("_maxConcurrency"),l=o._("_runningCount"),a=o._("_queue"),i=o._("_processNext");class c{enqueue(e){let t,n;let o=new Promise((e,r)=>{t=e,n=r}),u=async()=>{try{r._(this,l)[l]++;let n=await e();t(n)}catch(e){n(e)}finally{r._(this,l)[l]--,r._(this,i)[i]()}};return r._(this,a)[a].push({promiseFn:o,task:u}),r._(this,i)[i](),o}bump(e){let t=r._(this,a)[a].findIndex(t=>t.promiseFn===e);if(t>-1){let e=r._(this,a)[a].splice(t,1)[0];r._(this,a)[a].unshift(e),r._(this,i)[i](!0)}}constructor(e=5){Object.defineProperty(this,i,{value:s}),Object.defineProperty(this,u,{writable:!0,value:void 
0}),Object.defineProperty(this,l,{writable:!0,value:void 0}),Object.defineProperty(this,a,{writable:!0,value:void 0}),r._(this,u)[u]=e,r._(this,l)[l]=0,r._(this,a)[a]=[]}}function s(e){if(void 0===e&&(e=!1),(r._(this,l)[l]0){var t;null==(t=r._(this,a)[a].shift())||t.task()}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4123:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{RedirectBoundary:function(){return s},RedirectErrorBoundary:function(){return c}});let r=n(53099),o=n(57437),u=r._(n(2265)),l=n(35475),a=n(88968);function i(e){let{redirect:t,reset:n,redirectType:r}=e,o=(0,l.useRouter)();return(0,u.useEffect)(()=>{u.default.startTransition(()=>{r===a.RedirectType.push?o.push(t,{}):o.replace(t,{}),n()})},[t,r,n,o]),null}class c extends u.default.Component{static getDerivedStateFromError(e){if((0,a.isRedirectError)(e))return{redirect:(0,a.getURLFromRedirectError)(e),redirectType:(0,a.getRedirectTypeFromError)(e)};throw e}render(){let{redirect:e,redirectType:t}=this.state;return null!==e&&null!==t?(0,o.jsx)(i,{redirect:e,redirectType:t,reset:()=>this.setState({redirect:null})}):this.props.children}constructor(e){super(e),this.state={redirect:null,redirectType:null}}}function s(e){let{children:t}=e,n=(0,l.useRouter)();return(0,o.jsx)(c,{router:n,children:t})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},5001:function(e,t){"use strict";var n,r;Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"RedirectStatusCode",{enumerable:!0,get:function(){return n}}),(r=n||(n={}))[r.SeeOther=303]="SeeOther",r[r.TemporaryRedirect=307]="TemporaryRedirect",r[r.PermanentRedirect=308]="PermanentRedirect",("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},88968:function(e,t,n){"use strict";var r,o;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{RedirectType:function(){return r},getRedirectError:function(){return c},getRedirectStatusCodeFromError:function(){return y},getRedirectTypeFromError:function(){return h},getURLFromRedirectError:function(){return p},isRedirectError:function(){return d},permanentRedirect:function(){return f},redirect:function(){return s}});let u=n(20544),l=n(90295),a=n(5001),i="NEXT_REDIRECT";function c(e,t,n){void 0===n&&(n=a.RedirectStatusCode.TemporaryRedirect);let r=Error(i);r.digest=i+";"+t+";"+e+";"+n+";";let o=u.requestAsyncStorage.getStore();return o&&(r.mutableCookies=o.mutableCookies),r}function s(e,t){void 0===t&&(t="replace");let n=l.actionAsyncStorage.getStore();throw c(e,t,(null==n?void 0:n.isAction)?a.RedirectStatusCode.SeeOther:a.RedirectStatusCode.TemporaryRedirect)}function f(e,t){void 0===t&&(t="replace");let n=l.actionAsyncStorage.getStore();throw c(e,t,(null==n?void 0:n.isAction)?a.RedirectStatusCode.SeeOther:a.RedirectStatusCode.PermanentRedirect)}function d(e){if("object"!=typeof e||null===e||!("digest"in e)||"string"!=typeof 
e.digest)return!1;let[t,n,r,o]=e.digest.split(";",4),u=Number(o);return t===i&&("replace"===n||"push"===n)&&"string"==typeof r&&!isNaN(u)&&u in a.RedirectStatusCode}function p(e){return d(e)?e.digest.split(";",3)[2]:null}function h(e){if(!d(e))throw Error("Not a redirect error");return e.digest.split(";",2)[1]}function y(e){if(!d(e))throw Error("Not a redirect error");return Number(e.digest.split(";",4)[3])}(o=r||(r={})).push="push",o.replace="replace",("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},36423:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return a}});let r=n(53099),o=n(57437),u=r._(n(2265)),l=n(61956);function a(){let e=(0,u.useContext)(l.TemplateContext);return(0,o.jsx)(o.Fragment,{children:e})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},20544:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{getExpectedRequestStore:function(){return o},requestAsyncStorage:function(){return r.requestAsyncStorage}});let r=n(25575);function o(e){let t=r.requestAsyncStorage.getStore();if(t)return t;throw Error("`"+e+"` was called outside a request scope. Read more: https://nextjs.org/docs/messages/next-dynamic-api-wrong-context")}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},22356:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"applyFlightData",{enumerable:!0,get:function(){return u}});let r=n(27420),o=n(92576);function u(e,t,n,u){let[l,a,i]=n.slice(-3);if(null===a)return!1;if(3===n.length){let n=a[2],o=a[3];t.loading=o,t.rsc=n,t.prefetchRsc=null,(0,r.fillLazyItemsTillLeafWithHead)(t,e,l,a,i,u)}else t.rsc=e.rsc,t.prefetchRsc=e.prefetchRsc,t.parallelRoutes=new Map(e.parallelRoutes),t.loading=e.loading,(0,o.fillCacheWithNewSubTreeData)(t,e,n,u);return!0}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},81935:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"applyRouterStatePatchToTree",{enumerable:!0,get:function(){return function e(t,n,r,a){let i;let[c,s,f,d,p]=n;if(1===t.length){let e=l(n,r,t);return(0,u.addRefreshMarkerToActiveParallelSegments)(e,a),e}let[h,y]=t;if(!(0,o.matchSegment)(h,c))return null;if(2===t.length)i=l(s[y],r,t);else if(null===(i=e(t.slice(2),s[y],r,a)))return null;let _=[t[0],{...s,[y]:i},f,d];return p&&(_[4]=!0),(0,u.addRefreshMarkerToActiveParallelSegments)(_,a),_}}});let r=n(84541),o=n(76015),u=n(50232);function l(e,t,n){let[u,a]=e,[i,c]=t;if(i===r.DEFAULT_SEGMENT_KEY&&u!==r.DEFAULT_SEGMENT_KEY)return e;if((0,o.matchSegment)(u,i)){let t={};for(let e in a)void 0!==c[e]?t[e]=l(a[e],c[e],n):t[e]=a[e];for(let e in c)t[e]||(t[e]=c[e]);let r=[u,t];return 
e[2]&&(r[2]=e[2]),e[3]&&(r[3]=e[3]),e[4]&&(r[4]=e[4]),r}return t}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},65556:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"clearCacheNodeDataForSegmentPath",{enumerable:!0,get:function(){return function e(t,n,o){let u=o.length<=2,[l,a]=o,i=(0,r.createRouterCacheKey)(a),c=n.parallelRoutes.get(l),s=t.parallelRoutes.get(l);s&&s!==c||(s=new Map(c),t.parallelRoutes.set(l,s));let f=null==c?void 0:c.get(i),d=s.get(i);if(u){d&&d.lazyData&&d!==f||s.set(i,{lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:null});return}if(!d||!f){d||s.set(i,{lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:null});return}return d===f&&(d={lazyData:d.lazyData,rsc:d.rsc,prefetchRsc:d.prefetchRsc,head:d.head,prefetchHead:d.prefetchHead,parallelRoutes:new Map(d.parallelRoutes),lazyDataResolved:d.lazyDataResolved,loading:d.loading},s.set(i,d)),e(d,f,o.slice(2))}}});let r=n(78505);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},5410:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{computeChangedPath:function(){return s},extractPathFromFlightRouterState:function(){return c}});let r=n(91182),o=n(84541),u=n(76015),l=e=>"/"===e[0]?e.slice(1):e,a=e=>"string"==typeof e?"children"===e?"":e:e[1];function i(e){return e.reduce((e,t)=>""===(t=l(t))||(0,o.isGroupSegment)(t)?e:e+"/"+t,"")||"/"}function c(e){var t;let n=Array.isArray(e[0])?e[0][1]:e[0];if(n===o.DEFAULT_SEGMENT_KEY||r.INTERCEPTION_ROUTE_MARKERS.some(e=>n.startsWith(e)))return;if(n.startsWith(o.PAGE_SEGMENT_KEY))return"";let u=[a(n)],l=null!=(t=e[1])?t:{},s=l.children?c(l.children):void 0;if(void 0!==s)u.push(s);else for(let[e,t]of Object.entries(l)){if("children"===e)continue;let n=c(t);void 0!==n&&u.push(n)}return i(u)}function s(e,t){let n=function e(t,n){let[o,l]=t,[i,s]=n,f=a(o),d=a(i);if(r.INTERCEPTION_ROUTE_MARKERS.some(e=>f.startsWith(e)||d.startsWith(e)))return"";if(!(0,u.matchSegment)(o,i)){var p;return null!=(p=c(n))?p:""}for(let t in l)if(s[t]){let n=e(l[t],s[t]);if(null!==n)return a(i)+"/"+n}return null}(e,t);return null==n||"/"===n?n:i(n.split("/"))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},33456:function(e,t){"use strict";function n(e,t){return void 0===t&&(t=!0),e.pathname+e.search+(t?e.hash:"")}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createHrefFromUrl",{enumerable:!0,get:function(){return n}}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},82952:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createInitialRouterState",{enumerable:!0,get:function(){return 
c}});let r=n(33456),o=n(27420),u=n(5410),l=n(60305),a=n(24673),i=n(50232);function c(e){var t;let{buildId:n,initialTree:c,initialSeedData:s,urlParts:f,initialParallelRoutes:d,location:p,initialHead:h,couldBeIntercepted:y}=e,_=f.join("/"),v=!p,b={lazyData:null,rsc:s[2],prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:v?new Map:d,lazyDataResolved:!1,loading:s[3]},g=p?(0,r.createHrefFromUrl)(p):_;(0,i.addRefreshMarkerToActiveParallelSegments)(c,g);let m=new Map;(null===d||0===d.size)&&(0,o.fillLazyItemsTillLeafWithHead)(b,void 0,c,s,h);let R={buildId:n,tree:c,cache:b,prefetchCache:m,pushRef:{pendingPush:!1,mpaNavigation:!1,preserveCustomHistoryState:!0},focusAndScrollRef:{apply:!1,onlyHashChange:!1,hashFragment:null,segmentPaths:[]},canonicalUrl:g,nextUrl:null!=(t=(0,u.extractPathFromFlightRouterState)(c)||(null==p?void 0:p.pathname))?t:null};if(p){let e=new URL(""+p.pathname+p.search,p.origin),t=[["",c,null,null]];(0,l.createPrefetchCacheEntryForInitialLoad)({url:e,kind:a.PrefetchKind.AUTO,data:[t,void 0,!1,y],tree:R.tree,prefetchCache:R.prefetchCache,nextUrl:R.nextUrl})}return R}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},78505:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createRouterCacheKey",{enumerable:!0,get:function(){return o}});let r=n(84541);function o(e,t){return(void 0===t&&(t=!1),Array.isArray(e))?e[0]+"|"+e[1]+"|"+e[2]:t&&e.startsWith(r.PAGE_SEGMENT_KEY)?r.PAGE_SEGMENT_KEY:e}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},44848:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fetchServerResponse",{enumerable:!0,get:function(){return s}});let r=n(6866),o=n(12846),u=n(83079),l=n(24673),a=n(37207),{createFromFetch:i}=n(6671);function c(e){return[(0,o.urlToUrlWithoutFlightMarker)(e).toString(),void 0,!1,!1]}async function s(e,t,n,s,f){let d={[r.RSC_HEADER]:"1",[r.NEXT_ROUTER_STATE_TREE]:encodeURIComponent(JSON.stringify(t))};f===l.PrefetchKind.AUTO&&(d[r.NEXT_ROUTER_PREFETCH_HEADER]="1"),n&&(d[r.NEXT_URL]=n);let p=(0,a.hexHash)([d[r.NEXT_ROUTER_PREFETCH_HEADER]||"0",d[r.NEXT_ROUTER_STATE_TREE],d[r.NEXT_URL]].join(","));try{var h;let t=new URL(e);t.pathname.endsWith("/")?t.pathname+="index.txt":t.pathname+=".txt",t.searchParams.set(r.NEXT_RSC_UNION_QUERY,p);let n=await fetch(t,{credentials:"same-origin",headers:d}),l=(0,o.urlToUrlWithoutFlightMarker)(n.url),a=n.redirected?l:void 0,f=n.headers.get("content-type")||"",y=!!n.headers.get(r.NEXT_DID_POSTPONE_HEADER),_=!!(null==(h=n.headers.get("vary"))?void 0:h.includes(r.NEXT_URL)),v=f===r.RSC_CONTENT_TYPE_HEADER;if(v||(v=f.startsWith("text/plain")),!v||!n.ok)return e.hash&&(l.hash=e.hash),c(l.toString());let[b,g]=await i(Promise.resolve(n),{callServer:u.callServer});if(s!==b)return c(n.url);return[g,a,y,_]}catch(t){return console.error("Failed to fetch RSC payload for "+e+". 
Falling back to browser navigation.",t),[e.toString(),void 0,!1,!1]}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},92576:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fillCacheWithNewSubTreeData",{enumerable:!0,get:function(){return function e(t,n,l,a){let i=l.length<=5,[c,s]=l,f=(0,u.createRouterCacheKey)(s),d=n.parallelRoutes.get(c);if(!d)return;let p=t.parallelRoutes.get(c);p&&p!==d||(p=new Map(d),t.parallelRoutes.set(c,p));let h=d.get(f),y=p.get(f);if(i){if(!y||!y.lazyData||y===h){let e=l[3];y={lazyData:null,rsc:e[2],prefetchRsc:null,head:null,prefetchHead:null,loading:e[3],parallelRoutes:h?new Map(h.parallelRoutes):new Map,lazyDataResolved:!1},h&&(0,r.invalidateCacheByRouterState)(y,h,l[2]),(0,o.fillLazyItemsTillLeafWithHead)(y,h,l[2],e,l[4],a),p.set(f,y)}return}y&&h&&(y===h&&(y={lazyData:y.lazyData,rsc:y.rsc,prefetchRsc:y.prefetchRsc,head:y.head,prefetchHead:y.prefetchHead,parallelRoutes:new Map(y.parallelRoutes),lazyDataResolved:!1,loading:y.loading},p.set(f,y)),e(y,h,l.slice(2),a))}}});let r=n(94377),o=n(27420),u=n(78505);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},27420:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fillLazyItemsTillLeafWithHead",{enumerable:!0,get:function(){return function e(t,n,u,l,a,i){if(0===Object.keys(u[1]).length){t.head=a;return}for(let c in u[1]){let s;let f=u[1][c],d=f[0],p=(0,r.createRouterCacheKey)(d),h=null!==l&&void 0!==l[1][c]?l[1][c]:null;if(n){let r=n.parallelRoutes.get(c);if(r){let n;let u=(null==i?void 0:i.kind)==="auto"&&i.status===o.PrefetchCacheEntryStatus.reusable,l=new Map(r),s=l.get(p);n=null!==h?{lazyData:null,rsc:h[2],prefetchRsc:null,head:null,prefetchHead:null,loading:h[3],parallelRoutes:new Map(null==s?void 0:s.parallelRoutes),lazyDataResolved:!1}:u&&s?{lazyData:s.lazyData,rsc:s.rsc,prefetchRsc:s.prefetchRsc,head:s.head,prefetchHead:s.prefetchHead,parallelRoutes:new Map(s.parallelRoutes),lazyDataResolved:s.lazyDataResolved,loading:s.loading}:{lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map(null==s?void 0:s.parallelRoutes),lazyDataResolved:!1,loading:null},l.set(p,n),e(n,s,f,h||null,a,i),t.parallelRoutes.set(c,l);continue}}if(null!==h){let e=h[2],t=h[3];s={lazyData:null,rsc:e,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:t}}else s={lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:null};let y=t.parallelRoutes.get(c);y?y.set(p,s):t.parallelRoutes.set(c,new Map([[p,s]])),e(s,void 0,f,h,a,i)}}}});let r=n(78505),o=n(24673);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},44510:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"handleMutable",{enumerable:!0,get:function(){return u}});let r=n(5410);function o(e){return void 0!==e}function u(e,t){var n,u,l;let 
a=null==(u=t.shouldScroll)||u,i=e.nextUrl;if(o(t.patchedTree)){let n=(0,r.computeChangedPath)(e.tree,t.patchedTree);n?i=n:i||(i=e.canonicalUrl)}return{buildId:e.buildId,canonicalUrl:o(t.canonicalUrl)?t.canonicalUrl===e.canonicalUrl?e.canonicalUrl:t.canonicalUrl:e.canonicalUrl,pushRef:{pendingPush:o(t.pendingPush)?t.pendingPush:e.pushRef.pendingPush,mpaNavigation:o(t.mpaNavigation)?t.mpaNavigation:e.pushRef.mpaNavigation,preserveCustomHistoryState:o(t.preserveCustomHistoryState)?t.preserveCustomHistoryState:e.pushRef.preserveCustomHistoryState},focusAndScrollRef:{apply:!!a&&(!!o(null==t?void 0:t.scrollableSegments)||e.focusAndScrollRef.apply),onlyHashChange:!!t.hashFragment&&e.canonicalUrl.split("#",1)[0]===(null==(n=t.canonicalUrl)?void 0:n.split("#",1)[0]),hashFragment:a?t.hashFragment&&""!==t.hashFragment?decodeURIComponent(t.hashFragment.slice(1)):e.focusAndScrollRef.hashFragment:null,segmentPaths:a?null!=(l=null==t?void 0:t.scrollableSegments)?l:e.focusAndScrollRef.segmentPaths:[]},cache:t.cache?t.cache:e.cache,prefetchCache:t.prefetchCache?t.prefetchCache:e.prefetchCache,tree:o(t.patchedTree)?t.patchedTree:e.tree,nextUrl:i}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},77831:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"handleSegmentMismatch",{enumerable:!0,get:function(){return o}});let r=n(95967);function o(e,t,n){return(0,r.handleExternalUrl)(e,{},e.canonicalUrl,!0)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},77058:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"invalidateCacheBelowFlightSegmentPath",{enumerable:!0,get:function(){return function e(t,n,o){let u=o.length<=2,[l,a]=o,i=(0,r.createRouterCacheKey)(a),c=n.parallelRoutes.get(l);if(!c)return;let s=t.parallelRoutes.get(l);if(s&&s!==c||(s=new Map(c),t.parallelRoutes.set(l,s)),u){s.delete(i);return}let f=c.get(i),d=s.get(i);d&&f&&(d===f&&(d={lazyData:d.lazyData,rsc:d.rsc,prefetchRsc:d.prefetchRsc,head:d.head,prefetchHead:d.prefetchHead,parallelRoutes:new Map(d.parallelRoutes),lazyDataResolved:d.lazyDataResolved},s.set(i,d)),e(d,f,o.slice(2)))}}});let r=n(78505);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},94377:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"invalidateCacheByRouterState",{enumerable:!0,get:function(){return o}});let r=n(78505);function o(e,t,n){for(let o in n[1]){let u=n[1][o][0],l=(0,r.createRouterCacheKey)(u),a=t.parallelRoutes.get(o);if(a){let t=new Map(a);t.delete(l),e.parallelRoutes.set(o,t)}}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},63237:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"isNavigatingToNewRootLayout",{enumerable:!0,get:function(){return function e(t,n){let 
r=t[0],o=n[0];if(Array.isArray(r)&&Array.isArray(o)){if(r[0]!==o[0]||r[2]!==o[2])return!0}else if(r!==o)return!0;if(t[4])return!n[4];if(n[4])return!0;let u=Object.values(t[1])[0],l=Object.values(n[1])[0];return!u||!l||e(u,l)}}}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},56118:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{abortTask:function(){return c},listenForDynamicRequest:function(){return a},updateCacheNodeOnNavigation:function(){return function e(t,n,a,c,s){let f=n[1],d=a[1],p=c[1],h=t.parallelRoutes,y=new Map(h),_={},v=null;for(let t in d){let n;let a=d[t],c=f[t],b=h.get(t),g=p[t],m=a[0],R=(0,u.createRouterCacheKey)(m),P=void 0!==c?c[0]:void 0,j=void 0!==b?b.get(R):void 0;if(null!==(n=m===r.PAGE_SEGMENT_KEY?l(a,void 0!==g?g:null,s):m===r.DEFAULT_SEGMENT_KEY?void 0!==c?{route:c,node:null,children:null}:l(a,void 0!==g?g:null,s):void 0!==P&&(0,o.matchSegment)(m,P)&&void 0!==j&&void 0!==c?null!=g?e(j,c,a,g,s):function(e){let t=i(e,null,null);return{route:e,node:t,children:null}}(a):l(a,void 0!==g?g:null,s))){null===v&&(v=new Map),v.set(t,n);let e=n.node;if(null!==e){let n=new Map(b);n.set(R,e),y.set(t,n)}_[t]=n.route}else _[t]=a}if(null===v)return null;let b={lazyData:null,rsc:t.rsc,prefetchRsc:t.prefetchRsc,head:t.head,prefetchHead:t.prefetchHead,loading:t.loading,parallelRoutes:y,lazyDataResolved:!1};return{route:function(e,t){let n=[e[0],t];return 2 in e&&(n[2]=e[2]),3 in e&&(n[3]=e[3]),4 in e&&(n[4]=e[4]),n}(a,_),node:b,children:v}}},updateCacheNodeOnPopstateRestoration:function(){return function e(t,n){let r=n[1],o=t.parallelRoutes,l=new Map(o);for(let t in r){let n=r[t],a=n[0],i=(0,u.createRouterCacheKey)(a),c=o.get(t);if(void 0!==c){let r=c.get(i);if(void 0!==r){let o=e(r,n),u=new Map(c);u.set(i,o),l.set(t,u)}}}let a=t.rsc,i=d(a)&&"pending"===a.status;return{lazyData:null,rsc:a,head:t.head,prefetchHead:i?t.prefetchHead:null,prefetchRsc:i?t.prefetchRsc:null,loading:i?t.loading:null,parallelRoutes:l,lazyDataResolved:!1}}}});let r=n(84541),o=n(76015),u=n(78505);function l(e,t,n){let r=i(e,t,n);return{route:e,node:r,children:null}}function a(e,t){t.then(t=>{for(let n of t[0]){let t=n.slice(0,-3),r=n[n.length-3],l=n[n.length-2],a=n[n.length-1];"string"!=typeof t&&function(e,t,n,r,l){let a=e;for(let e=0;e{c(e,t)})}function i(e,t,n){let r=e[1],o=null!==t?t[1]:null,l=new Map;for(let e in r){let t=r[e],a=null!==o?o[e]:null,c=t[0],s=(0,u.createRouterCacheKey)(c),f=i(t,void 0===a?null:a,n),d=new Map;d.set(s,f),l.set(e,d)}let a=0===l.size,c=null!==t?t[2]:null,s=null!==t?t[3]:null;return{lazyData:null,parallelRoutes:l,prefetchRsc:void 0!==c?c:null,prefetchHead:a?n:null,loading:void 0!==s?s:null,rsc:p(),head:a?p():null,lazyDataResolved:!1}}function c(e,t){let n=e.node;if(null===n)return;let r=e.children;if(null===r)s(e.route,n,t);else for(let e of r.values())c(e,t);e.node=null}function s(e,t,n){let r=e[1],o=t.parallelRoutes;for(let e in r){let t=r[e],l=o.get(e);if(void 0===l)continue;let a=t[0],i=(0,u.createRouterCacheKey)(a),c=l.get(i);void 0!==c&&s(t,c,n)}let l=t.rsc;d(l)&&(null===n?l.resolve(null):l.reject(n));let a=t.head;d(a)&&a.resolve(null)}let f=Symbol();function d(e){return e&&e.tag===f}function p(){let e,t;let n=new Promise((n,r)=>{e=n,t=r});return 
n.status="pending",n.resolve=t=>{"pending"===n.status&&(n.status="fulfilled",n.value=t,e(t))},n.reject=e=>{"pending"===n.status&&(n.status="rejected",n.reason=e,t(e))},n.tag=f,n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},60305:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{createPrefetchCacheEntryForInitialLoad:function(){return c},getOrCreatePrefetchCacheEntry:function(){return i},prunePrefetchCache:function(){return f}});let r=n(33456),o=n(44848),u=n(24673),l=n(24819);function a(e,t){let n=(0,r.createHrefFromUrl)(e,!1);return t?t+"%"+n:n}function i(e){let t,{url:n,nextUrl:r,tree:o,buildId:l,prefetchCache:i,kind:c}=e,f=a(n,r),d=i.get(f);if(d)t=d;else{let e=a(n),r=i.get(e);r&&(t=r)}return t?(t.status=h(t),t.kind!==u.PrefetchKind.FULL&&c===u.PrefetchKind.FULL)?s({tree:o,url:n,buildId:l,nextUrl:r,prefetchCache:i,kind:null!=c?c:u.PrefetchKind.TEMPORARY}):(c&&t.kind===u.PrefetchKind.TEMPORARY&&(t.kind=c),t):s({tree:o,url:n,buildId:l,nextUrl:r,prefetchCache:i,kind:c||u.PrefetchKind.TEMPORARY})}function c(e){let{nextUrl:t,tree:n,prefetchCache:r,url:o,kind:l,data:i}=e,[,,,c]=i,s=c?a(o,t):a(o),f={treeAtTimeOfPrefetch:n,data:Promise.resolve(i),kind:l,prefetchTime:Date.now(),lastUsedTime:Date.now(),key:s,status:u.PrefetchCacheEntryStatus.fresh};return r.set(s,f),f}function s(e){let{url:t,kind:n,tree:r,nextUrl:i,buildId:c,prefetchCache:s}=e,f=a(t),d=l.prefetchQueue.enqueue(()=>(0,o.fetchServerResponse)(t,r,i,c,n).then(e=>{let[,,,n]=e;return n&&function(e){let{url:t,nextUrl:n,prefetchCache:r}=e,o=a(t),u=r.get(o);if(!u)return;let l=a(t,n);r.set(l,u),r.delete(o)}({url:t,nextUrl:i,prefetchCache:s}),e})),p={treeAtTimeOfPrefetch:r,data:d,kind:n,prefetchTime:Date.now(),lastUsedTime:null,key:f,status:u.PrefetchCacheEntryStatus.fresh};return s.set(f,p),p}function f(e){for(let[t,n]of e)h(n)===u.PrefetchCacheEntryStatus.expired&&e.delete(t)}let d=1e3*Number("30"),p=1e3*Number("300");function h(e){let{kind:t,prefetchTime:n,lastUsedTime:r}=e;return Date.now()<(null!=r?r:n)+d?r?u.PrefetchCacheEntryStatus.reusable:u.PrefetchCacheEntryStatus.fresh:"auto"===t&&Date.now(){let[n,f]=t,h=!1;if(S.lastUsedTime||(S.lastUsedTime=Date.now(),h=!0),"string"==typeof n)return _(e,R,n,O);if(document.getElementById("__next-page-redirect"))return _(e,R,j,O);let b=e.tree,g=e.cache,w=[];for(let t of n){let n=t.slice(0,-4),r=t.slice(-3)[0],c=["",...n],f=(0,u.applyRouterStatePatchToTree)(c,b,r,j);if(null===f&&(f=(0,u.applyRouterStatePatchToTree)(c,E,r,j)),null!==f){if((0,a.isNavigatingToNewRootLayout)(b,f))return _(e,R,j,O);let u=(0,d.createEmptyCacheNode)(),m=!1;for(let e of(S.status!==i.PrefetchCacheEntryStatus.stale||h?m=(0,s.applyFlightData)(g,u,t,S):(m=function(e,t,n,r){let o=!1;for(let u of(e.rsc=t.rsc,e.prefetchRsc=t.prefetchRsc,e.loading=t.loading,e.parallelRoutes=new Map(t.parallelRoutes),v(r).map(e=>[...n,...e])))(0,y.clearCacheNodeDataForSegmentPath)(e,t,u),o=!0;return o}(u,g,n,r),S.lastUsedTime=Date.now()),(0,l.shouldHardNavigate)(c,b)?(u.rsc=g.rsc,u.prefetchRsc=g.prefetchRsc,(0,o.invalidateCacheBelowFlightSegmentPath)(u,g,n),R.cache=u):m&&(R.cache=u,g=u),b=f,v(r))){let t=[...n,...e];t[t.length-1]!==p.DEFAULT_SEGMENT_KEY&&w.push(t)}}}return 
R.patchedTree=b,R.canonicalUrl=f?(0,r.createHrefFromUrl)(f):j,R.pendingPush=O,R.scrollableSegments=w,R.hashFragment=P,R.shouldScroll=m,(0,c.handleMutable)(e,R)},()=>e)};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},24819:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{prefetchQueue:function(){return l},prefetchReducer:function(){return a}});let r=n(6866),o=n(29744),u=n(60305),l=new o.PromiseQueue(5);function a(e,t){(0,u.prunePrefetchCache)(e.prefetchCache);let{url:n}=t;return n.searchParams.delete(r.NEXT_RSC_UNION_QUERY),(0,u.getOrCreatePrefetchCacheEntry)({url:n,nextUrl:e.nextUrl,prefetchCache:e.prefetchCache,kind:t.kind,tree:e.tree,buildId:e.buildId}),e}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},99601:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"refreshReducer",{enumerable:!0,get:function(){return h}});let r=n(44848),o=n(33456),u=n(81935),l=n(63237),a=n(95967),i=n(44510),c=n(27420),s=n(12846),f=n(77831),d=n(28077),p=n(50232);function h(e,t){let{origin:n}=t,h={},y=e.canonicalUrl,_=e.tree;h.preserveCustomHistoryState=!1;let v=(0,s.createEmptyCacheNode)(),b=(0,d.hasInterceptionRouteInCurrentTree)(e.tree);return v.lazyData=(0,r.fetchServerResponse)(new URL(y,n),[_[0],_[1],_[2],"refetch"],b?e.nextUrl:null,e.buildId),v.lazyData.then(async n=>{let[r,s]=n;if("string"==typeof r)return(0,a.handleExternalUrl)(e,h,r,e.pushRef.pendingPush);for(let n of(v.lazyData=null,r)){if(3!==n.length)return console.log("REFRESH FAILED"),e;let[r]=n,i=(0,u.applyRouterStatePatchToTree)([""],_,r,e.canonicalUrl);if(null===i)return(0,f.handleSegmentMismatch)(e,t,r);if((0,l.isNavigatingToNewRootLayout)(_,i))return(0,a.handleExternalUrl)(e,h,y,e.pushRef.pendingPush);let d=s?(0,o.createHrefFromUrl)(s):void 0;s&&(h.canonicalUrl=d);let[g,m]=n.slice(-2);if(null!==g){let e=g[2];v.rsc=e,v.prefetchRsc=null,(0,c.fillLazyItemsTillLeafWithHead)(v,void 0,r,g,m),h.prefetchCache=new Map}await (0,p.refreshInactiveParallelSegments)({state:e,updatedTree:i,updatedCache:v,includeNextUrl:b,canonicalUrl:h.canonicalUrl||e.canonicalUrl}),h.cache=v,h.patchedTree=i,h.canonicalUrl=y,_=i}return(0,i.handleMutable)(e,h)},()=>e)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},77784:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"restoreReducer",{enumerable:!0,get:function(){return u}});let r=n(33456),o=n(5410);function u(e,t){var n;let{url:u,tree:l}=t,a=(0,r.createHrefFromUrl)(u),i=l||e.tree,c=e.cache;return{buildId:e.buildId,canonicalUrl:a,pushRef:{pendingPush:!1,mpaNavigation:!1,preserveCustomHistoryState:!0},focusAndScrollRef:e.focusAndScrollRef,cache:c,prefetchCache:e.prefetchCache,tree:i,nextUrl:null!=(n=(0,o.extractPathFromFlightRouterState)(i))?n:u.pathname}}n(56118),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},13722:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"serverActionReducer",{enumerable:!0,get:function(){return g}});let r=n(83079),o=n(6866),u=n(1634),l=n(33456),a=n(95967),i=n(81935),c=n(63237),s=n(44510),f=n(27420),d=n(12846),p=n(28077),h=n(77831),y=n(50232),{createFromFetch:_,encodeReply:v}=n(6671);async function b(e,t,n){let l,{actionId:a,actionArgs:i}=n,c=await v(i),s=await fetch("",{method:"POST",headers:{Accept:o.RSC_CONTENT_TYPE_HEADER,[o.ACTION]:a,[o.NEXT_ROUTER_STATE_TREE]:encodeURIComponent(JSON.stringify(e.tree)),...t?{[o.NEXT_URL]:t}:{}},body:c}),f=s.headers.get("x-action-redirect");try{let e=JSON.parse(s.headers.get("x-action-revalidated")||"[[],0,0]");l={paths:e[0]||[],tag:!!e[1],cookie:e[2]}}catch(e){l={paths:[],tag:!1,cookie:!1}}let d=f?new URL((0,u.addBasePath)(f),new URL(e.canonicalUrl,window.location.href)):void 0;if(s.headers.get("content-type")===o.RSC_CONTENT_TYPE_HEADER){let e=await _(Promise.resolve(s),{callServer:r.callServer});if(f){let[,t]=null!=e?e:[];return{actionFlightData:t,redirectLocation:d,revalidatedParts:l}}let[t,[,n]]=null!=e?e:[];return{actionResult:t,actionFlightData:n,redirectLocation:d,revalidatedParts:l}}return{redirectLocation:d,revalidatedParts:l}}function g(e,t){let{resolve:n,reject:r}=t,o={},u=e.canonicalUrl,_=e.tree;o.preserveCustomHistoryState=!1;let v=e.nextUrl&&(0,p.hasInterceptionRouteInCurrentTree)(e.tree)?e.nextUrl:null;return o.inFlightServerAction=b(e,v,t),o.inFlightServerAction.then(async r=>{let{actionResult:p,actionFlightData:b,redirectLocation:g}=r;if(g&&(e.pushRef.pendingPush=!0,o.pendingPush=!0),!b)return(n(p),g)?(0,a.handleExternalUrl)(e,o,g.href,e.pushRef.pendingPush):e;if("string"==typeof b)return(0,a.handleExternalUrl)(e,o,b,e.pushRef.pendingPush);if(o.inFlightServerAction=null,g){let e=(0,l.createHrefFromUrl)(g,!1);o.canonicalUrl=e}for(let n of b){if(3!==n.length)return console.log("SERVER ACTION APPLY FAILED"),e;let[r]=n,s=(0,i.applyRouterStatePatchToTree)([""],_,r,g?(0,l.createHrefFromUrl)(g):e.canonicalUrl);if(null===s)return(0,h.handleSegmentMismatch)(e,t,r);if((0,c.isNavigatingToNewRootLayout)(_,s))return(0,a.handleExternalUrl)(e,o,u,e.pushRef.pendingPush);let[p,b]=n.slice(-2),m=null!==p?p[2]:null;if(null!==m){let t=(0,d.createEmptyCacheNode)();t.rsc=m,t.prefetchRsc=null,(0,f.fillLazyItemsTillLeafWithHead)(t,void 0,r,p,b),await (0,y.refreshInactiveParallelSegments)({state:e,updatedTree:s,updatedCache:t,includeNextUrl:!!v,canonicalUrl:o.canonicalUrl||e.canonicalUrl}),o.cache=t,o.prefetchCache=new Map}o.patchedTree=s,_=s}return n(p),(0,s.handleMutable)(e,o)},t=>(r(t),e))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},68448:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"serverPatchReducer",{enumerable:!0,get:function(){return f}});let r=n(33456),o=n(81935),u=n(63237),l=n(95967),a=n(22356),i=n(44510),c=n(12846),s=n(77831);function f(e,t){let{serverResponse:n}=t,[f,d]=n,p={};if(p.preserveCustomHistoryState=!1,"string"==typeof f)return(0,l.handleExternalUrl)(e,p,f,e.pushRef.pendingPush);let h=e.tree,y=e.cache;for(let n of f){let 
i=n.slice(0,-4),[f]=n.slice(-3,-2),_=(0,o.applyRouterStatePatchToTree)(["",...i],h,f,e.canonicalUrl);if(null===_)return(0,s.handleSegmentMismatch)(e,t,f);if((0,u.isNavigatingToNewRootLayout)(h,_))return(0,l.handleExternalUrl)(e,p,e.canonicalUrl,e.pushRef.pendingPush);let v=d?(0,r.createHrefFromUrl)(d):void 0;v&&(p.canonicalUrl=v);let b=(0,c.createEmptyCacheNode)();(0,a.applyFlightData)(y,b,n),p.patchedTree=_,p.cache=b,y=b,h=_}return(0,i.handleMutable)(e,p)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},50232:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{addRefreshMarkerToActiveParallelSegments:function(){return function e(t,n){let[r,o,,l]=t;for(let a in r.includes(u.PAGE_SEGMENT_KEY)&&"refresh"!==l&&(t[2]=n,t[3]="refresh"),o)e(o[a],n)}},refreshInactiveParallelSegments:function(){return l}});let r=n(22356),o=n(44848),u=n(84541);async function l(e){let t=new Set;await a({...e,rootTree:e.updatedTree,fetchedSegments:t})}async function a(e){let{state:t,updatedTree:n,updatedCache:u,includeNextUrl:l,fetchedSegments:i,rootTree:c=n,canonicalUrl:s}=e,[,f,d,p]=n,h=[];if(d&&d!==s&&"refresh"===p&&!i.has(d)){i.add(d);let e=(0,o.fetchServerResponse)(new URL(d,location.origin),[c[0],c[1],c[2],"refetch"],l?t.nextUrl:null,t.buildId).then(e=>{let t=e[0];if("string"!=typeof t)for(let e of t)(0,r.applyFlightData)(u,u,e)});h.push(e)}for(let e in f){let n=a({state:t,updatedTree:f[e],updatedCache:u,includeNextUrl:l,fetchedSegments:i,rootTree:c,canonicalUrl:s});h.push(n)}await Promise.all(h)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},24673:function(e,t){"use strict";var n,r,o,u;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ACTION_FAST_REFRESH:function(){return f},ACTION_NAVIGATE:function(){return a},ACTION_PREFETCH:function(){return s},ACTION_REFRESH:function(){return l},ACTION_RESTORE:function(){return i},ACTION_SERVER_ACTION:function(){return d},ACTION_SERVER_PATCH:function(){return c},PrefetchCacheEntryStatus:function(){return r},PrefetchKind:function(){return n},isThenable:function(){return p}});let l="refresh",a="navigate",i="restore",c="server-patch",s="prefetch",f="fast-refresh",d="server-action";function p(e){return e&&("object"==typeof e||"function"==typeof e)&&"function"==typeof e.then}(o=n||(n={})).AUTO="auto",o.FULL="full",o.TEMPORARY="temporary",(u=r||(r={})).fresh="fresh",u.reusable="reusable",u.expired="expired",u.stale="stale",("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},91450:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"reducer",{enumerable:!0,get:function(){return f}});let r=n(24673),o=n(95967),u=n(68448),l=n(77784),a=n(99601),i=n(24819),c=n(44529),s=n(13722),f="undefined"==typeof window?function(e,t){return e}:function(e,t){switch(t.type){case r.ACTION_NAVIGATE:return(0,o.navigateReducer)(e,t);case 
r.ACTION_SERVER_PATCH:return(0,u.serverPatchReducer)(e,t);case r.ACTION_RESTORE:return(0,l.restoreReducer)(e,t);case r.ACTION_REFRESH:return(0,a.refreshReducer)(e,t);case r.ACTION_FAST_REFRESH:return(0,c.fastRefreshReducer)(e,t);case r.ACTION_PREFETCH:return(0,i.prefetchReducer)(e,t);case r.ACTION_SERVER_ACTION:return(0,s.serverActionReducer)(e,t);default:throw Error("Unknown action")}};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},53728:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"shouldHardNavigate",{enumerable:!0,get:function(){return function e(t,n){let[o,u]=n,[l,a]=t;return(0,r.matchSegment)(l,o)?!(t.length<=2)&&e(t.slice(2),u[a]):!!Array.isArray(l)}}});let r=n(76015);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},54535:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{createDynamicallyTrackedSearchParams:function(){return a},createUntrackedSearchParams:function(){return l}});let r=n(51845),o=n(86999),u=n(30650);function l(e){let t=r.staticGenerationAsyncStorage.getStore();return t&&t.forceStatic?{}:e}function a(e){let t=r.staticGenerationAsyncStorage.getStore();return t?t.forceStatic?{}:t.isStaticGeneration||t.dynamicShouldError?new Proxy({},{get:(e,n,r)=>("string"==typeof n&&(0,o.trackDynamicDataAccessed)(t,"searchParams."+n),u.ReflectAdapter.get(e,n,r)),has:(e,n)=>("string"==typeof n&&(0,o.trackDynamicDataAccessed)(t,"searchParams."+n),Reflect.has(e,n)),ownKeys:e=>((0,o.trackDynamicDataAccessed)(t,"searchParams"),Reflect.ownKeys(e))}):e:e}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},51845:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"staticGenerationAsyncStorage",{enumerable:!0,get:function(){return r.staticGenerationAsyncStorage}});let r=n(20030);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},36864:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{StaticGenBailoutError:function(){return r},isStaticGenBailoutError:function(){return o}});let n="NEXT_STATIC_GEN_BAILOUT";class r extends Error{constructor(...e){super(...e),this.code=n}}function o(e){return"object"==typeof e&&null!==e&&"code"in e&&e.code===n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},38137:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"unresolvedThenable",{enumerable:!0,get:function(){return n}});let n={then:()=>{}};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},47744:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{useReducerWithReduxDevtools:function(){return i},useUnwrapState:function(){return a}});let r=n(53099)._(n(2265)),o=n(24673),u=n(2103);function l(e){if(e instanceof Map){let t={};for(let[n,r]of e.entries()){if("function"==typeof r){t[n]="fn()";continue}if("object"==typeof r&&null!==r){if(r.$$typeof){t[n]=r.$$typeof.toString();continue}if(r._bundlerConfig){t[n]="FlightData";continue}}t[n]=l(r)}return t}if("object"==typeof e&&null!==e){let t={};for(let n in e){let r=e[n];if("function"==typeof r){t[n]="fn()";continue}if("object"==typeof r&&null!==r){if(r.$$typeof){t[n]=r.$$typeof.toString();continue}if(r.hasOwnProperty("_bundlerConfig")){t[n]="FlightData";continue}}t[n]=l(r)}return t}return Array.isArray(e)?e.map(l):e}function a(e){return(0,o.isThenable)(e)?(0,r.use)(e):e}let i="undefined"!=typeof window?function(e){let[t,n]=r.default.useState(e),o=(0,r.useContext)(u.ActionQueueContext);if(!o)throw Error("Invariant: Missing ActionQueueContext");let a=(0,r.useRef)(),i=(0,r.useRef)();return(0,r.useEffect)(()=>{if(!a.current&&!1!==i.current){if(void 0===i.current&&void 0===window.__REDUX_DEVTOOLS_EXTENSION__){i.current=!1;return}return a.current=window.__REDUX_DEVTOOLS_EXTENSION__.connect({instanceId:8e3,name:"next-router"}),a.current&&(a.current.init(l(e)),o&&(o.devToolsInstance=a.current)),()=>{a.current=void 0}}},[e,o]),[t,(0,r.useCallback)(t=>{o.state||(o.state=e),o.dispatch(t,n)},[o,e]),(0,r.useCallback)(e=>{a.current&&a.current.send({type:"RENDER_SYNC"},l(e))},[])]}:function(e){return[e,()=>{},()=>{}]};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},11283:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"hasBasePath",{enumerable:!0,get:function(){return o}});let r=n(10580);function o(e){return(0,r.pathHasPrefix)(e,"/ui")}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},33068:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"normalizePathTrailingSlash",{enumerable:!0,get:function(){return u}});let r=n(26674),o=n(63381),u=e=>{if(!e.startsWith("/"))return e;let{pathname:t,query:n,hash:u}=(0,o.parsePath)(e);return""+(0,r.removeTrailingSlash)(t)+n+u};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},61404:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return o}});let r=n(18993);function o(e){let t="function"==typeof reportError?reportError:e=>{window.console.error(e)};(0,r.isBailoutToCSRError)(e)||t(e)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},35076:function(e,t,n){"use strict";function r(e){return(e=e.slice(3)).startsWith("/")||(e="/"+e),e}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"removeBasePath",{enumerable:!0,get:function(){return r}}),n(11283),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},12010:function(e,t){"use strict";function n(e,t){var n=e.length;for(e.push(t);0>>1,o=e[r];if(0>>1;ru(i,n))cu(s,i)?(e[r]=s,e[c]=n,r=c):(e[r]=i,e[a]=n,r=a);else if(cu(s,n))e[r]=s,e[c]=n,r=c;else break}}return t}function u(e,t){var n=e.sortIndex-t.sortIndex;return 0!==n?n:e.id-t.id}if(t.unstable_now=void 0,"object"==typeof performance&&"function"==typeof performance.now){var l,a=performance;t.unstable_now=function(){return a.now()}}else{var i=Date,c=i.now();t.unstable_now=function(){return i.now()-c}}var s=[],f=[],d=1,p=null,h=3,y=!1,_=!1,v=!1,b="function"==typeof setTimeout?setTimeout:null,g="function"==typeof clearTimeout?clearTimeout:null,m="undefined"!=typeof setImmediate?setImmediate:null;function R(e){for(var t=r(f);null!==t;){if(null===t.callback)o(f);else if(t.startTime<=e)o(f),t.sortIndex=t.expirationTime,n(s,t);else break;t=r(f)}}function P(e){if(v=!1,R(e),!_){if(null!==r(s))_=!0,C();else{var t=r(f);null!==t&&A(P,t.startTime-e)}}}"undefined"!=typeof navigator&&void 0!==navigator.scheduling&&void 0!==navigator.scheduling.isInputPending&&navigator.scheduling.isInputPending.bind(navigator.scheduling);var j=!1,O=-1,S=5,E=-1;function w(){return!(t.unstable_now()-Ee&&w());){var a=p.callback;if("function"==typeof a){p.callback=null,h=p.priorityLevel;var i=a(p.expirationTime<=e);if(e=t.unstable_now(),"function"==typeof i){p.callback=i,R(e),n=!0;break t}p===r(s)&&o(s),R(e)}else o(s);p=r(s)}if(null!==p)n=!0;else{var c=r(f);null!==c&&A(P,c.startTime-e),n=!1}}break e}finally{p=null,h=u,y=!1}n=void 0}}finally{n?l():j=!1}}}if("function"==typeof m)l=function(){m(T)};else if("undefined"!=typeof MessageChannel){var M=new MessageChannel,x=M.port2;M.port1.onmessage=T,l=function(){x.postMessage(null)}}else l=function(){b(T,0)};function C(){j||(j=!0,l())}function A(e,n){O=b(function(){e(t.unstable_now())},n)}t.unstable_IdlePriority=5,t.unstable_ImmediatePriority=1,t.unstable_LowPriority=4,t.unstable_NormalPriority=3,t.unstable_Profiling=null,t.unstable_UserBlockingPriority=2,t.unstable_cancelCallback=function(e){e.callback=null},t.unstable_continueExecution=function(){_||y||(_=!0,C())},t.unstable_forceFrameRate=function(e){0>e||125l?(e.sortIndex=u,n(f,e),null===r(s)&&e===r(f)&&(v?(g(O),O=-1):v=!0,A(P,u-l))):(e.sortIndex=a,n(s,e),_||y||(_=!0,C())),e},t.unstable_shouldYield=w,t.unstable_wrapCallback=function(e){var t=h;return function(){var n=h;h=t;try{return e.apply(this,arguments)}finally{h=n}}}},71767:function(e,t,n){"use strict";e.exports=n(12010)},60934:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{getPathname:function(){return r},isFullStringUrl:function(){return o},parseUrl:function(){return u}});let n="http://n";function r(e){return new URL(e,n).pathname}function o(e){return/https?:\/\//.test(e)}function u(e){let t;try{t=new URL(e,n)}catch{}return t}},86999:function(e,t,n){"use strict";var 
r;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{Postpone:function(){return d},createPostponedAbortSignal:function(){return b},createPrerenderState:function(){return c},formatDynamicAPIAccesses:function(){return _},markCurrentScopeAsDynamic:function(){return s},trackDynamicDataAccessed:function(){return f},trackDynamicFetch:function(){return p},usedDynamicAPIs:function(){return y}});let o=(r=n(2265))&&r.__esModule?r:{default:r},u=n(46177),l=n(36864),a=n(60934),i="function"==typeof o.default.unstable_postpone;function c(e){return{isDebugSkeleton:e,dynamicAccesses:[]}}function s(e,t){let n=(0,a.getPathname)(e.urlPathname);if(!e.isUnstableCacheCallback){if(e.dynamicShouldError)throw new l.StaticGenBailoutError(`Route ${n} with \`dynamic = "error"\` couldn't be rendered statically because it used \`${t}\`. See more info here: https://nextjs.org/docs/app/building-your-application/rendering/static-and-dynamic#dynamic-rendering`);if(e.prerenderState)h(e.prerenderState,t,n);else if(e.revalidate=0,e.isStaticGeneration){let r=new u.DynamicServerError(`Route ${n} couldn't be rendered statically because it used ${t}. See more info here: https://nextjs.org/docs/messages/dynamic-server-error`);throw e.dynamicUsageDescription=t,e.dynamicUsageStack=r.stack,r}}}function f(e,t){let n=(0,a.getPathname)(e.urlPathname);if(e.isUnstableCacheCallback)throw Error(`Route ${n} used "${t}" inside a function cached with "unstable_cache(...)". Accessing Dynamic data sources inside a cache scope is not supported. If you need this data inside a cached function use "${t}" outside of the cached function and pass the required dynamic data in as an argument. See more info here: https://nextjs.org/docs/app/api-reference/functions/unstable_cache`);if(e.dynamicShouldError)throw new l.StaticGenBailoutError(`Route ${n} with \`dynamic = "error"\` couldn't be rendered statically because it used \`${t}\`. See more info here: https://nextjs.org/docs/app/building-your-application/rendering/static-and-dynamic#dynamic-rendering`);if(e.prerenderState)h(e.prerenderState,t,n);else if(e.revalidate=0,e.isStaticGeneration){let r=new u.DynamicServerError(`Route ${n} couldn't be rendered statically because it used \`${t}\`. See more info here: https://nextjs.org/docs/messages/dynamic-server-error`);throw e.dynamicUsageDescription=t,e.dynamicUsageStack=r.stack,r}}function d({reason:e,prerenderState:t,pathname:n}){h(t,e,n)}function p(e,t){e.prerenderState&&h(e.prerenderState,t,e.urlPathname)}function h(e,t,n){v();let r=`Route ${n} needs to bail out of prerendering at this point because it used ${t}. React throws this special object to indicate where. It should not be caught by your own try/catch. Learn more: https://nextjs.org/docs/messages/ppr-caught-error`;e.dynamicAccesses.push({stack:e.isDebugSkeleton?Error().stack:void 0,expression:t}),o.default.unstable_postpone(r)}function y(e){return e.dynamicAccesses.length>0}function _(e){return e.dynamicAccesses.filter(e=>"string"==typeof e.stack&&e.stack.length>0).map(({expression:e,stack:t})=>(t=t.split("\n").slice(4).filter(e=>!(e.includes("node_modules/next/")||e.includes(" ()")||e.includes(" (node:"))).join("\n"),`Dynamic API Usage Debug - ${e}:
+(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[117],{65157:function(e,t){"use strict";function n(){return""}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"getDeploymentIdQueryOrEmptyString",{enumerable:!0,get:function(){return n}})},91572:function(){"trimStart"in String.prototype||(String.prototype.trimStart=String.prototype.trimLeft),"trimEnd"in String.prototype||(String.prototype.trimEnd=String.prototype.trimRight),"description"in Symbol.prototype||Object.defineProperty(Symbol.prototype,"description",{configurable:!0,get:function(){var e=/\((.*)\)/.exec(this.toString());return e?e[1]:void 0}}),Array.prototype.flat||(Array.prototype.flat=function(e,t){return t=this.concat.apply([],this),e>1&&t.some(Array.isArray)?t.flat(e-1):t},Array.prototype.flatMap=function(e,t){return this.map(e,t).flat()}),Promise.prototype.finally||(Promise.prototype.finally=function(e){if("function"!=typeof e)return this.then(e,e);var t=this.constructor||Promise;return this.then(function(n){return t.resolve(e()).then(function(){return n})},function(n){return t.resolve(e()).then(function(){throw n})})}),Object.fromEntries||(Object.fromEntries=function(e){return Array.from(e).reduce(function(e,t){return e[t[0]]=t[1],e},{})}),Array.prototype.at||(Array.prototype.at=function(e){var t=Math.trunc(e)||0;if(t<0&&(t+=this.length),!(t<0||t>=this.length))return this[t]}),Object.hasOwn||(Object.hasOwn=function(e,t){if(null==e)throw TypeError("Cannot convert undefined or null to object");return Object.prototype.hasOwnProperty.call(Object(e),t)}),"canParse"in URL||(URL.canParse=function(e,t){try{return new URL(e,t),!0}catch(e){return!1}})},1634:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"addBasePath",{enumerable:!0,get:function(){return u}});let r=n(68498),o=n(33068);function u(e,t){return(0,o.normalizePathTrailingSlash)((0,r.addPathPrefix)(e,"/ui"))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},75266:function(e,t){"use strict";function n(e){var t,n;t=self.__next_s,n=()=>{e()},t&&t.length?t.reduce((e,t)=>{let[n,r]=t;return e.then(()=>new Promise((e,t)=>{let o=document.createElement("script");if(r)for(let e in r)"children"!==e&&o.setAttribute(e,r[e]);n?(o.src=n,o.onload=()=>e(),o.onerror=t):r&&(o.innerHTML=r.children,setTimeout(e)),document.head.appendChild(o)}))},Promise.resolve()).catch(e=>{console.error(e)}).then(()=>{n()}):n()}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"appBootstrap",{enumerable:!0,get:function(){return n}}),window.next={version:"14.2.26",appDir:!0},("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},83079:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"callServer",{enumerable:!0,get:function(){return o}});let r=n(12846);async function o(e,t){let n=(0,r.getServerActionDispatcher)();if(!n)throw Error("Invariant: missing action dispatcher.");return new Promise((r,o)=>{n({actionId:e,actionArgs:t,resolve:r,reject:o})})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},92304:function(e,t,n){"use strict";let r,o;Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"hydrate",{enumerable:!0,get:function(){return x}});let u=n(47043),l=n(53099),a=n(57437);n(91572);let i=u._(n(34040)),c=l._(n(2265)),s=n(6671),f=n(48701),d=u._(n(61404)),p=n(83079),h=n(89721),y=n(2103);n(70647);let _=window.console.error;window.console.error=function(){for(var e=arguments.length,t=Array(e),n=0;n{if((0,h.isNextRouterError)(e.error)){e.preventDefault();return}});let v=document,b=new TextEncoder,g=!1,m=!1,R=null;function P(e){if(0===e[0])r=[];else if(1===e[0]){if(!r)throw Error("Unexpected server data: missing bootstrap script.");o?o.enqueue(b.encode(e[1])):r.push(e[1])}else 2===e[0]&&(R=e[1])}let j=function(){o&&!m&&(o.close(),m=!0,r=void 0),g=!0};"loading"===document.readyState?document.addEventListener("DOMContentLoaded",j,!1):j();let O=self.__next_f=self.__next_f||[];O.forEach(P),O.push=P;let S=new ReadableStream({start(e){r&&(r.forEach(t=>{e.enqueue(b.encode(t))}),g&&!m&&(e.close(),m=!0,r=void 0)),o=e}}),E=(0,s.createFromReadableStream)(S,{callServer:p.callServer});function w(){return(0,c.use)(E)}let T=c.default.StrictMode;function M(e){let{children:t}=e;return t}function x(){let e=(0,y.createMutableActionQueue)(),t=(0,a.jsx)(T,{children:(0,a.jsx)(f.HeadManagerContext.Provider,{value:{appDir:!0},children:(0,a.jsx)(y.ActionQueueContext.Provider,{value:e,children:(0,a.jsx)(M,{children:(0,a.jsx)(w,{})})})})}),n=window.__next_root_layout_missing_tags,r=!!(null==n?void 0:n.length),o={onRecoverableError:d.default};"__next_error__"===document.documentElement.id||r?i.default.createRoot(v,o).render(t):c.default.startTransition(()=>i.default.hydrateRoot(v,t,{...o,formState:R}))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},54278:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),n(19506),(0,n(75266).appBootstrap)(()=>{let{hydrate:e}=n(92304);n(12846),n(4707),e()}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},19506:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),n(65157);{let e=n.u;n.u=function(){for(var t=arguments.length,n=Array(t),r=0;r(l(function(){var e;let t=document.getElementsByName(u)[0];if(null==t?void 0:null==(e=t.shadowRoot)?void 0:e.childNodes[0])return t.shadowRoot.childNodes[0];{let e=document.createElement(u);e.style.cssText="position:absolute";let t=document.createElement("div");return t.ariaLive="assertive",t.id="__next-route-announcer__",t.role="alert",t.style.cssText="position:absolute;border:0;height:1px;margin:-1px;padding:0;width:1px;clip:rect(0 0 0 0);overflow:hidden;white-space:nowrap;word-wrap:normal",e.attachShadow({mode:"open"}).appendChild(t),document.body.appendChild(e),t}}()),()=>{let e=document.getElementsByTagName(u)[0];(null==e?void 0:e.isConnected)&&document.body.removeChild(e)}),[]);let[a,i]=(0,r.useState)(""),c=(0,r.useRef)();return(0,r.useEffect)(()=>{let e="";if(document.title)e=document.title;else{let t=document.querySelector("h1");t&&(e=t.innerText||t.textContent||"")}void 
0!==c.current&&c.current!==e&&i(e),c.current=e},[t]),n?(0,o.createPortal)(a,n):null}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},6866:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ACTION:function(){return r},FLIGHT_PARAMETERS:function(){return i},NEXT_DID_POSTPONE_HEADER:function(){return s},NEXT_ROUTER_PREFETCH_HEADER:function(){return u},NEXT_ROUTER_STATE_TREE:function(){return o},NEXT_RSC_UNION_QUERY:function(){return c},NEXT_URL:function(){return l},RSC_CONTENT_TYPE_HEADER:function(){return a},RSC_HEADER:function(){return n}});let n="RSC",r="Next-Action",o="Next-Router-State-Tree",u="Next-Router-Prefetch",l="Next-Url",a="text/x-component",i=[[n],[o],[u]],c="_rsc",s="x-nextjs-postponed";("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},12846:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{createEmptyCacheNode:function(){return C},default:function(){return I},getServerActionDispatcher:function(){return E},urlToUrlWithoutFlightMarker:function(){return T}});let r=n(53099),o=n(57437),u=r._(n(2265)),l=n(61956),a=n(24673),i=n(33456),c=n(79060),s=n(47744),f=n(61060),d=n(82952),p=n(86146),h=n(1634),y=n(6495),_=n(4123),v=n(39320),b=n(38137),g=n(6866),m=n(35076),R=n(11283),P=n(84541),j="undefined"==typeof window,O=j?null:new Map,S=null;function E(){return S}let w={};function T(e){let t=new URL(e,location.origin);if(t.searchParams.delete(g.NEXT_RSC_UNION_QUERY),t.pathname.endsWith(".txt")){let{pathname:e}=t,n=e.endsWith("/index.txt")?10:4;t.pathname=e.slice(0,-n)}return t}function M(e){return e.origin!==window.location.origin}function x(e){let{appRouterState:t,sync:n}=e;return(0,u.useInsertionEffect)(()=>{let{tree:e,pushRef:r,canonicalUrl:o}=t,u={...r.preserveCustomHistoryState?window.history.state:{},__NA:!0,__PRIVATE_NEXTJS_INTERNALS_TREE:e};r.pendingPush&&(0,i.createHrefFromUrl)(new URL(window.location.href))!==o?(r.pendingPush=!1,window.history.pushState(u,"",o)):window.history.replaceState(u,"",o),n(t)},[t,n]),null}function C(){return{lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:null}}function A(e){null==e&&(e={});let t=window.history.state,n=null==t?void 0:t.__NA;n&&(e.__NA=n);let r=null==t?void 0:t.__PRIVATE_NEXTJS_INTERNALS_TREE;return r&&(e.__PRIVATE_NEXTJS_INTERNALS_TREE=r),e}function N(e){let{headCacheNode:t}=e,n=null!==t?t.head:null,r=null!==t?t.prefetchHead:null,o=null!==r?r:n;return(0,u.useDeferredValue)(n,o)}function D(e){let t,{buildId:n,initialHead:r,initialTree:i,urlParts:f,initialSeedData:g,couldBeIntercepted:E,assetPrefix:T,missingSlots:C}=e,D=(0,u.useMemo)(()=>(0,d.createInitialRouterState)({buildId:n,initialSeedData:g,urlParts:f,initialTree:i,initialParallelRoutes:O,location:j?null:window.location,initialHead:r,couldBeIntercepted:E}),[n,g,f,i,r,E]),[I,U,k]=(0,s.useReducerWithReduxDevtools)(D);(0,u.useEffect)(()=>{O=null},[]);let{canonicalUrl:F}=(0,s.useUnwrapState)(I),{searchParams:L,pathname:H}=(0,u.useMemo)(()=>{let e=new 
URL(F,"undefined"==typeof window?"http://n":window.location.href);return{searchParams:e.searchParams,pathname:(0,R.hasBasePath)(e.pathname)?(0,m.removeBasePath)(e.pathname):e.pathname}},[F]),$=(0,u.useCallback)(e=>{let{previousTree:t,serverResponse:n}=e;(0,u.startTransition)(()=>{U({type:a.ACTION_SERVER_PATCH,previousTree:t,serverResponse:n})})},[U]),G=(0,u.useCallback)((e,t,n)=>{let r=new URL((0,h.addBasePath)(e),location.href);return U({type:a.ACTION_NAVIGATE,url:r,isExternalUrl:M(r),locationSearch:location.search,shouldScroll:null==n||n,navigateType:t})},[U]);S=(0,u.useCallback)(e=>{(0,u.startTransition)(()=>{U({...e,type:a.ACTION_SERVER_ACTION})})},[U]);let z=(0,u.useMemo)(()=>({back:()=>window.history.back(),forward:()=>window.history.forward(),prefetch:(e,t)=>{let n;if(!(0,p.isBot)(window.navigator.userAgent)){try{n=new URL((0,h.addBasePath)(e),window.location.href)}catch(t){throw Error("Cannot prefetch '"+e+"' because it cannot be converted to a URL.")}M(n)||(0,u.startTransition)(()=>{var e;U({type:a.ACTION_PREFETCH,url:n,kind:null!=(e=null==t?void 0:t.kind)?e:a.PrefetchKind.FULL})})}},replace:(e,t)=>{void 0===t&&(t={}),(0,u.startTransition)(()=>{var n;G(e,"replace",null==(n=t.scroll)||n)})},push:(e,t)=>{void 0===t&&(t={}),(0,u.startTransition)(()=>{var n;G(e,"push",null==(n=t.scroll)||n)})},refresh:()=>{(0,u.startTransition)(()=>{U({type:a.ACTION_REFRESH,origin:window.location.origin})})},fastRefresh:()=>{throw Error("fastRefresh can only be used in development mode. Please use refresh instead.")}}),[U,G]);(0,u.useEffect)(()=>{window.next&&(window.next.router=z)},[z]),(0,u.useEffect)(()=>{function e(e){var t;e.persisted&&(null==(t=window.history.state)?void 0:t.__PRIVATE_NEXTJS_INTERNALS_TREE)&&(w.pendingMpaPath=void 0,U({type:a.ACTION_RESTORE,url:new URL(window.location.href),tree:window.history.state.__PRIVATE_NEXTJS_INTERNALS_TREE}))}return window.addEventListener("pageshow",e),()=>{window.removeEventListener("pageshow",e)}},[U]);let{pushRef:B}=(0,s.useUnwrapState)(I);if(B.mpaNavigation){if(w.pendingMpaPath!==F){let e=window.location;B.pendingPush?e.assign(F):e.replace(F),w.pendingMpaPath=F}(0,u.use)(b.unresolvedThenable)}(0,u.useEffect)(()=>{let e=window.history.pushState.bind(window.history),t=window.history.replaceState.bind(window.history),n=e=>{var t;let n=window.location.href,r=null==(t=window.history.state)?void 0:t.__PRIVATE_NEXTJS_INTERNALS_TREE;(0,u.startTransition)(()=>{U({type:a.ACTION_RESTORE,url:new URL(null!=e?e:n,n),tree:r})})};window.history.pushState=function(t,r,o){return(null==t?void 0:t.__NA)||(null==t?void 0:t._N)||(t=A(t),o&&n(o)),e(t,r,o)},window.history.replaceState=function(e,r,o){return(null==e?void 0:e.__NA)||(null==e?void 0:e._N)||(e=A(e),o&&n(o)),t(e,r,o)};let r=e=>{let{state:t}=e;if(t){if(!t.__NA){window.location.reload();return}(0,u.startTransition)(()=>{U({type:a.ACTION_RESTORE,url:new URL(window.location.href),tree:t.__PRIVATE_NEXTJS_INTERNALS_TREE})})}};return window.addEventListener("popstate",r),()=>{window.history.pushState=e,window.history.replaceState=t,window.removeEventListener("popstate",r)}},[U]);let{cache:W,tree:K,nextUrl:V,focusAndScrollRef:Y}=(0,s.useUnwrapState)(I),X=(0,u.useMemo)(()=>(0,v.findHeadInCache)(W,K[1]),[W,K]),q=(0,u.useMemo)(()=>(function e(t,n){for(let r of(void 0===n&&(n={}),Object.values(t[1]))){let t=r[0],o=Array.isArray(t),u=o?t[1]:t;!u||u.startsWith(P.PAGE_SEGMENT_KEY)||(o&&("c"===t[2]||"oc"===t[2])?n[t[0]]=t[1].split("/"):o&&(n[t[0]]=t[1]),n=e(r,n))}return 
n})(K),[K]);if(null!==X){let[e,n]=X;t=(0,o.jsx)(N,{headCacheNode:e},n)}else t=null;let J=(0,o.jsxs)(_.RedirectBoundary,{children:[t,W.rsc,(0,o.jsx)(y.AppRouterAnnouncer,{tree:K})]});return(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)(x,{appRouterState:(0,s.useUnwrapState)(I),sync:k}),(0,o.jsx)(c.PathParamsContext.Provider,{value:q,children:(0,o.jsx)(c.PathnameContext.Provider,{value:H,children:(0,o.jsx)(c.SearchParamsContext.Provider,{value:L,children:(0,o.jsx)(l.GlobalLayoutRouterContext.Provider,{value:{buildId:n,changeByServerResponse:$,tree:K,focusAndScrollRef:Y,nextUrl:V},children:(0,o.jsx)(l.AppRouterContext.Provider,{value:z,children:(0,o.jsx)(l.LayoutRouterContext.Provider,{value:{childNodes:W.parallelRoutes,tree:K,url:F,loading:W.loading},children:J})})})})})})]})}function I(e){let{globalErrorComponent:t,...n}=e;return(0,o.jsx)(f.ErrorBoundary,{errorComponent:t,children:(0,o.jsx)(D,{...n})})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},96149:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"bailoutToClientRendering",{enumerable:!0,get:function(){return u}});let r=n(18993),o=n(51845);function u(e){let t=o.staticGenerationAsyncStorage.getStore();if((null==t||!t.forceStatic)&&(null==t?void 0:t.isStaticGeneration))throw new r.BailoutToCSRError(e)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},19107:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"ClientPageRoot",{enumerable:!0,get:function(){return u}});let r=n(57437),o=n(54535);function u(e){let{Component:t,props:n}=e;return n.searchParams=(0,o.createDynamicallyTrackedSearchParams)(n.searchParams||{}),(0,r.jsx)(t,{...n})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},61060:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ErrorBoundary:function(){return h},ErrorBoundaryHandler:function(){return f},GlobalError:function(){return d},default:function(){return p}});let r=n(47043),o=n(57437),u=r._(n(2265)),l=n(35475),a=n(89721),i=n(51845),c={error:{fontFamily:'system-ui,"Segoe UI",Roboto,Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji"',height:"100vh",textAlign:"center",display:"flex",flexDirection:"column",alignItems:"center",justifyContent:"center"},text:{fontSize:"14px",fontWeight:400,lineHeight:"28px",margin:"0 8px"}};function s(e){let{error:t}=e,n=i.staticGenerationAsyncStorage.getStore();if((null==n?void 0:n.isRevalidate)||(null==n?void 0:n.isStaticGeneration))throw console.error(t),t;return null}class f extends u.default.Component{static getDerivedStateFromError(e){if((0,a.isNextRouterError)(e))throw e;return{error:e}}static getDerivedStateFromProps(e,t){return e.pathname!==t.previousPathname&&t.error?{error:null,previousPathname:e.pathname}:{error:t.error,previousPathname:e.pathname}}render(){return 
this.state.error?(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)(s,{error:this.state.error}),this.props.errorStyles,this.props.errorScripts,(0,o.jsx)(this.props.errorComponent,{error:this.state.error,reset:this.reset})]}):this.props.children}constructor(e){super(e),this.reset=()=>{this.setState({error:null})},this.state={error:null,previousPathname:this.props.pathname}}}function d(e){let{error:t}=e,n=null==t?void 0:t.digest;return(0,o.jsxs)("html",{id:"__next_error__",children:[(0,o.jsx)("head",{}),(0,o.jsxs)("body",{children:[(0,o.jsx)(s,{error:t}),(0,o.jsx)("div",{style:c.error,children:(0,o.jsxs)("div",{children:[(0,o.jsx)("h2",{style:c.text,children:"Application error: a "+(n?"server":"client")+"-side exception has occurred (see the "+(n?"server logs":"browser console")+" for more information)."}),n?(0,o.jsx)("p",{style:c.text,children:"Digest: "+n}):null]})})]})]})}let p=d;function h(e){let{errorComponent:t,errorStyles:n,errorScripts:r,children:u}=e,a=(0,l.usePathname)();return t?(0,o.jsx)(f,{pathname:a,errorComponent:t,errorStyles:n,errorScripts:r,children:u}):(0,o.jsx)(o.Fragment,{children:u})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},46177:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{DynamicServerError:function(){return r},isDynamicServerError:function(){return o}});let n="DYNAMIC_SERVER_USAGE";class r extends Error{constructor(e){super("Dynamic server usage: "+e),this.description=e,this.digest=n}}function o(e){return"object"==typeof e&&null!==e&&"digest"in e&&"string"==typeof e.digest&&e.digest===n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},89721:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"isNextRouterError",{enumerable:!0,get:function(){return u}});let r=n(98200),o=n(88968);function u(e){return e&&e.digest&&((0,o.isRedirectError)(e)||(0,r.isNotFoundError)(e))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4707:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return S}});let r=n(47043),o=n(53099),u=n(57437),l=o._(n(2265)),a=r._(n(54887)),i=n(61956),c=n(44848),s=n(38137),f=n(61060),d=n(76015),p=n(7092),h=n(4123),y=n(80),_=n(73171),v=n(78505),b=n(28077),g=["bottom","height","left","right","top","width","x","y"];function m(e,t){let n=e.getBoundingClientRect();return n.top>=0&&n.top<=t}class R extends l.default.Component{componentDidMount(){this.handlePotentialScroll()}componentDidUpdate(){this.props.focusAndScrollRef.apply&&this.handlePotentialScroll()}render(){return this.props.children}constructor(...e){super(...e),this.handlePotentialScroll=()=>{let{focusAndScrollRef:e,segmentPath:t}=this.props;if(e.apply){var n;if(0!==e.segmentPaths.length&&!e.segmentPaths.some(e=>t.every((t,n)=>(0,d.matchSegment)(t,e[n]))))return;let 
r=null,o=e.hashFragment;if(o&&(r="top"===o?document.body:null!=(n=document.getElementById(o))?n:document.getElementsByName(o)[0]),r||(r="undefined"==typeof window?null:a.default.findDOMNode(this)),!(r instanceof Element))return;for(;!(r instanceof HTMLElement)||function(e){if(["sticky","fixed"].includes(getComputedStyle(e).position))return!0;let t=e.getBoundingClientRect();return g.every(e=>0===t[e])}(r);){if(null===r.nextElementSibling)return;r=r.nextElementSibling}e.apply=!1,e.hashFragment=null,e.segmentPaths=[],(0,p.handleSmoothScroll)(()=>{if(o){r.scrollIntoView();return}let e=document.documentElement,t=e.clientHeight;!m(r,t)&&(e.scrollTop=0,m(r,t)||r.scrollIntoView())},{dontForceLayout:!0,onlyHashChange:e.onlyHashChange}),e.onlyHashChange=!1,r.focus()}}}}function P(e){let{segmentPath:t,children:n}=e,r=(0,l.useContext)(i.GlobalLayoutRouterContext);if(!r)throw Error("invariant global layout router not mounted");return(0,u.jsx)(R,{segmentPath:t,focusAndScrollRef:r.focusAndScrollRef,children:n})}function j(e){let{parallelRouterKey:t,url:n,childNodes:r,segmentPath:o,tree:a,cacheKey:f}=e,p=(0,l.useContext)(i.GlobalLayoutRouterContext);if(!p)throw Error("invariant global layout router not mounted");let{buildId:h,changeByServerResponse:y,tree:_}=p,v=r.get(f);if(void 0===v){let e={lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:null};v=e,r.set(f,e)}let g=null!==v.prefetchRsc?v.prefetchRsc:v.rsc,m=(0,l.useDeferredValue)(v.rsc,g),R="object"==typeof m&&null!==m&&"function"==typeof m.then?(0,l.use)(m):m;if(!R){let e=v.lazyData;if(null===e){let t=function e(t,n){if(t){let[r,o]=t,u=2===t.length;if((0,d.matchSegment)(n[0],r)&&n[1].hasOwnProperty(o)){if(u){let t=e(void 0,n[1][o]);return[n[0],{...n[1],[o]:[t[0],t[1],t[2],"refetch"]}]}return[n[0],{...n[1],[o]:e(t.slice(2),n[1][o])}]}}return n}(["",...o],_),r=(0,b.hasInterceptionRouteInCurrentTree)(_);v.lazyData=e=(0,c.fetchServerResponse)(new URL(n,location.origin),t,r?p.nextUrl:null,h),v.lazyDataResolved=!1}let t=(0,l.use)(e);v.lazyDataResolved||(setTimeout(()=>{(0,l.startTransition)(()=>{y({previousTree:_,serverResponse:t})})}),v.lazyDataResolved=!0),(0,l.use)(s.unresolvedThenable)}return(0,u.jsx)(i.LayoutRouterContext.Provider,{value:{tree:a[1][t],childNodes:v.parallelRoutes,url:n,loading:v.loading},children:R})}function O(e){let{children:t,hasLoading:n,loading:r,loadingStyles:o,loadingScripts:a}=e;return n?(0,u.jsx)(l.Suspense,{fallback:(0,u.jsxs)(u.Fragment,{children:[o,a,r]}),children:t}):(0,u.jsx)(u.Fragment,{children:t})}function S(e){let{parallelRouterKey:t,segmentPath:n,error:r,errorStyles:o,errorScripts:a,templateStyles:c,templateScripts:s,template:d,notFound:p,notFoundStyles:b}=e,g=(0,l.useContext)(i.LayoutRouterContext);if(!g)throw Error("invariant expected layout router to be mounted");let{childNodes:m,tree:R,url:S,loading:E}=g,w=m.get(t);w||(w=new Map,m.set(t,w));let T=R[1][t][0],M=(0,_.getSegmentValue)(T),x=[T];return(0,u.jsx)(u.Fragment,{children:x.map(e=>{let l=(0,_.getSegmentValue)(e),g=(0,v.createRouterCacheKey)(e);return(0,u.jsxs)(i.TemplateContext.Provider,{value:(0,u.jsx)(P,{segmentPath:n,children:(0,u.jsx)(f.ErrorBoundary,{errorComponent:r,errorStyles:o,errorScripts:a,children:(0,u.jsx)(O,{hasLoading:!!E,loading:null==E?void 0:E[0],loadingStyles:null==E?void 0:E[1],loadingScripts:null==E?void 
0:E[2],children:(0,u.jsx)(y.NotFoundBoundary,{notFound:p,notFoundStyles:b,children:(0,u.jsx)(h.RedirectBoundary,{children:(0,u.jsx)(j,{parallelRouterKey:t,url:S,tree:R,childNodes:w,segmentPath:n,cacheKey:g,isActive:M===l})})})})})}),children:[c,s,d]},(0,v.createRouterCacheKey)(e,!0))})})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},76015:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{canSegmentBeOverridden:function(){return u},matchSegment:function(){return o}});let r=n(87417),o=(e,t)=>"string"==typeof e?"string"==typeof t&&e===t:"string"!=typeof t&&e[0]===t[0]&&e[1]===t[1],u=(e,t)=>{var n;return!Array.isArray(e)&&!!Array.isArray(t)&&(null==(n=(0,r.getSegmentParam)(e))?void 0:n.param)===t[0]};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},35475:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ReadonlyURLSearchParams:function(){return i.ReadonlyURLSearchParams},RedirectType:function(){return i.RedirectType},ServerInsertedHTMLContext:function(){return c.ServerInsertedHTMLContext},notFound:function(){return i.notFound},permanentRedirect:function(){return i.permanentRedirect},redirect:function(){return i.redirect},useParams:function(){return p},usePathname:function(){return f},useRouter:function(){return d},useSearchParams:function(){return s},useSelectedLayoutSegment:function(){return y},useSelectedLayoutSegments:function(){return h},useServerInsertedHTML:function(){return c.useServerInsertedHTML}});let r=n(2265),o=n(61956),u=n(79060),l=n(73171),a=n(84541),i=n(52646),c=n(55501);function s(){let e=(0,r.useContext)(u.SearchParamsContext),t=(0,r.useMemo)(()=>e?new i.ReadonlyURLSearchParams(e):null,[e]);if("undefined"==typeof window){let{bailoutToClientRendering:e}=n(96149);e("useSearchParams()")}return t}function f(){return(0,r.useContext)(u.PathnameContext)}function d(){let e=(0,r.useContext)(o.AppRouterContext);if(null===e)throw Error("invariant expected app router to be mounted");return e}function p(){return(0,r.useContext)(u.PathParamsContext)}function h(e){void 0===e&&(e="children");let t=(0,r.useContext)(o.LayoutRouterContext);return t?function e(t,n,r,o){let u;if(void 0===r&&(r=!0),void 0===o&&(o=[]),r)u=t[1][n];else{var i;let e=t[1];u=null!=(i=e.children)?i:Object.values(e)[0]}if(!u)return o;let c=u[0],s=(0,l.getSegmentValue)(c);return!s||s.startsWith(a.PAGE_SEGMENT_KEY)?o:(o.push(s),e(u,n,!1,o))}(t.tree,e):null}function y(e){void 0===e&&(e="children");let t=h(e);if(!t||0===t.length)return null;let n="children"===e?t[0]:t[t.length-1];return n===a.DEFAULT_SEGMENT_KEY?null:n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},52646:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ReadonlyURLSearchParams:function(){return l},RedirectType:function(){return 
r.RedirectType},notFound:function(){return o.notFound},permanentRedirect:function(){return r.permanentRedirect},redirect:function(){return r.redirect}});let r=n(88968),o=n(98200);class u extends Error{constructor(){super("Method unavailable on `ReadonlyURLSearchParams`. Read more: https://nextjs.org/docs/app/api-reference/functions/use-search-params#updating-searchparams")}}class l extends URLSearchParams{append(){throw new u}delete(){throw new u}set(){throw new u}sort(){throw new u}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},80:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"NotFoundBoundary",{enumerable:!0,get:function(){return s}});let r=n(53099),o=n(57437),u=r._(n(2265)),l=n(35475),a=n(98200);n(31765);let i=n(61956);class c extends u.default.Component{componentDidCatch(){}static getDerivedStateFromError(e){if((0,a.isNotFoundError)(e))return{notFoundTriggered:!0};throw e}static getDerivedStateFromProps(e,t){return e.pathname!==t.previousPathname&&t.notFoundTriggered?{notFoundTriggered:!1,previousPathname:e.pathname}:{notFoundTriggered:t.notFoundTriggered,previousPathname:e.pathname}}render(){return this.state.notFoundTriggered?(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)("meta",{name:"robots",content:"noindex"}),!1,this.props.notFoundStyles,this.props.notFound]}):this.props.children}constructor(e){super(e),this.state={notFoundTriggered:!!e.asNotFound,previousPathname:e.pathname}}}function s(e){let{notFound:t,notFoundStyles:n,asNotFound:r,children:a}=e,s=(0,l.usePathname)(),f=(0,u.useContext)(i.MissingSlotContext);return t?(0,o.jsx)(c,{pathname:s,notFound:t,notFoundStyles:n,asNotFound:r,missingSlots:f,children:a}):(0,o.jsx)(o.Fragment,{children:a})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},98200:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{isNotFoundError:function(){return o},notFound:function(){return r}});let n="NEXT_NOT_FOUND";function r(){let e=Error(n);throw e.digest=n,e}function o(e){return"object"==typeof e&&null!==e&&"digest"in e&&e.digest===n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},29744:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"PromiseQueue",{enumerable:!0,get:function(){return c}});let r=n(2522),o=n(90675);var u=o._("_maxConcurrency"),l=o._("_runningCount"),a=o._("_queue"),i=o._("_processNext");class c{enqueue(e){let t,n;let o=new Promise((e,r)=>{t=e,n=r}),u=async()=>{try{r._(this,l)[l]++;let n=await e();t(n)}catch(e){n(e)}finally{r._(this,l)[l]--,r._(this,i)[i]()}};return r._(this,a)[a].push({promiseFn:o,task:u}),r._(this,i)[i](),o}bump(e){let t=r._(this,a)[a].findIndex(t=>t.promiseFn===e);if(t>-1){let e=r._(this,a)[a].splice(t,1)[0];r._(this,a)[a].unshift(e),r._(this,i)[i](!0)}}constructor(e=5){Object.defineProperty(this,i,{value:s}),Object.defineProperty(this,u,{writable:!0,value:void 
0}),Object.defineProperty(this,l,{writable:!0,value:void 0}),Object.defineProperty(this,a,{writable:!0,value:void 0}),r._(this,u)[u]=e,r._(this,l)[l]=0,r._(this,a)[a]=[]}}function s(e){if(void 0===e&&(e=!1),(r._(this,l)[l]<r._(this,u)[u]||e)&&r._(this,a)[a].length>0){var t;null==(t=r._(this,a)[a].shift())||t.task()}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},4123:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{RedirectBoundary:function(){return s},RedirectErrorBoundary:function(){return c}});let r=n(53099),o=n(57437),u=r._(n(2265)),l=n(35475),a=n(88968);function i(e){let{redirect:t,reset:n,redirectType:r}=e,o=(0,l.useRouter)();return(0,u.useEffect)(()=>{u.default.startTransition(()=>{r===a.RedirectType.push?o.push(t,{}):o.replace(t,{}),n()})},[t,r,n,o]),null}class c extends u.default.Component{static getDerivedStateFromError(e){if((0,a.isRedirectError)(e))return{redirect:(0,a.getURLFromRedirectError)(e),redirectType:(0,a.getRedirectTypeFromError)(e)};throw e}render(){let{redirect:e,redirectType:t}=this.state;return null!==e&&null!==t?(0,o.jsx)(i,{redirect:e,redirectType:t,reset:()=>this.setState({redirect:null})}):this.props.children}constructor(e){super(e),this.state={redirect:null,redirectType:null}}}function s(e){let{children:t}=e,n=(0,l.useRouter)();return(0,o.jsx)(c,{router:n,children:t})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},5001:function(e,t){"use strict";var n,r;Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"RedirectStatusCode",{enumerable:!0,get:function(){return n}}),(r=n||(n={}))[r.SeeOther=303]="SeeOther",r[r.TemporaryRedirect=307]="TemporaryRedirect",r[r.PermanentRedirect=308]="PermanentRedirect",("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},88968:function(e,t,n){"use strict";var r,o;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{RedirectType:function(){return r},getRedirectError:function(){return c},getRedirectStatusCodeFromError:function(){return y},getRedirectTypeFromError:function(){return h},getURLFromRedirectError:function(){return p},isRedirectError:function(){return d},permanentRedirect:function(){return f},redirect:function(){return s}});let u=n(20544),l=n(90295),a=n(5001),i="NEXT_REDIRECT";function c(e,t,n){void 0===n&&(n=a.RedirectStatusCode.TemporaryRedirect);let r=Error(i);r.digest=i+";"+t+";"+e+";"+n+";";let o=u.requestAsyncStorage.getStore();return o&&(r.mutableCookies=o.mutableCookies),r}function s(e,t){void 0===t&&(t="replace");let n=l.actionAsyncStorage.getStore();throw c(e,t,(null==n?void 0:n.isAction)?a.RedirectStatusCode.SeeOther:a.RedirectStatusCode.TemporaryRedirect)}function f(e,t){void 0===t&&(t="replace");let n=l.actionAsyncStorage.getStore();throw c(e,t,(null==n?void 0:n.isAction)?a.RedirectStatusCode.SeeOther:a.RedirectStatusCode.PermanentRedirect)}function d(e){if("object"!=typeof e||null===e||!("digest"in e)||"string"!=typeof 
e.digest)return!1;let[t,n,r,o]=e.digest.split(";",4),u=Number(o);return t===i&&("replace"===n||"push"===n)&&"string"==typeof r&&!isNaN(u)&&u in a.RedirectStatusCode}function p(e){return d(e)?e.digest.split(";",3)[2]:null}function h(e){if(!d(e))throw Error("Not a redirect error");return e.digest.split(";",2)[1]}function y(e){if(!d(e))throw Error("Not a redirect error");return Number(e.digest.split(";",4)[3])}(o=r||(r={})).push="push",o.replace="replace",("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},36423:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return a}});let r=n(53099),o=n(57437),u=r._(n(2265)),l=n(61956);function a(){let e=(0,u.useContext)(l.TemplateContext);return(0,o.jsx)(o.Fragment,{children:e})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},20544:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{getExpectedRequestStore:function(){return o},requestAsyncStorage:function(){return r.requestAsyncStorage}});let r=n(25575);function o(e){let t=r.requestAsyncStorage.getStore();if(t)return t;throw Error("`"+e+"` was called outside a request scope. Read more: https://nextjs.org/docs/messages/next-dynamic-api-wrong-context")}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},22356:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"applyFlightData",{enumerable:!0,get:function(){return u}});let r=n(27420),o=n(92576);function u(e,t,n,u){let[l,a,i]=n.slice(-3);if(null===a)return!1;if(3===n.length){let n=a[2],o=a[3];t.loading=o,t.rsc=n,t.prefetchRsc=null,(0,r.fillLazyItemsTillLeafWithHead)(t,e,l,a,i,u)}else t.rsc=e.rsc,t.prefetchRsc=e.prefetchRsc,t.parallelRoutes=new Map(e.parallelRoutes),t.loading=e.loading,(0,o.fillCacheWithNewSubTreeData)(t,e,n,u);return!0}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},81935:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"applyRouterStatePatchToTree",{enumerable:!0,get:function(){return function e(t,n,r,a){let i;let[c,s,f,d,p]=n;if(1===t.length){let e=l(n,r,t);return(0,u.addRefreshMarkerToActiveParallelSegments)(e,a),e}let[h,y]=t;if(!(0,o.matchSegment)(h,c))return null;if(2===t.length)i=l(s[y],r,t);else if(null===(i=e(t.slice(2),s[y],r,a)))return null;let _=[t[0],{...s,[y]:i},f,d];return p&&(_[4]=!0),(0,u.addRefreshMarkerToActiveParallelSegments)(_,a),_}}});let r=n(84541),o=n(76015),u=n(50232);function l(e,t,n){let[u,a]=e,[i,c]=t;if(i===r.DEFAULT_SEGMENT_KEY&&u!==r.DEFAULT_SEGMENT_KEY)return e;if((0,o.matchSegment)(u,i)){let t={};for(let e in a)void 0!==c[e]?t[e]=l(a[e],c[e],n):t[e]=a[e];for(let e in c)t[e]||(t[e]=c[e]);let r=[u,t];return 
e[2]&&(r[2]=e[2]),e[3]&&(r[3]=e[3]),e[4]&&(r[4]=e[4]),r}return t}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},65556:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"clearCacheNodeDataForSegmentPath",{enumerable:!0,get:function(){return function e(t,n,o){let u=o.length<=2,[l,a]=o,i=(0,r.createRouterCacheKey)(a),c=n.parallelRoutes.get(l),s=t.parallelRoutes.get(l);s&&s!==c||(s=new Map(c),t.parallelRoutes.set(l,s));let f=null==c?void 0:c.get(i),d=s.get(i);if(u){d&&d.lazyData&&d!==f||s.set(i,{lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:null});return}if(!d||!f){d||s.set(i,{lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:null});return}return d===f&&(d={lazyData:d.lazyData,rsc:d.rsc,prefetchRsc:d.prefetchRsc,head:d.head,prefetchHead:d.prefetchHead,parallelRoutes:new Map(d.parallelRoutes),lazyDataResolved:d.lazyDataResolved,loading:d.loading},s.set(i,d)),e(d,f,o.slice(2))}}});let r=n(78505);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},5410:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{computeChangedPath:function(){return s},extractPathFromFlightRouterState:function(){return c}});let r=n(91182),o=n(84541),u=n(76015),l=e=>"/"===e[0]?e.slice(1):e,a=e=>"string"==typeof e?"children"===e?"":e:e[1];function i(e){return e.reduce((e,t)=>""===(t=l(t))||(0,o.isGroupSegment)(t)?e:e+"/"+t,"")||"/"}function c(e){var t;let n=Array.isArray(e[0])?e[0][1]:e[0];if(n===o.DEFAULT_SEGMENT_KEY||r.INTERCEPTION_ROUTE_MARKERS.some(e=>n.startsWith(e)))return;if(n.startsWith(o.PAGE_SEGMENT_KEY))return"";let u=[a(n)],l=null!=(t=e[1])?t:{},s=l.children?c(l.children):void 0;if(void 0!==s)u.push(s);else for(let[e,t]of Object.entries(l)){if("children"===e)continue;let n=c(t);void 0!==n&&u.push(n)}return i(u)}function s(e,t){let n=function e(t,n){let[o,l]=t,[i,s]=n,f=a(o),d=a(i);if(r.INTERCEPTION_ROUTE_MARKERS.some(e=>f.startsWith(e)||d.startsWith(e)))return"";if(!(0,u.matchSegment)(o,i)){var p;return null!=(p=c(n))?p:""}for(let t in l)if(s[t]){let n=e(l[t],s[t]);if(null!==n)return a(i)+"/"+n}return null}(e,t);return null==n||"/"===n?n:i(n.split("/"))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},33456:function(e,t){"use strict";function n(e,t){return void 0===t&&(t=!0),e.pathname+e.search+(t?e.hash:"")}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createHrefFromUrl",{enumerable:!0,get:function(){return n}}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},82952:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createInitialRouterState",{enumerable:!0,get:function(){return 
c}});let r=n(33456),o=n(27420),u=n(5410),l=n(60305),a=n(24673),i=n(50232);function c(e){var t;let{buildId:n,initialTree:c,initialSeedData:s,urlParts:f,initialParallelRoutes:d,location:p,initialHead:h,couldBeIntercepted:y}=e,_=f.join("/"),v=!p,b={lazyData:null,rsc:s[2],prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:v?new Map:d,lazyDataResolved:!1,loading:s[3]},g=p?(0,r.createHrefFromUrl)(p):_;(0,i.addRefreshMarkerToActiveParallelSegments)(c,g);let m=new Map;(null===d||0===d.size)&&(0,o.fillLazyItemsTillLeafWithHead)(b,void 0,c,s,h);let R={buildId:n,tree:c,cache:b,prefetchCache:m,pushRef:{pendingPush:!1,mpaNavigation:!1,preserveCustomHistoryState:!0},focusAndScrollRef:{apply:!1,onlyHashChange:!1,hashFragment:null,segmentPaths:[]},canonicalUrl:g,nextUrl:null!=(t=(0,u.extractPathFromFlightRouterState)(c)||(null==p?void 0:p.pathname))?t:null};if(p){let e=new URL(""+p.pathname+p.search,p.origin),t=[["",c,null,null]];(0,l.createPrefetchCacheEntryForInitialLoad)({url:e,kind:a.PrefetchKind.AUTO,data:[t,void 0,!1,y],tree:R.tree,prefetchCache:R.prefetchCache,nextUrl:R.nextUrl})}return R}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},78505:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createRouterCacheKey",{enumerable:!0,get:function(){return o}});let r=n(84541);function o(e,t){return(void 0===t&&(t=!1),Array.isArray(e))?e[0]+"|"+e[1]+"|"+e[2]:t&&e.startsWith(r.PAGE_SEGMENT_KEY)?r.PAGE_SEGMENT_KEY:e}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},44848:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fetchServerResponse",{enumerable:!0,get:function(){return s}});let r=n(6866),o=n(12846),u=n(83079),l=n(24673),a=n(37207),{createFromFetch:i}=n(6671);function c(e){return[(0,o.urlToUrlWithoutFlightMarker)(e).toString(),void 0,!1,!1]}async function s(e,t,n,s,f){let d={[r.RSC_HEADER]:"1",[r.NEXT_ROUTER_STATE_TREE]:encodeURIComponent(JSON.stringify(t))};f===l.PrefetchKind.AUTO&&(d[r.NEXT_ROUTER_PREFETCH_HEADER]="1"),n&&(d[r.NEXT_URL]=n);let p=(0,a.hexHash)([d[r.NEXT_ROUTER_PREFETCH_HEADER]||"0",d[r.NEXT_ROUTER_STATE_TREE],d[r.NEXT_URL]].join(","));try{var h;let t=new URL(e);t.pathname.endsWith("/")?t.pathname+="index.txt":t.pathname+=".txt",t.searchParams.set(r.NEXT_RSC_UNION_QUERY,p);let n=await fetch(t,{credentials:"same-origin",headers:d}),l=(0,o.urlToUrlWithoutFlightMarker)(n.url),a=n.redirected?l:void 0,f=n.headers.get("content-type")||"",y=!!n.headers.get(r.NEXT_DID_POSTPONE_HEADER),_=!!(null==(h=n.headers.get("vary"))?void 0:h.includes(r.NEXT_URL)),v=f===r.RSC_CONTENT_TYPE_HEADER;if(v||(v=f.startsWith("text/plain")),!v||!n.ok)return e.hash&&(l.hash=e.hash),c(l.toString());let[b,g]=await i(Promise.resolve(n),{callServer:u.callServer});if(s!==b)return c(n.url);return[g,a,y,_]}catch(t){return console.error("Failed to fetch RSC payload for "+e+". 
Falling back to browser navigation.",t),[e.toString(),void 0,!1,!1]}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},92576:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fillCacheWithNewSubTreeData",{enumerable:!0,get:function(){return function e(t,n,l,a){let i=l.length<=5,[c,s]=l,f=(0,u.createRouterCacheKey)(s),d=n.parallelRoutes.get(c);if(!d)return;let p=t.parallelRoutes.get(c);p&&p!==d||(p=new Map(d),t.parallelRoutes.set(c,p));let h=d.get(f),y=p.get(f);if(i){if(!y||!y.lazyData||y===h){let e=l[3];y={lazyData:null,rsc:e[2],prefetchRsc:null,head:null,prefetchHead:null,loading:e[3],parallelRoutes:h?new Map(h.parallelRoutes):new Map,lazyDataResolved:!1},h&&(0,r.invalidateCacheByRouterState)(y,h,l[2]),(0,o.fillLazyItemsTillLeafWithHead)(y,h,l[2],e,l[4],a),p.set(f,y)}return}y&&h&&(y===h&&(y={lazyData:y.lazyData,rsc:y.rsc,prefetchRsc:y.prefetchRsc,head:y.head,prefetchHead:y.prefetchHead,parallelRoutes:new Map(y.parallelRoutes),lazyDataResolved:!1,loading:y.loading},p.set(f,y)),e(y,h,l.slice(2),a))}}});let r=n(94377),o=n(27420),u=n(78505);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},27420:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"fillLazyItemsTillLeafWithHead",{enumerable:!0,get:function(){return function e(t,n,u,l,a,i){if(0===Object.keys(u[1]).length){t.head=a;return}for(let c in u[1]){let s;let f=u[1][c],d=f[0],p=(0,r.createRouterCacheKey)(d),h=null!==l&&void 0!==l[1][c]?l[1][c]:null;if(n){let r=n.parallelRoutes.get(c);if(r){let n;let u=(null==i?void 0:i.kind)==="auto"&&i.status===o.PrefetchCacheEntryStatus.reusable,l=new Map(r),s=l.get(p);n=null!==h?{lazyData:null,rsc:h[2],prefetchRsc:null,head:null,prefetchHead:null,loading:h[3],parallelRoutes:new Map(null==s?void 0:s.parallelRoutes),lazyDataResolved:!1}:u&&s?{lazyData:s.lazyData,rsc:s.rsc,prefetchRsc:s.prefetchRsc,head:s.head,prefetchHead:s.prefetchHead,parallelRoutes:new Map(s.parallelRoutes),lazyDataResolved:s.lazyDataResolved,loading:s.loading}:{lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map(null==s?void 0:s.parallelRoutes),lazyDataResolved:!1,loading:null},l.set(p,n),e(n,s,f,h||null,a,i),t.parallelRoutes.set(c,l);continue}}if(null!==h){let e=h[2],t=h[3];s={lazyData:null,rsc:e,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:t}}else s={lazyData:null,rsc:null,prefetchRsc:null,head:null,prefetchHead:null,parallelRoutes:new Map,lazyDataResolved:!1,loading:null};let y=t.parallelRoutes.get(c);y?y.set(p,s):t.parallelRoutes.set(c,new Map([[p,s]])),e(s,void 0,f,h,a,i)}}}});let r=n(78505),o=n(24673);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},44510:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"handleMutable",{enumerable:!0,get:function(){return u}});let r=n(5410);function o(e){return void 0!==e}function u(e,t){var n,u,l;let 
a=null==(u=t.shouldScroll)||u,i=e.nextUrl;if(o(t.patchedTree)){let n=(0,r.computeChangedPath)(e.tree,t.patchedTree);n?i=n:i||(i=e.canonicalUrl)}return{buildId:e.buildId,canonicalUrl:o(t.canonicalUrl)?t.canonicalUrl===e.canonicalUrl?e.canonicalUrl:t.canonicalUrl:e.canonicalUrl,pushRef:{pendingPush:o(t.pendingPush)?t.pendingPush:e.pushRef.pendingPush,mpaNavigation:o(t.mpaNavigation)?t.mpaNavigation:e.pushRef.mpaNavigation,preserveCustomHistoryState:o(t.preserveCustomHistoryState)?t.preserveCustomHistoryState:e.pushRef.preserveCustomHistoryState},focusAndScrollRef:{apply:!!a&&(!!o(null==t?void 0:t.scrollableSegments)||e.focusAndScrollRef.apply),onlyHashChange:!!t.hashFragment&&e.canonicalUrl.split("#",1)[0]===(null==(n=t.canonicalUrl)?void 0:n.split("#",1)[0]),hashFragment:a?t.hashFragment&&""!==t.hashFragment?decodeURIComponent(t.hashFragment.slice(1)):e.focusAndScrollRef.hashFragment:null,segmentPaths:a?null!=(l=null==t?void 0:t.scrollableSegments)?l:e.focusAndScrollRef.segmentPaths:[]},cache:t.cache?t.cache:e.cache,prefetchCache:t.prefetchCache?t.prefetchCache:e.prefetchCache,tree:o(t.patchedTree)?t.patchedTree:e.tree,nextUrl:i}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},77831:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"handleSegmentMismatch",{enumerable:!0,get:function(){return o}});let r=n(95967);function o(e,t,n){return(0,r.handleExternalUrl)(e,{},e.canonicalUrl,!0)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},77058:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"invalidateCacheBelowFlightSegmentPath",{enumerable:!0,get:function(){return function e(t,n,o){let u=o.length<=2,[l,a]=o,i=(0,r.createRouterCacheKey)(a),c=n.parallelRoutes.get(l);if(!c)return;let s=t.parallelRoutes.get(l);if(s&&s!==c||(s=new Map(c),t.parallelRoutes.set(l,s)),u){s.delete(i);return}let f=c.get(i),d=s.get(i);d&&f&&(d===f&&(d={lazyData:d.lazyData,rsc:d.rsc,prefetchRsc:d.prefetchRsc,head:d.head,prefetchHead:d.prefetchHead,parallelRoutes:new Map(d.parallelRoutes),lazyDataResolved:d.lazyDataResolved},s.set(i,d)),e(d,f,o.slice(2)))}}});let r=n(78505);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},94377:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"invalidateCacheByRouterState",{enumerable:!0,get:function(){return o}});let r=n(78505);function o(e,t,n){for(let o in n[1]){let u=n[1][o][0],l=(0,r.createRouterCacheKey)(u),a=t.parallelRoutes.get(o);if(a){let t=new Map(a);t.delete(l),e.parallelRoutes.set(o,t)}}}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},63237:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"isNavigatingToNewRootLayout",{enumerable:!0,get:function(){return function e(t,n){let 
r=t[0],o=n[0];if(Array.isArray(r)&&Array.isArray(o)){if(r[0]!==o[0]||r[2]!==o[2])return!0}else if(r!==o)return!0;if(t[4])return!n[4];if(n[4])return!0;let u=Object.values(t[1])[0],l=Object.values(n[1])[0];return!u||!l||e(u,l)}}}),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},56118:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{abortTask:function(){return c},listenForDynamicRequest:function(){return a},updateCacheNodeOnNavigation:function(){return function e(t,n,a,c,s){let f=n[1],d=a[1],p=c[1],h=t.parallelRoutes,y=new Map(h),_={},v=null;for(let t in d){let n;let a=d[t],c=f[t],b=h.get(t),g=p[t],m=a[0],R=(0,u.createRouterCacheKey)(m),P=void 0!==c?c[0]:void 0,j=void 0!==b?b.get(R):void 0;if(null!==(n=m===r.PAGE_SEGMENT_KEY?l(a,void 0!==g?g:null,s):m===r.DEFAULT_SEGMENT_KEY?void 0!==c?{route:c,node:null,children:null}:l(a,void 0!==g?g:null,s):void 0!==P&&(0,o.matchSegment)(m,P)&&void 0!==j&&void 0!==c?null!=g?e(j,c,a,g,s):function(e){let t=i(e,null,null);return{route:e,node:t,children:null}}(a):l(a,void 0!==g?g:null,s))){null===v&&(v=new Map),v.set(t,n);let e=n.node;if(null!==e){let n=new Map(b);n.set(R,e),y.set(t,n)}_[t]=n.route}else _[t]=a}if(null===v)return null;let b={lazyData:null,rsc:t.rsc,prefetchRsc:t.prefetchRsc,head:t.head,prefetchHead:t.prefetchHead,loading:t.loading,parallelRoutes:y,lazyDataResolved:!1};return{route:function(e,t){let n=[e[0],t];return 2 in e&&(n[2]=e[2]),3 in e&&(n[3]=e[3]),4 in e&&(n[4]=e[4]),n}(a,_),node:b,children:v}}},updateCacheNodeOnPopstateRestoration:function(){return function e(t,n){let r=n[1],o=t.parallelRoutes,l=new Map(o);for(let t in r){let n=r[t],a=n[0],i=(0,u.createRouterCacheKey)(a),c=o.get(t);if(void 0!==c){let r=c.get(i);if(void 0!==r){let o=e(r,n),u=new Map(c);u.set(i,o),l.set(t,u)}}}let a=t.rsc,i=d(a)&&"pending"===a.status;return{lazyData:null,rsc:a,head:t.head,prefetchHead:i?t.prefetchHead:null,prefetchRsc:i?t.prefetchRsc:null,loading:i?t.loading:null,parallelRoutes:l,lazyDataResolved:!1}}}});let r=n(84541),o=n(76015),u=n(78505);function l(e,t,n){let r=i(e,t,n);return{route:e,node:r,children:null}}function a(e,t){t.then(t=>{for(let n of t[0]){let t=n.slice(0,-3),r=n[n.length-3],l=n[n.length-2],a=n[n.length-1];"string"!=typeof t&&function(e,t,n,r,l){let a=e;for(let e=0;e{c(e,t)})}function i(e,t,n){let r=e[1],o=null!==t?t[1]:null,l=new Map;for(let e in r){let t=r[e],a=null!==o?o[e]:null,c=t[0],s=(0,u.createRouterCacheKey)(c),f=i(t,void 0===a?null:a,n),d=new Map;d.set(s,f),l.set(e,d)}let a=0===l.size,c=null!==t?t[2]:null,s=null!==t?t[3]:null;return{lazyData:null,parallelRoutes:l,prefetchRsc:void 0!==c?c:null,prefetchHead:a?n:null,loading:void 0!==s?s:null,rsc:p(),head:a?p():null,lazyDataResolved:!1}}function c(e,t){let n=e.node;if(null===n)return;let r=e.children;if(null===r)s(e.route,n,t);else for(let e of r.values())c(e,t);e.node=null}function s(e,t,n){let r=e[1],o=t.parallelRoutes;for(let e in r){let t=r[e],l=o.get(e);if(void 0===l)continue;let a=t[0],i=(0,u.createRouterCacheKey)(a),c=l.get(i);void 0!==c&&s(t,c,n)}let l=t.rsc;d(l)&&(null===n?l.resolve(null):l.reject(n));let a=t.head;d(a)&&a.resolve(null)}let f=Symbol();function d(e){return e&&e.tag===f}function p(){let e,t;let n=new Promise((n,r)=>{e=n,t=r});return 
n.status="pending",n.resolve=t=>{"pending"===n.status&&(n.status="fulfilled",n.value=t,e(t))},n.reject=e=>{"pending"===n.status&&(n.status="rejected",n.reason=e,t(e))},n.tag=f,n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},60305:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{createPrefetchCacheEntryForInitialLoad:function(){return c},getOrCreatePrefetchCacheEntry:function(){return i},prunePrefetchCache:function(){return f}});let r=n(33456),o=n(44848),u=n(24673),l=n(24819);function a(e,t){let n=(0,r.createHrefFromUrl)(e,!1);return t?t+"%"+n:n}function i(e){let t,{url:n,nextUrl:r,tree:o,buildId:l,prefetchCache:i,kind:c}=e,f=a(n,r),d=i.get(f);if(d)t=d;else{let e=a(n),r=i.get(e);r&&(t=r)}return t?(t.status=h(t),t.kind!==u.PrefetchKind.FULL&&c===u.PrefetchKind.FULL)?s({tree:o,url:n,buildId:l,nextUrl:r,prefetchCache:i,kind:null!=c?c:u.PrefetchKind.TEMPORARY}):(c&&t.kind===u.PrefetchKind.TEMPORARY&&(t.kind=c),t):s({tree:o,url:n,buildId:l,nextUrl:r,prefetchCache:i,kind:c||u.PrefetchKind.TEMPORARY})}function c(e){let{nextUrl:t,tree:n,prefetchCache:r,url:o,kind:l,data:i}=e,[,,,c]=i,s=c?a(o,t):a(o),f={treeAtTimeOfPrefetch:n,data:Promise.resolve(i),kind:l,prefetchTime:Date.now(),lastUsedTime:Date.now(),key:s,status:u.PrefetchCacheEntryStatus.fresh};return r.set(s,f),f}function s(e){let{url:t,kind:n,tree:r,nextUrl:i,buildId:c,prefetchCache:s}=e,f=a(t),d=l.prefetchQueue.enqueue(()=>(0,o.fetchServerResponse)(t,r,i,c,n).then(e=>{let[,,,n]=e;return n&&function(e){let{url:t,nextUrl:n,prefetchCache:r}=e,o=a(t),u=r.get(o);if(!u)return;let l=a(t,n);r.set(l,u),r.delete(o)}({url:t,nextUrl:i,prefetchCache:s}),e})),p={treeAtTimeOfPrefetch:r,data:d,kind:n,prefetchTime:Date.now(),lastUsedTime:null,key:f,status:u.PrefetchCacheEntryStatus.fresh};return s.set(f,p),p}function f(e){for(let[t,n]of e)h(n)===u.PrefetchCacheEntryStatus.expired&&e.delete(t)}let d=1e3*Number("30"),p=1e3*Number("300");function h(e){let{kind:t,prefetchTime:n,lastUsedTime:r}=e;return Date.now()<(null!=r?r:n)+d?r?u.PrefetchCacheEntryStatus.reusable:u.PrefetchCacheEntryStatus.fresh:"auto"===t&&Date.now(){let[n,f]=t,h=!1;if(S.lastUsedTime||(S.lastUsedTime=Date.now(),h=!0),"string"==typeof n)return _(e,R,n,O);if(document.getElementById("__next-page-redirect"))return _(e,R,j,O);let b=e.tree,g=e.cache,w=[];for(let t of n){let n=t.slice(0,-4),r=t.slice(-3)[0],c=["",...n],f=(0,u.applyRouterStatePatchToTree)(c,b,r,j);if(null===f&&(f=(0,u.applyRouterStatePatchToTree)(c,E,r,j)),null!==f){if((0,a.isNavigatingToNewRootLayout)(b,f))return _(e,R,j,O);let u=(0,d.createEmptyCacheNode)(),m=!1;for(let e of(S.status!==i.PrefetchCacheEntryStatus.stale||h?m=(0,s.applyFlightData)(g,u,t,S):(m=function(e,t,n,r){let o=!1;for(let u of(e.rsc=t.rsc,e.prefetchRsc=t.prefetchRsc,e.loading=t.loading,e.parallelRoutes=new Map(t.parallelRoutes),v(r).map(e=>[...n,...e])))(0,y.clearCacheNodeDataForSegmentPath)(e,t,u),o=!0;return o}(u,g,n,r),S.lastUsedTime=Date.now()),(0,l.shouldHardNavigate)(c,b)?(u.rsc=g.rsc,u.prefetchRsc=g.prefetchRsc,(0,o.invalidateCacheBelowFlightSegmentPath)(u,g,n),R.cache=u):m&&(R.cache=u,g=u),b=f,v(r))){let t=[...n,...e];t[t.length-1]!==p.DEFAULT_SEGMENT_KEY&&w.push(t)}}}return 
R.patchedTree=b,R.canonicalUrl=f?(0,r.createHrefFromUrl)(f):j,R.pendingPush=O,R.scrollableSegments=w,R.hashFragment=P,R.shouldScroll=m,(0,c.handleMutable)(e,R)},()=>e)};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},24819:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{prefetchQueue:function(){return l},prefetchReducer:function(){return a}});let r=n(6866),o=n(29744),u=n(60305),l=new o.PromiseQueue(5);function a(e,t){(0,u.prunePrefetchCache)(e.prefetchCache);let{url:n}=t;return n.searchParams.delete(r.NEXT_RSC_UNION_QUERY),(0,u.getOrCreatePrefetchCacheEntry)({url:n,nextUrl:e.nextUrl,prefetchCache:e.prefetchCache,kind:t.kind,tree:e.tree,buildId:e.buildId}),e}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},99601:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"refreshReducer",{enumerable:!0,get:function(){return h}});let r=n(44848),o=n(33456),u=n(81935),l=n(63237),a=n(95967),i=n(44510),c=n(27420),s=n(12846),f=n(77831),d=n(28077),p=n(50232);function h(e,t){let{origin:n}=t,h={},y=e.canonicalUrl,_=e.tree;h.preserveCustomHistoryState=!1;let v=(0,s.createEmptyCacheNode)(),b=(0,d.hasInterceptionRouteInCurrentTree)(e.tree);return v.lazyData=(0,r.fetchServerResponse)(new URL(y,n),[_[0],_[1],_[2],"refetch"],b?e.nextUrl:null,e.buildId),v.lazyData.then(async n=>{let[r,s]=n;if("string"==typeof r)return(0,a.handleExternalUrl)(e,h,r,e.pushRef.pendingPush);for(let n of(v.lazyData=null,r)){if(3!==n.length)return console.log("REFRESH FAILED"),e;let[r]=n,i=(0,u.applyRouterStatePatchToTree)([""],_,r,e.canonicalUrl);if(null===i)return(0,f.handleSegmentMismatch)(e,t,r);if((0,l.isNavigatingToNewRootLayout)(_,i))return(0,a.handleExternalUrl)(e,h,y,e.pushRef.pendingPush);let d=s?(0,o.createHrefFromUrl)(s):void 0;s&&(h.canonicalUrl=d);let[g,m]=n.slice(-2);if(null!==g){let e=g[2];v.rsc=e,v.prefetchRsc=null,(0,c.fillLazyItemsTillLeafWithHead)(v,void 0,r,g,m),h.prefetchCache=new Map}await (0,p.refreshInactiveParallelSegments)({state:e,updatedTree:i,updatedCache:v,includeNextUrl:b,canonicalUrl:h.canonicalUrl||e.canonicalUrl}),h.cache=v,h.patchedTree=i,h.canonicalUrl=y,_=i}return(0,i.handleMutable)(e,h)},()=>e)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},77784:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"restoreReducer",{enumerable:!0,get:function(){return u}});let r=n(33456),o=n(5410);function u(e,t){var n;let{url:u,tree:l}=t,a=(0,r.createHrefFromUrl)(u),i=l||e.tree,c=e.cache;return{buildId:e.buildId,canonicalUrl:a,pushRef:{pendingPush:!1,mpaNavigation:!1,preserveCustomHistoryState:!0},focusAndScrollRef:e.focusAndScrollRef,cache:c,prefetchCache:e.prefetchCache,tree:i,nextUrl:null!=(n=(0,o.extractPathFromFlightRouterState)(i))?n:u.pathname}}n(56118),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},13722:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"serverActionReducer",{enumerable:!0,get:function(){return g}});let r=n(83079),o=n(6866),u=n(1634),l=n(33456),a=n(95967),i=n(81935),c=n(63237),s=n(44510),f=n(27420),d=n(12846),p=n(28077),h=n(77831),y=n(50232),{createFromFetch:_,encodeReply:v}=n(6671);async function b(e,t,n){let l,{actionId:a,actionArgs:i}=n,c=await v(i),s=await fetch("",{method:"POST",headers:{Accept:o.RSC_CONTENT_TYPE_HEADER,[o.ACTION]:a,[o.NEXT_ROUTER_STATE_TREE]:encodeURIComponent(JSON.stringify(e.tree)),...t?{[o.NEXT_URL]:t}:{}},body:c}),f=s.headers.get("x-action-redirect");try{let e=JSON.parse(s.headers.get("x-action-revalidated")||"[[],0,0]");l={paths:e[0]||[],tag:!!e[1],cookie:e[2]}}catch(e){l={paths:[],tag:!1,cookie:!1}}let d=f?new URL((0,u.addBasePath)(f),new URL(e.canonicalUrl,window.location.href)):void 0;if(s.headers.get("content-type")===o.RSC_CONTENT_TYPE_HEADER){let e=await _(Promise.resolve(s),{callServer:r.callServer});if(f){let[,t]=null!=e?e:[];return{actionFlightData:t,redirectLocation:d,revalidatedParts:l}}let[t,[,n]]=null!=e?e:[];return{actionResult:t,actionFlightData:n,redirectLocation:d,revalidatedParts:l}}return{redirectLocation:d,revalidatedParts:l}}function g(e,t){let{resolve:n,reject:r}=t,o={},u=e.canonicalUrl,_=e.tree;o.preserveCustomHistoryState=!1;let v=e.nextUrl&&(0,p.hasInterceptionRouteInCurrentTree)(e.tree)?e.nextUrl:null;return o.inFlightServerAction=b(e,v,t),o.inFlightServerAction.then(async r=>{let{actionResult:p,actionFlightData:b,redirectLocation:g}=r;if(g&&(e.pushRef.pendingPush=!0,o.pendingPush=!0),!b)return(n(p),g)?(0,a.handleExternalUrl)(e,o,g.href,e.pushRef.pendingPush):e;if("string"==typeof b)return(0,a.handleExternalUrl)(e,o,b,e.pushRef.pendingPush);if(o.inFlightServerAction=null,g){let e=(0,l.createHrefFromUrl)(g,!1);o.canonicalUrl=e}for(let n of b){if(3!==n.length)return console.log("SERVER ACTION APPLY FAILED"),e;let[r]=n,s=(0,i.applyRouterStatePatchToTree)([""],_,r,g?(0,l.createHrefFromUrl)(g):e.canonicalUrl);if(null===s)return(0,h.handleSegmentMismatch)(e,t,r);if((0,c.isNavigatingToNewRootLayout)(_,s))return(0,a.handleExternalUrl)(e,o,u,e.pushRef.pendingPush);let[p,b]=n.slice(-2),m=null!==p?p[2]:null;if(null!==m){let t=(0,d.createEmptyCacheNode)();t.rsc=m,t.prefetchRsc=null,(0,f.fillLazyItemsTillLeafWithHead)(t,void 0,r,p,b),await (0,y.refreshInactiveParallelSegments)({state:e,updatedTree:s,updatedCache:t,includeNextUrl:!!v,canonicalUrl:o.canonicalUrl||e.canonicalUrl}),o.cache=t,o.prefetchCache=new Map}o.patchedTree=s,_=s}return n(p),(0,s.handleMutable)(e,o)},t=>(r(t),e))}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},68448:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"serverPatchReducer",{enumerable:!0,get:function(){return f}});let r=n(33456),o=n(81935),u=n(63237),l=n(95967),a=n(22356),i=n(44510),c=n(12846),s=n(77831);function f(e,t){let{serverResponse:n}=t,[f,d]=n,p={};if(p.preserveCustomHistoryState=!1,"string"==typeof f)return(0,l.handleExternalUrl)(e,p,f,e.pushRef.pendingPush);let h=e.tree,y=e.cache;for(let n of f){let 
i=n.slice(0,-4),[f]=n.slice(-3,-2),_=(0,o.applyRouterStatePatchToTree)(["",...i],h,f,e.canonicalUrl);if(null===_)return(0,s.handleSegmentMismatch)(e,t,f);if((0,u.isNavigatingToNewRootLayout)(h,_))return(0,l.handleExternalUrl)(e,p,e.canonicalUrl,e.pushRef.pendingPush);let v=d?(0,r.createHrefFromUrl)(d):void 0;v&&(p.canonicalUrl=v);let b=(0,c.createEmptyCacheNode)();(0,a.applyFlightData)(y,b,n),p.patchedTree=_,p.cache=b,y=b,h=_}return(0,i.handleMutable)(e,p)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},50232:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{addRefreshMarkerToActiveParallelSegments:function(){return function e(t,n){let[r,o,,l]=t;for(let a in r.includes(u.PAGE_SEGMENT_KEY)&&"refresh"!==l&&(t[2]=n,t[3]="refresh"),o)e(o[a],n)}},refreshInactiveParallelSegments:function(){return l}});let r=n(22356),o=n(44848),u=n(84541);async function l(e){let t=new Set;await a({...e,rootTree:e.updatedTree,fetchedSegments:t})}async function a(e){let{state:t,updatedTree:n,updatedCache:u,includeNextUrl:l,fetchedSegments:i,rootTree:c=n,canonicalUrl:s}=e,[,f,d,p]=n,h=[];if(d&&d!==s&&"refresh"===p&&!i.has(d)){i.add(d);let e=(0,o.fetchServerResponse)(new URL(d,location.origin),[c[0],c[1],c[2],"refetch"],l?t.nextUrl:null,t.buildId).then(e=>{let t=e[0];if("string"!=typeof t)for(let e of t)(0,r.applyFlightData)(u,u,e)});h.push(e)}for(let e in f){let n=a({state:t,updatedTree:f[e],updatedCache:u,includeNextUrl:l,fetchedSegments:i,rootTree:c,canonicalUrl:s});h.push(n)}await Promise.all(h)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},24673:function(e,t){"use strict";var n,r,o,u;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ACTION_FAST_REFRESH:function(){return f},ACTION_NAVIGATE:function(){return a},ACTION_PREFETCH:function(){return s},ACTION_REFRESH:function(){return l},ACTION_RESTORE:function(){return i},ACTION_SERVER_ACTION:function(){return d},ACTION_SERVER_PATCH:function(){return c},PrefetchCacheEntryStatus:function(){return r},PrefetchKind:function(){return n},isThenable:function(){return p}});let l="refresh",a="navigate",i="restore",c="server-patch",s="prefetch",f="fast-refresh",d="server-action";function p(e){return e&&("object"==typeof e||"function"==typeof e)&&"function"==typeof e.then}(o=n||(n={})).AUTO="auto",o.FULL="full",o.TEMPORARY="temporary",(u=r||(r={})).fresh="fresh",u.reusable="reusable",u.expired="expired",u.stale="stale",("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},91450:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"reducer",{enumerable:!0,get:function(){return f}});let r=n(24673),o=n(95967),u=n(68448),l=n(77784),a=n(99601),i=n(24819),c=n(44529),s=n(13722),f="undefined"==typeof window?function(e,t){return e}:function(e,t){switch(t.type){case r.ACTION_NAVIGATE:return(0,o.navigateReducer)(e,t);case 
r.ACTION_SERVER_PATCH:return(0,u.serverPatchReducer)(e,t);case r.ACTION_RESTORE:return(0,l.restoreReducer)(e,t);case r.ACTION_REFRESH:return(0,a.refreshReducer)(e,t);case r.ACTION_FAST_REFRESH:return(0,c.fastRefreshReducer)(e,t);case r.ACTION_PREFETCH:return(0,i.prefetchReducer)(e,t);case r.ACTION_SERVER_ACTION:return(0,s.serverActionReducer)(e,t);default:throw Error("Unknown action")}};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},53728:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"shouldHardNavigate",{enumerable:!0,get:function(){return function e(t,n){let[o,u]=n,[l,a]=t;return(0,r.matchSegment)(l,o)?!(t.length<=2)&&e(t.slice(2),u[a]):!!Array.isArray(l)}}});let r=n(76015);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},54535:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{createDynamicallyTrackedSearchParams:function(){return a},createUntrackedSearchParams:function(){return l}});let r=n(51845),o=n(86999),u=n(30650);function l(e){let t=r.staticGenerationAsyncStorage.getStore();return t&&t.forceStatic?{}:e}function a(e){let t=r.staticGenerationAsyncStorage.getStore();return t?t.forceStatic?{}:t.isStaticGeneration||t.dynamicShouldError?new Proxy({},{get:(e,n,r)=>("string"==typeof n&&(0,o.trackDynamicDataAccessed)(t,"searchParams."+n),u.ReflectAdapter.get(e,n,r)),has:(e,n)=>("string"==typeof n&&(0,o.trackDynamicDataAccessed)(t,"searchParams."+n),Reflect.has(e,n)),ownKeys:e=>((0,o.trackDynamicDataAccessed)(t,"searchParams"),Reflect.ownKeys(e))}):e:e}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},51845:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"staticGenerationAsyncStorage",{enumerable:!0,get:function(){return r.staticGenerationAsyncStorage}});let r=n(20030);("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},36864:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{StaticGenBailoutError:function(){return r},isStaticGenBailoutError:function(){return o}});let n="NEXT_STATIC_GEN_BAILOUT";class r extends Error{constructor(...e){super(...e),this.code=n}}function o(e){return"object"==typeof e&&null!==e&&"code"in e&&e.code===n}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},38137:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"unresolvedThenable",{enumerable:!0,get:function(){return n}});let n={then:()=>{}};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},47744:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{useReducerWithReduxDevtools:function(){return i},useUnwrapState:function(){return a}});let r=n(53099)._(n(2265)),o=n(24673),u=n(2103);function l(e){if(e instanceof Map){let t={};for(let[n,r]of e.entries()){if("function"==typeof r){t[n]="fn()";continue}if("object"==typeof r&&null!==r){if(r.$$typeof){t[n]=r.$$typeof.toString();continue}if(r._bundlerConfig){t[n]="FlightData";continue}}t[n]=l(r)}return t}if("object"==typeof e&&null!==e){let t={};for(let n in e){let r=e[n];if("function"==typeof r){t[n]="fn()";continue}if("object"==typeof r&&null!==r){if(r.$$typeof){t[n]=r.$$typeof.toString();continue}if(r.hasOwnProperty("_bundlerConfig")){t[n]="FlightData";continue}}t[n]=l(r)}return t}return Array.isArray(e)?e.map(l):e}function a(e){return(0,o.isThenable)(e)?(0,r.use)(e):e}let i="undefined"!=typeof window?function(e){let[t,n]=r.default.useState(e),o=(0,r.useContext)(u.ActionQueueContext);if(!o)throw Error("Invariant: Missing ActionQueueContext");let a=(0,r.useRef)(),i=(0,r.useRef)();return(0,r.useEffect)(()=>{if(!a.current&&!1!==i.current){if(void 0===i.current&&void 0===window.__REDUX_DEVTOOLS_EXTENSION__){i.current=!1;return}return a.current=window.__REDUX_DEVTOOLS_EXTENSION__.connect({instanceId:8e3,name:"next-router"}),a.current&&(a.current.init(l(e)),o&&(o.devToolsInstance=a.current)),()=>{a.current=void 0}}},[e,o]),[t,(0,r.useCallback)(t=>{o.state||(o.state=e),o.dispatch(t,n)},[o,e]),(0,r.useCallback)(e=>{a.current&&a.current.send({type:"RENDER_SYNC"},l(e))},[])]}:function(e){return[e,()=>{},()=>{}]};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},11283:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"hasBasePath",{enumerable:!0,get:function(){return o}});let r=n(10580);function o(e){return(0,r.pathHasPrefix)(e,"/ui")}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},33068:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"normalizePathTrailingSlash",{enumerable:!0,get:function(){return u}});let r=n(26674),o=n(63381),u=e=>{if(!e.startsWith("/"))return e;let{pathname:t,query:n,hash:u}=(0,o.parsePath)(e);return""+(0,r.removeTrailingSlash)(t)+n+u};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},61404:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return o}});let r=n(18993);function o(e){let t="function"==typeof reportError?reportError:e=>{window.console.error(e)};(0,r.isBailoutToCSRError)(e)||t(e)}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},35076:function(e,t,n){"use strict";function r(e){return(e=e.slice(3)).startsWith("/")||(e="/"+e),e}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"removeBasePath",{enumerable:!0,get:function(){return r}}),n(11283),("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},12010:function(e,t){"use strict";function n(e,t){var n=e.length;for(e.push(t);0>>1,o=e[r];if(0>>1;ru(i,n))cu(s,i)?(e[r]=s,e[c]=n,r=c):(e[r]=i,e[a]=n,r=a);else if(cu(s,n))e[r]=s,e[c]=n,r=c;else break}}return t}function u(e,t){var n=e.sortIndex-t.sortIndex;return 0!==n?n:e.id-t.id}if(t.unstable_now=void 0,"object"==typeof performance&&"function"==typeof performance.now){var l,a=performance;t.unstable_now=function(){return a.now()}}else{var i=Date,c=i.now();t.unstable_now=function(){return i.now()-c}}var s=[],f=[],d=1,p=null,h=3,y=!1,_=!1,v=!1,b="function"==typeof setTimeout?setTimeout:null,g="function"==typeof clearTimeout?clearTimeout:null,m="undefined"!=typeof setImmediate?setImmediate:null;function R(e){for(var t=r(f);null!==t;){if(null===t.callback)o(f);else if(t.startTime<=e)o(f),t.sortIndex=t.expirationTime,n(s,t);else break;t=r(f)}}function P(e){if(v=!1,R(e),!_){if(null!==r(s))_=!0,C();else{var t=r(f);null!==t&&A(P,t.startTime-e)}}}"undefined"!=typeof navigator&&void 0!==navigator.scheduling&&void 0!==navigator.scheduling.isInputPending&&navigator.scheduling.isInputPending.bind(navigator.scheduling);var j=!1,O=-1,S=5,E=-1;function w(){return!(t.unstable_now()-Ee&&w());){var a=p.callback;if("function"==typeof a){p.callback=null,h=p.priorityLevel;var i=a(p.expirationTime<=e);if(e=t.unstable_now(),"function"==typeof i){p.callback=i,R(e),n=!0;break t}p===r(s)&&o(s),R(e)}else o(s);p=r(s)}if(null!==p)n=!0;else{var c=r(f);null!==c&&A(P,c.startTime-e),n=!1}}break e}finally{p=null,h=u,y=!1}n=void 0}}finally{n?l():j=!1}}}if("function"==typeof m)l=function(){m(T)};else if("undefined"!=typeof MessageChannel){var M=new MessageChannel,x=M.port2;M.port1.onmessage=T,l=function(){x.postMessage(null)}}else l=function(){b(T,0)};function C(){j||(j=!0,l())}function A(e,n){O=b(function(){e(t.unstable_now())},n)}t.unstable_IdlePriority=5,t.unstable_ImmediatePriority=1,t.unstable_LowPriority=4,t.unstable_NormalPriority=3,t.unstable_Profiling=null,t.unstable_UserBlockingPriority=2,t.unstable_cancelCallback=function(e){e.callback=null},t.unstable_continueExecution=function(){_||y||(_=!0,C())},t.unstable_forceFrameRate=function(e){0>e||125l?(e.sortIndex=u,n(f,e),null===r(s)&&e===r(f)&&(v?(g(O),O=-1):v=!0,A(P,u-l))):(e.sortIndex=a,n(s,e),_||y||(_=!0,C())),e},t.unstable_shouldYield=w,t.unstable_wrapCallback=function(e){var t=h;return function(){var n=h;h=t;try{return e.apply(this,arguments)}finally{h=n}}}},71767:function(e,t,n){"use strict";e.exports=n(12010)},60934:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{getPathname:function(){return r},isFullStringUrl:function(){return o},parseUrl:function(){return u}});let n="http://n";function r(e){return new URL(e,n).pathname}function o(e){return/https?:\/\//.test(e)}function u(e){let t;try{t=new URL(e,n)}catch{}return t}},86999:function(e,t,n){"use strict";var 
r;Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{Postpone:function(){return d},createPostponedAbortSignal:function(){return b},createPrerenderState:function(){return c},formatDynamicAPIAccesses:function(){return _},markCurrentScopeAsDynamic:function(){return s},trackDynamicDataAccessed:function(){return f},trackDynamicFetch:function(){return p},usedDynamicAPIs:function(){return y}});let o=(r=n(2265))&&r.__esModule?r:{default:r},u=n(46177),l=n(36864),a=n(60934),i="function"==typeof o.default.unstable_postpone;function c(e){return{isDebugSkeleton:e,dynamicAccesses:[]}}function s(e,t){let n=(0,a.getPathname)(e.urlPathname);if(!e.isUnstableCacheCallback){if(e.dynamicShouldError)throw new l.StaticGenBailoutError(`Route ${n} with \`dynamic = "error"\` couldn't be rendered statically because it used \`${t}\`. See more info here: https://nextjs.org/docs/app/building-your-application/rendering/static-and-dynamic#dynamic-rendering`);if(e.prerenderState)h(e.prerenderState,t,n);else if(e.revalidate=0,e.isStaticGeneration){let r=new u.DynamicServerError(`Route ${n} couldn't be rendered statically because it used ${t}. See more info here: https://nextjs.org/docs/messages/dynamic-server-error`);throw e.dynamicUsageDescription=t,e.dynamicUsageStack=r.stack,r}}}function f(e,t){let n=(0,a.getPathname)(e.urlPathname);if(e.isUnstableCacheCallback)throw Error(`Route ${n} used "${t}" inside a function cached with "unstable_cache(...)". Accessing Dynamic data sources inside a cache scope is not supported. If you need this data inside a cached function use "${t}" outside of the cached function and pass the required dynamic data in as an argument. See more info here: https://nextjs.org/docs/app/api-reference/functions/unstable_cache`);if(e.dynamicShouldError)throw new l.StaticGenBailoutError(`Route ${n} with \`dynamic = "error"\` couldn't be rendered statically because it used \`${t}\`. See more info here: https://nextjs.org/docs/app/building-your-application/rendering/static-and-dynamic#dynamic-rendering`);if(e.prerenderState)h(e.prerenderState,t,n);else if(e.revalidate=0,e.isStaticGeneration){let r=new u.DynamicServerError(`Route ${n} couldn't be rendered statically because it used \`${t}\`. See more info here: https://nextjs.org/docs/messages/dynamic-server-error`);throw e.dynamicUsageDescription=t,e.dynamicUsageStack=r.stack,r}}function d({reason:e,prerenderState:t,pathname:n}){h(t,e,n)}function p(e,t){e.prerenderState&&h(e.prerenderState,t,e.urlPathname)}function h(e,t,n){v();let r=`Route ${n} needs to bail out of prerendering at this point because it used ${t}. React throws this special object to indicate where. It should not be caught by your own try/catch. Learn more: https://nextjs.org/docs/messages/ppr-caught-error`;e.dynamicAccesses.push({stack:e.isDebugSkeleton?Error().stack:void 0,expression:t}),o.default.unstable_postpone(r)}function y(e){return e.dynamicAccesses.length>0}function _(e){return e.dynamicAccesses.filter(e=>"string"==typeof e.stack&&e.stack.length>0).map(({expression:e,stack:t})=>(t=t.split("\n").slice(4).filter(e=>!(e.includes("node_modules/next/")||e.includes(" ()")||e.includes(" (node:"))).join("\n"),`Dynamic API Usage Debug - ${e}:
${t}`))}function v(){if(!i)throw Error("Invariant: React.unstable_postpone is not defined. This suggests the wrong version of React was loaded. This is a bug in Next.js")}function b(e){v();let t=new AbortController;try{o.default.unstable_postpone(e)}catch(e){t.abort(e)}return t.signal}},87417:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"getSegmentParam",{enumerable:!0,get:function(){return o}});let r=n(91182);function o(e){let t=r.INTERCEPTION_ROUTE_MARKERS.find(t=>e.startsWith(t));return(t&&(e=e.slice(t.length)),e.startsWith("[[...")&&e.endsWith("]]"))?{type:"optional-catchall",param:e.slice(5,-2)}:e.startsWith("[...")&&e.endsWith("]")?{type:t?"catchall-intercepted":"catchall",param:e.slice(4,-1)}:e.startsWith("[")&&e.endsWith("]")?{type:t?"dynamic-intercepted":"dynamic",param:e.slice(1,-1)}:null}},70647:function(e,t){"use strict";var n,r;Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"HMR_ACTIONS_SENT_TO_BROWSER",{enumerable:!0,get:function(){return n}}),(r=n||(n={})).ADDED_PAGE="addedPage",r.REMOVED_PAGE="removedPage",r.RELOAD_PAGE="reloadPage",r.SERVER_COMPONENT_CHANGES="serverComponentChanges",r.MIDDLEWARE_CHANGES="middlewareChanges",r.CLIENT_CHANGES="clientChanges",r.SERVER_ONLY_CHANGES="serverOnlyChanges",r.SYNC="sync",r.BUILT="built",r.BUILDING="building",r.DEV_PAGES_MANIFEST_UPDATE="devPagesManifestUpdate",r.TURBOPACK_MESSAGE="turbopack-message",r.SERVER_ERROR="serverError",r.TURBOPACK_CONNECTED="turbopack-connected"},91182:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{INTERCEPTION_ROUTE_MARKERS:function(){return o},extractInterceptionRouteInformation:function(){return l},isInterceptionRouteAppPath:function(){return u}});let r=n(20926),o=["(..)(..)","(.)","(..)","(...)"];function u(e){return void 0!==e.split("/").find(e=>o.find(t=>e.startsWith(t)))}function l(e){let t,n,u;for(let r of e.split("/"))if(n=o.find(e=>r.startsWith(e))){[t,u]=e.split(n,2);break}if(!t||!n||!u)throw Error(`Invalid interception route: ${e}. Must be in the format //(..|...|..)(..)/`);switch(t=(0,r.normalizeAppPath)(t),n){case"(.)":u="/"===t?`/${u}`:t+"/"+u;break;case"(..)":if("/"===t)throw Error(`Invalid interception route: ${e}. Cannot use (..) marker at the root level, use (.) instead.`);u=t.split("/").slice(0,-1).concat(u).join("/");break;case"(...)":u="/"+u;break;case"(..)(..)":let l=t.split("/");if(l.length<=2)throw Error(`Invalid interception route: ${e}. Cannot use (..)(..) 
marker at the root level or one level up.`);u=l.slice(0,-2).concat(u).join("/");break;default:throw Error("Invariant: unexpected marker")}return{interceptingRoute:t,interceptedRoute:u}}},30650:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"ReflectAdapter",{enumerable:!0,get:function(){return n}});class n{static get(e,t,n){let r=Reflect.get(e,t,n);return"function"==typeof r?r.bind(e):r}static set(e,t,n,r){return Reflect.set(e,t,n,r)}static has(e,t){return Reflect.has(e,t)}static deleteProperty(e,t){return Reflect.deleteProperty(e,t)}}},61956:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{AppRouterContext:function(){return o},GlobalLayoutRouterContext:function(){return l},LayoutRouterContext:function(){return u},MissingSlotContext:function(){return i},TemplateContext:function(){return a}});let r=n(47043)._(n(2265)),o=r.default.createContext(null),u=r.default.createContext(null),l=r.default.createContext(null),a=r.default.createContext(null),i=r.default.createContext(new Set)},37207:function(e,t){"use strict";function n(e){let t=5381;for(let n=0;n>>0}function r(e){return n(e).toString(36).slice(0,5)}Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{djb2Hash:function(){return n},hexHash:function(){return r}})},48701:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"HeadManagerContext",{enumerable:!0,get:function(){return r}});let r=n(47043)._(n(2265)).default.createContext({})},79060:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{PathParamsContext:function(){return l},PathnameContext:function(){return u},SearchParamsContext:function(){return o}});let r=n(2265),o=(0,r.createContext)(null),u=(0,r.createContext)(null),l=(0,r.createContext)(null)},18993:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{BailoutToCSRError:function(){return r},isBailoutToCSRError:function(){return o}});let n="BAILOUT_TO_CLIENT_SIDE_RENDERING";class r extends Error{constructor(e){super("Bail out to client-side rendering: "+e),this.reason=e,this.digest=n}}function o(e){return"object"==typeof e&&null!==e&&"digest"in e&&e.digest===n}},78162:function(e,t){"use strict";function n(e){return e.startsWith("/")?e:"/"+e}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"ensureLeadingSlash",{enumerable:!0,get:function(){return n}})},2103:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ActionQueueContext:function(){return a},createMutableActionQueue:function(){return s}});let r=n(53099),o=n(24673),u=n(91450),l=r._(n(2265)),a=l.default.createContext(null);function i(e,t){null!==e.pending&&(e.pending=e.pending.next,null!==e.pending?c({actionQueue:e,action:e.pending,setState:t}):e.needsRefresh&&(e.needsRefresh=!1,e.dispatch({type:o.ACTION_REFRESH,origin:window.location.origin},t)))}async function c(e){let{actionQueue:t,action:n,setState:r}=e,u=t.state;if(!u)throw Error("Invariant: Router state not initialized");t.pending=n;let l=n.payload,a=t.action(u,l);function 
c(e){n.discarded||(t.state=e,t.devToolsInstance&&t.devToolsInstance.send(l,e),i(t,r),n.resolve(e))}(0,o.isThenable)(a)?a.then(c,e=>{i(t,r),n.reject(e)}):c(a)}function s(){let e={state:null,dispatch:(t,n)=>(function(e,t,n){let r={resolve:n,reject:()=>{}};if(t.type!==o.ACTION_RESTORE){let e=new Promise((e,t)=>{r={resolve:e,reject:t}});(0,l.startTransition)(()=>{n(e)})}let u={payload:t,next:null,resolve:r.resolve,reject:r.reject};null===e.pending?(e.last=u,c({actionQueue:e,action:u,setState:n})):t.type===o.ACTION_NAVIGATE||t.type===o.ACTION_RESTORE?(e.pending.discarded=!0,e.last=u,e.pending.payload.type===o.ACTION_SERVER_ACTION&&(e.needsRefresh=!0),c({actionQueue:e,action:u,setState:n})):(null!==e.last&&(e.last.next=u),e.last=u)})(e,t,n),action:async(e,t)=>{if(null===e)throw Error("Invariant: Router state not initialized");return(0,u.reducer)(e,t)},pending:null,last:null};return e}},68498:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"addPathPrefix",{enumerable:!0,get:function(){return o}});let r=n(63381);function o(e,t){if(!e.startsWith("/")||!t)return e;let{pathname:n,query:o,hash:u}=(0,r.parsePath)(e);return""+t+n+o+u}},20926:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{normalizeAppPath:function(){return u},normalizeRscURL:function(){return l}});let r=n(78162),o=n(84541);function u(e){return(0,r.ensureLeadingSlash)(e.split("/").reduce((e,t,n,r)=>!t||(0,o.isGroupSegment)(t)||"@"===t[0]||("page"===t||"route"===t)&&n===r.length-1?e:e+"/"+t,""))}function l(e){return e.replace(/\.rsc($|\?)/,"$1")}},7092:function(e,t){"use strict";function n(e,t){if(void 0===t&&(t={}),t.onlyHashChange){e();return}let n=document.documentElement,r=n.style.scrollBehavior;n.style.scrollBehavior="auto",t.dontForceLayout||n.getClientRects(),e(),n.style.scrollBehavior=r}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"handleSmoothScroll",{enumerable:!0,get:function(){return n}})},86146:function(e,t){"use strict";function n(e){return/Googlebot|Mediapartners-Google|AdsBot-Google|googleweblight|Storebot-Google|Google-PageRenderer|Bingbot|BingPreview|Slurp|DuckDuckBot|baiduspider|yandex|sogou|LinkedInBot|bitlybot|tumblr|vkShare|quora link preview|facebookexternalhit|facebookcatalog|Twitterbot|applebot|redditbot|Slackbot|Discordbot|WhatsApp|SkypeUriPreview|ia_archiver/i.test(e)}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"isBot",{enumerable:!0,get:function(){return n}})},63381:function(e,t){"use strict";function n(e){let t=e.indexOf("#"),n=e.indexOf("?"),r=n>-1&&(t<0||n-1?{pathname:e.substring(0,r?n:t),query:r?e.substring(n,t>-1?t:void 0):"",hash:t>-1?e.slice(t):""}:{pathname:e,query:"",hash:""}}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"parsePath",{enumerable:!0,get:function(){return n}})},10580:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"pathHasPrefix",{enumerable:!0,get:function(){return o}});let r=n(63381);function o(e,t){if("string"!=typeof e)return!1;let{pathname:n}=(0,r.parsePath)(e);return n===t||n.startsWith(t+"/")}},26674:function(e,t){"use strict";function n(e){return e.replace(/\/$/,"")||"/"}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"removeTrailingSlash",{enumerable:!0,get:function(){return n}})},84541:function(e,t){"use strict";function 
n(e){return"("===e[0]&&e.endsWith(")")}Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{DEFAULT_SEGMENT_KEY:function(){return o},PAGE_SEGMENT_KEY:function(){return r},isGroupSegment:function(){return n}});let r="__PAGE__",o="__DEFAULT__"},55501:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var n in t)Object.defineProperty(e,n,{enumerable:!0,get:t[n]})}(t,{ServerInsertedHTMLContext:function(){return o},useServerInsertedHTML:function(){return u}});let r=n(53099)._(n(2265)),o=r.default.createContext(null);function u(e){let t=(0,r.useContext)(o);t&&t(e)}},31765:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"warnOnce",{enumerable:!0,get:function(){return n}});let n=e=>{}},47149:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"actionAsyncStorage",{enumerable:!0,get:function(){return r}});let r=(0,n(54832).createAsyncLocalStorage)();("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},54832:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createAsyncLocalStorage",{enumerable:!0,get:function(){return u}});let n=Error("Invariant: AsyncLocalStorage accessed in runtime where it is not available");class r{disable(){throw n}getStore(){}run(){throw n}exit(){throw n}enterWith(){throw n}}let o=globalThis.AsyncLocalStorage;function u(){return o?new o:new r}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},25575:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"requestAsyncStorage",{enumerable:!0,get:function(){return r}});let r=(0,n(54832).createAsyncLocalStorage)();("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},20030:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"staticGenerationAsyncStorage",{enumerable:!0,get:function(){return r}});let r=(0,n(54832).createAsyncLocalStorage)();("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},34040:function(e,t,n){"use strict";var r=n(54887);t.createRoot=r.createRoot,t.hydrateRoot=r.hydrateRoot},54887:function(e,t,n){"use strict";!function e(){if("undefined"!=typeof __REACT_DEVTOOLS_GLOBAL_HOOK__&&"function"==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE)try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(e){console.error(e)}}(),e.exports=n(84417)},97950:function(e,t,n){"use strict";var r=n(54887),o={stream:!0},u=new Map;function l(e){var t=n(e);return"function"!=typeof t.then||"fulfilled"===t.status?null:(t.then(function(e){t.status="fulfilled",t.value=e},function(e){t.status="rejected",t.reason=e}),t)}function a(){}var i=new Map,c=n.u;n.u=function(e){var t=i.get(e);return void 0!==t?t:c(e)};var 
s=r.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED.Dispatcher,f=Symbol.for("react.element"),d=Symbol.for("react.lazy"),p=Symbol.iterator,h=Array.isArray,y=Object.getPrototypeOf,_=Object.prototype,v=new WeakMap;function b(e,t,n,r){this.status=e,this.value=t,this.reason=n,this._response=r}function g(e){switch(e.status){case"resolved_model":E(e);break;case"resolved_module":w(e)}switch(e.status){case"fulfilled":return e.value;case"pending":case"blocked":case"cyclic":throw e;default:throw e.reason}}function m(e,t){for(var n=0;nh?(_=h,h=3,p++):(_=0,h=3);continue;case 2:44===(m=d[p++])?h=4:v=v<<4|(96d.length&&(m=-1)}var O=d.byteOffset+p;if(-1{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M11.9999 13.1714L16.9497 8.22168L18.3639 9.63589L11.9999 15.9999L5.63599 9.63589L7.0502 8.22168L11.9999 13.1714Z"}))}},4537:function(e,t,n){"use strict";n.d(t,{Z:function(){return i}});var r=n(5853),o=n(2265);let i=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M12 22C6.47715 22 2 17.5228 2 12C2 6.47715 6.47715 2 12 2C17.5228 2 22 6.47715 22 12C22 17.5228 17.5228 22 12 22ZM12 10.5858L9.17157 7.75736L7.75736 9.17157L10.5858 12L7.75736 14.8284L9.17157 16.2426L12 13.4142L14.8284 16.2426L16.2426 14.8284L13.4142 12L16.2426 9.17157L14.8284 7.75736L12 10.5858Z"}))}},75105:function(e,t,n){"use strict";n.d(t,{Z:function(){return et}});var r=n(5853),o=n(2265),i=n(47625),a=n(93765),l=n(61994),c=n(59221),s=n(86757),u=n.n(s),d=n(95645),f=n.n(d),p=n(77571),h=n.n(p),m=n(82559),g=n.n(m),v=n(21652),y=n.n(v),b=n(57165),x=n(81889),w=n(9841),S=n(58772),k=n(34067),E=n(16630),C=n(85355),O=n(82944),j=["layout","type","stroke","connectNulls","isRange","ref"];function P(e){return(P="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function N(){return(N=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(i,j));return o.createElement(w.m,{clipPath:n?"url(#clipPath-".concat(r,")"):null},o.createElement(b.H,N({},(0,O.L6)(d,!0),{points:e,connectNulls:s,type:l,baseLine:t,layout:a,stroke:"none",className:"recharts-area-area"})),"none"!==c&&o.createElement(b.H,N({},(0,O.L6)(this.props,!1),{className:"recharts-area-curve",layout:a,type:l,connectNulls:s,fill:"none",points:e})),"none"!==c&&u&&o.createElement(b.H,N({},(0,O.L6)(this.props,!1),{className:"recharts-area-curve",layout:a,type:l,connectNulls:s,fill:"none",points:t})))}},{key:"renderAreaWithAnimation",value:function(e,t){var n=this,r=this.props,i=r.points,a=r.baseLine,l=r.isAnimationActive,s=r.animationBegin,u=r.animationDuration,d=r.animationEasing,f=r.animationId,p=this.state,m=p.prevPoints,v=p.prevBaseLine;return o.createElement(c.ZP,{begin:s,duration:u,isActive:l,easing:d,from:{t:0},to:{t:1},key:"area-".concat(f),onAnimationEnd:this.handleAnimationEnd,onAnimationStart:this.handleAnimationStart},function(r){var l=r.t;if(m){var c,s=m.length/i.length,u=i.map(function(e,t){var n=Math.floor(t*s);if(m[n]){var r=m[n],o=(0,E.k4)(r.x,e.x),i=(0,E.k4)(r.y,e.y);return 
I(I({},e),{},{x:o(l),y:i(l)})}return e});return c=(0,E.hj)(a)&&"number"==typeof a?(0,E.k4)(v,a)(l):h()(a)||g()(a)?(0,E.k4)(v,0)(l):a.map(function(e,t){var n=Math.floor(t*s);if(v[n]){var r=v[n],o=(0,E.k4)(r.x,e.x),i=(0,E.k4)(r.y,e.y);return I(I({},e),{},{x:o(l),y:i(l)})}return e}),n.renderAreaStatically(u,c,e,t)}return o.createElement(w.m,null,o.createElement("defs",null,o.createElement("clipPath",{id:"animationClipPath-".concat(t)},n.renderClipRect(l))),o.createElement(w.m,{clipPath:"url(#animationClipPath-".concat(t,")")},n.renderAreaStatically(i,a,e,t)))})}},{key:"renderArea",value:function(e,t){var n=this.props,r=n.points,o=n.baseLine,i=n.isAnimationActive,a=this.state,l=a.prevPoints,c=a.prevBaseLine,s=a.totalLength;return i&&r&&r.length&&(!l&&s>0||!y()(l,r)||!y()(c,o))?this.renderAreaWithAnimation(e,t):this.renderAreaStatically(r,o,e,t)}},{key:"render",value:function(){var e,t=this.props,n=t.hide,r=t.dot,i=t.points,a=t.className,c=t.top,s=t.left,u=t.xAxis,d=t.yAxis,f=t.width,p=t.height,m=t.isAnimationActive,g=t.id;if(n||!i||!i.length)return null;var v=this.state.isAnimationFinished,y=1===i.length,b=(0,l.Z)("recharts-area",a),x=u&&u.allowDataOverflow,k=d&&d.allowDataOverflow,E=x||k,C=h()(g)?this.id:g,j=null!==(e=(0,O.L6)(r,!1))&&void 0!==e?e:{r:3,strokeWidth:2},P=j.r,N=j.strokeWidth,M=((0,O.$k)(r)?r:{}).clipDot,I=void 0===M||M,R=2*(void 0===P?3:P)+(void 0===N?2:N);return o.createElement(w.m,{className:b},x||k?o.createElement("defs",null,o.createElement("clipPath",{id:"clipPath-".concat(C)},o.createElement("rect",{x:x?s:s-f/2,y:k?c:c-p/2,width:x?f:2*f,height:k?p:2*p})),!I&&o.createElement("clipPath",{id:"clipPath-dots-".concat(C)},o.createElement("rect",{x:s-R/2,y:c-R/2,width:f+R,height:p+R}))):null,y?null:this.renderArea(E,C),(r||y)&&this.renderDots(E,I,C),(!m||v)&&S.e.renderCallByParent(this.props,i))}}],r=[{key:"getDerivedStateFromProps",value:function(e,t){return e.animationId!==t.prevAnimationId?{prevAnimationId:e.animationId,curPoints:e.points,curBaseLine:e.baseLine,prevPoints:t.curPoints,prevBaseLine:t.curBaseLine}:e.points!==t.curPoints||e.baseLine!==t.curBaseLine?{curPoints:e.points,curBaseLine:e.baseLine}:null}}],n&&R(a.prototype,n),r&&R(a,r),Object.defineProperty(a,"prototype",{writable:!1}),a}(o.PureComponent);D(L,"displayName","Area"),D(L,"defaultProps",{stroke:"#3182bd",fill:"#3182bd",fillOpacity:.6,xAxisId:0,yAxisId:0,legendType:"line",connectNulls:!1,points:[],dot:!1,activeDot:!0,hide:!1,isAnimationActive:!k.x.isSsr,animationBegin:0,animationDuration:1500,animationEasing:"ease"}),D(L,"getBaseValue",function(e,t,n,r){var o=e.layout,i=e.baseValue,a=t.props.baseValue,l=null!=a?a:i;if((0,E.hj)(l)&&"number"==typeof l)return l;var c="horizontal"===o?r:n,s=c.scale.domain();if("number"===c.type){var u=Math.max(s[0],s[1]),d=Math.min(s[0],s[1]);return"dataMin"===l?d:"dataMax"===l?u:u<0?u:Math.max(Math.min(s[0],s[1]),0)}return"dataMin"===l?s[0]:"dataMax"===l?s[1]:s[0]}),D(L,"getComposedData",function(e){var t,n=e.props,r=e.item,o=e.xAxis,i=e.yAxis,a=e.xAxisTicks,l=e.yAxisTicks,c=e.bandSize,s=e.dataKey,u=e.stackedData,d=e.dataStartIndex,f=e.displayedData,p=e.offset,h=n.layout,m=u&&u.length,g=L.getBaseValue(n,r,o,i),v="horizontal"===h,y=!1,b=f.map(function(e,t){m?n=u[d+t]:Array.isArray(n=(0,C.F$)(e,s))?y=!0:n=[g,n];var n,r=null==n[1]||m&&null==(0,C.F$)(e,s);return v?{x:(0,C.Hv)({axis:o,ticks:a,bandSize:c,entry:e,index:t}),y:r?null:i.scale(n[1]),value:n,payload:e}:{x:r?null:o.scale(n[1]),y:(0,C.Hv)({axis:i,ticks:l,bandSize:c,entry:e,index:t}),value:n,payload:e}});return 
t=m||y?b.map(function(e){var t=Array.isArray(e.value)?e.value[0]:null;return v?{x:e.x,y:null!=t&&null!=e.y?i.scale(t):null}:{x:null!=t?o.scale(t):null,y:e.y}}):v?i.scale(g):o.scale(g),I({points:b,baseLine:t,layout:h,isRange:y},p)}),D(L,"renderDotItem",function(e,t){return o.isValidElement(e)?o.cloneElement(e,t):u()(e)?e(t):o.createElement(x.o,N({},t,{className:"recharts-area-dot"}))});var z=n(97059),B=n(62994),F=n(25311),H=(0,a.z)({chartName:"AreaChart",GraphicalChild:L,axisComponents:[{axisType:"xAxis",AxisComp:z.K},{axisType:"yAxis",AxisComp:B.B}],formatAxisMap:F.t9}),q=n(56940),W=n(8147),K=n(22190),V=n(54061),U=n(65278),G=n(98593),X=n(69448),$=n(32644),Y=n(7084),Q=n(26898),J=n(65954),ee=n(1153);let et=o.forwardRef((e,t)=>{let{data:n=[],categories:a=[],index:l,stack:c=!1,colors:s=Q.s,valueFormatter:u=ee.Cj,startEndOnly:d=!1,showXAxis:f=!0,showYAxis:p=!0,yAxisWidth:h=56,intervalType:m="equidistantPreserveStart",showAnimation:g=!1,animationDuration:v=900,showTooltip:y=!0,showLegend:b=!0,showGridLines:w=!0,showGradient:S=!0,autoMinValue:k=!1,curveType:E="linear",minValue:C,maxValue:O,connectNulls:j=!1,allowDecimals:P=!0,noDataText:N,className:M,onValueChange:I,enableLegendSlider:R=!1,customTooltip:T,rotateLabelX:A,tickGap:_=5}=e,D=(0,r._T)(e,["data","categories","index","stack","colors","valueFormatter","startEndOnly","showXAxis","showYAxis","yAxisWidth","intervalType","showAnimation","animationDuration","showTooltip","showLegend","showGridLines","showGradient","autoMinValue","curveType","minValue","maxValue","connectNulls","allowDecimals","noDataText","className","onValueChange","enableLegendSlider","customTooltip","rotateLabelX","tickGap"]),Z=(f||p)&&(!d||p)?20:0,[F,et]=(0,o.useState)(60),[en,er]=(0,o.useState)(void 0),[eo,ei]=(0,o.useState)(void 0),ea=(0,$.me)(a,s),el=(0,$.i4)(k,C,O),ec=!!I;function es(e){ec&&(e===eo&&!en||(0,$.FB)(n,e)&&en&&en.dataKey===e?(ei(void 0),null==I||I(null)):(ei(e),null==I||I({eventType:"category",categoryClicked:e})),er(void 0))}return o.createElement("div",Object.assign({ref:t,className:(0,J.q)("w-full h-80",M)},D),o.createElement(i.h,{className:"h-full w-full"},(null==n?void 0:n.length)?o.createElement(H,{data:n,onClick:ec&&(eo||en)?()=>{er(void 0),ei(void 0),null==I||I(null)}:void 0},w?o.createElement(q.q,{className:(0,J.q)("stroke-1","stroke-tremor-border","dark:stroke-dark-tremor-border"),horizontal:!0,vertical:!1}):null,o.createElement(z.K,{padding:{left:Z,right:Z},hide:!f,dataKey:l,tick:{transform:"translate(0, 6)"},ticks:d?[n[0][l],n[n.length-1][l]]:void 0,fill:"",stroke:"",className:(0,J.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),interval:d?"preserveStartEnd":m,tickLine:!1,axisLine:!1,minTickGap:_,angle:null==A?void 0:A.angle,dy:null==A?void 0:A.verticalShift,height:null==A?void 0:A.xAxisHeight}),o.createElement(B.B,{width:h,hide:!p,axisLine:!1,tickLine:!1,type:"number",domain:el,tick:{transform:"translate(-3, 0)"},fill:"",stroke:"",className:(0,J.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickFormatter:u,allowDecimals:P}),o.createElement(W.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,cursor:{stroke:"#d1d5db",strokeWidth:1},content:y?e=>{let{active:t,payload:n,label:r}=e;return T?o.createElement(T,{payload:null==n?void 0:n.map(e=>{var t;return Object.assign(Object.assign({},e),{color:null!==(t=ea.get(e.dataKey))&&void 
0!==t?t:Y.fr.Gray})}),active:t,label:r}):o.createElement(G.ZP,{active:t,payload:n,label:r,valueFormatter:u,categoryColors:ea})}:o.createElement(o.Fragment,null),position:{y:0}}),b?o.createElement(K.D,{verticalAlign:"top",height:F,content:e=>{let{payload:t}=e;return(0,U.Z)({payload:t},ea,et,eo,ec?e=>es(e):void 0,R)}}):null,a.map(e=>{var t,n;return o.createElement("defs",{key:e},S?o.createElement("linearGradient",{className:(0,ee.bM)(null!==(t=ea.get(e))&&void 0!==t?t:Y.fr.Gray,Q.K.text).textColor,id:ea.get(e),x1:"0",y1:"0",x2:"0",y2:"1"},o.createElement("stop",{offset:"5%",stopColor:"currentColor",stopOpacity:en||eo&&eo!==e?.15:.4}),o.createElement("stop",{offset:"95%",stopColor:"currentColor",stopOpacity:0})):o.createElement("linearGradient",{className:(0,ee.bM)(null!==(n=ea.get(e))&&void 0!==n?n:Y.fr.Gray,Q.K.text).textColor,id:ea.get(e),x1:"0",y1:"0",x2:"0",y2:"1"},o.createElement("stop",{stopColor:"currentColor",stopOpacity:en||eo&&eo!==e?.1:.3})))}),a.map(e=>{var t;return o.createElement(L,{className:(0,ee.bM)(null!==(t=ea.get(e))&&void 0!==t?t:Y.fr.Gray,Q.K.text).strokeColor,strokeOpacity:en||eo&&eo!==e?.3:1,activeDot:e=>{var t;let{cx:r,cy:i,stroke:a,strokeLinecap:l,strokeLinejoin:c,strokeWidth:s,dataKey:u}=e;return o.createElement(x.o,{className:(0,J.q)("stroke-tremor-background dark:stroke-dark-tremor-background",I?"cursor-pointer":"",(0,ee.bM)(null!==(t=ea.get(u))&&void 0!==t?t:Y.fr.Gray,Q.K.text).fillColor),cx:r,cy:i,r:5,fill:"",stroke:a,strokeLinecap:l,strokeLinejoin:c,strokeWidth:s,onClick:(t,r)=>{r.stopPropagation(),ec&&(e.index===(null==en?void 0:en.index)&&e.dataKey===(null==en?void 0:en.dataKey)||(0,$.FB)(n,e.dataKey)&&eo&&eo===e.dataKey?(ei(void 0),er(void 0),null==I||I(null)):(ei(e.dataKey),er({index:e.index,dataKey:e.dataKey}),null==I||I(Object.assign({eventType:"dot",categoryClicked:e.dataKey},e.payload))))}})},dot:t=>{var r;let{stroke:i,strokeLinecap:a,strokeLinejoin:l,strokeWidth:c,cx:s,cy:u,dataKey:d,index:f}=t;return(0,$.FB)(n,e)&&!(en||eo&&eo!==e)||(null==en?void 0:en.index)===f&&(null==en?void 0:en.dataKey)===e?o.createElement(x.o,{key:f,cx:s,cy:u,r:5,stroke:i,fill:"",strokeLinecap:a,strokeLinejoin:l,strokeWidth:c,className:(0,J.q)("stroke-tremor-background dark:stroke-dark-tremor-background",I?"cursor-pointer":"",(0,ee.bM)(null!==(r=ea.get(d))&&void 0!==r?r:Y.fr.Gray,Q.K.text).fillColor)}):o.createElement(o.Fragment,{key:f})},key:e,name:e,type:E,dataKey:e,stroke:"",fill:"url(#".concat(ea.get(e),")"),strokeWidth:2,strokeLinejoin:"round",strokeLinecap:"round",isAnimationActive:g,animationDuration:v,stackId:c?"a":void 0,connectNulls:j})}),I?a.map(e=>o.createElement(V.x,{className:(0,J.q)("cursor-pointer"),strokeOpacity:0,key:e,name:e,type:E,dataKey:e,stroke:"transparent",fill:"transparent",legendType:"none",tooltipType:"none",strokeWidth:12,connectNulls:j,onClick:(e,t)=>{t.stopPropagation();let{name:n}=e;es(n)}})):null):o.createElement(X.Z,{noDataText:N})))});et.displayName="AreaChart"},40278:function(e,t,n){"use strict";n.d(t,{Z:function(){return k}});var r=n(5853),o=n(7084),i=n(26898),a=n(65954),l=n(1153),c=n(2265),s=n(47625),u=n(93765),d=n(31699),f=n(97059),p=n(62994),h=n(25311),m=(0,u.z)({chartName:"BarChart",GraphicalChild:d.$,defaultTooltipEventType:"axis",validateTooltipEventTypes:["axis","item"],axisComponents:[{axisType:"xAxis",AxisComp:f.K},{axisType:"yAxis",AxisComp:p.B}],formatAxisMap:h.t9}),g=n(56940),v=n(8147),y=n(22190),b=n(65278),x=n(98593),w=n(69448),S=n(32644);let 
k=c.forwardRef((e,t)=>{let{data:n=[],categories:u=[],index:h,colors:k=i.s,valueFormatter:E=l.Cj,layout:C="horizontal",stack:O=!1,relative:j=!1,startEndOnly:P=!1,animationDuration:N=900,showAnimation:M=!1,showXAxis:I=!0,showYAxis:R=!0,yAxisWidth:T=56,intervalType:A="equidistantPreserveStart",showTooltip:_=!0,showLegend:D=!0,showGridLines:Z=!0,autoMinValue:L=!1,minValue:z,maxValue:B,allowDecimals:F=!0,noDataText:H,onValueChange:q,enableLegendSlider:W=!1,customTooltip:K,rotateLabelX:V,tickGap:U=5,className:G}=e,X=(0,r._T)(e,["data","categories","index","colors","valueFormatter","layout","stack","relative","startEndOnly","animationDuration","showAnimation","showXAxis","showYAxis","yAxisWidth","intervalType","showTooltip","showLegend","showGridLines","autoMinValue","minValue","maxValue","allowDecimals","noDataText","onValueChange","enableLegendSlider","customTooltip","rotateLabelX","tickGap","className"]),$=I||R?20:0,[Y,Q]=(0,c.useState)(60),J=(0,S.me)(u,k),[ee,et]=c.useState(void 0),[en,er]=(0,c.useState)(void 0),eo=!!q;function ei(e,t,n){var r,o,i,a;n.stopPropagation(),q&&((0,S.vZ)(ee,Object.assign(Object.assign({},e.payload),{value:e.value}))?(er(void 0),et(void 0),null==q||q(null)):(er(null===(o=null===(r=e.tooltipPayload)||void 0===r?void 0:r[0])||void 0===o?void 0:o.dataKey),et(Object.assign(Object.assign({},e.payload),{value:e.value})),null==q||q(Object.assign({eventType:"bar",categoryClicked:null===(a=null===(i=e.tooltipPayload)||void 0===i?void 0:i[0])||void 0===a?void 0:a.dataKey},e.payload))))}let ea=(0,S.i4)(L,z,B);return c.createElement("div",Object.assign({ref:t,className:(0,a.q)("w-full h-80",G)},X),c.createElement(s.h,{className:"h-full w-full"},(null==n?void 0:n.length)?c.createElement(m,{data:n,stackOffset:O?"sign":j?"expand":"none",layout:"vertical"===C?"vertical":"horizontal",onClick:eo&&(en||ee)?()=>{et(void 0),er(void 0),null==q||q(null)}:void 0},Z?c.createElement(g.q,{className:(0,a.q)("stroke-1","stroke-tremor-border","dark:stroke-dark-tremor-border"),horizontal:"vertical"!==C,vertical:"vertical"===C}):null,"vertical"!==C?c.createElement(f.K,{padding:{left:$,right:$},hide:!I,dataKey:h,interval:P?"preserveStartEnd":A,tick:{transform:"translate(0, 6)"},ticks:P?[n[0][h],n[n.length-1][h]]:void 0,fill:"",stroke:"",className:(0,a.q)("mt-4 text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickLine:!1,axisLine:!1,angle:null==V?void 0:V.angle,dy:null==V?void 0:V.verticalShift,height:null==V?void 0:V.xAxisHeight,minTickGap:U}):c.createElement(f.K,{hide:!I,type:"number",tick:{transform:"translate(-3, 0)"},domain:ea,fill:"",stroke:"",className:(0,a.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickLine:!1,axisLine:!1,tickFormatter:E,minTickGap:U,allowDecimals:F,angle:null==V?void 0:V.angle,dy:null==V?void 0:V.verticalShift,height:null==V?void 0:V.xAxisHeight}),"vertical"!==C?c.createElement(p.B,{width:T,hide:!R,axisLine:!1,tickLine:!1,type:"number",domain:ea,tick:{transform:"translate(-3, 0)"},fill:"",stroke:"",className:(0,a.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickFormatter:j?e=>"".concat((100*e).toString()," %"):E,allowDecimals:F}):c.createElement(p.B,{width:T,hide:!R,dataKey:h,axisLine:!1,tickLine:!1,ticks:P?[n[0][h],n[n.length-1][h]]:void 0,type:"category",interval:"preserveStartEnd",tick:{transform:"translate(0, 
6)"},fill:"",stroke:"",className:(0,a.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content")}),c.createElement(v.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,cursor:{fill:"#d1d5db",opacity:"0.15"},content:_?e=>{let{active:t,payload:n,label:r}=e;return K?c.createElement(K,{payload:null==n?void 0:n.map(e=>{var t;return Object.assign(Object.assign({},e),{color:null!==(t=J.get(e.dataKey))&&void 0!==t?t:o.fr.Gray})}),active:t,label:r}):c.createElement(x.ZP,{active:t,payload:n,label:r,valueFormatter:E,categoryColors:J})}:c.createElement(c.Fragment,null),position:{y:0}}),D?c.createElement(y.D,{verticalAlign:"top",height:Y,content:e=>{let{payload:t}=e;return(0,b.Z)({payload:t},J,Q,en,eo?e=>{eo&&(e!==en||ee?(er(e),null==q||q({eventType:"category",categoryClicked:e})):(er(void 0),null==q||q(null)),et(void 0))}:void 0,W)}}):null,u.map(e=>{var t;return c.createElement(d.$,{className:(0,a.q)((0,l.bM)(null!==(t=J.get(e))&&void 0!==t?t:o.fr.Gray,i.K.background).fillColor,q?"cursor-pointer":""),key:e,name:e,type:"linear",stackId:O||j?"a":void 0,dataKey:e,fill:"",isAnimationActive:M,animationDuration:N,shape:e=>((e,t,n,r)=>{let{fillOpacity:o,name:i,payload:a,value:l}=e,{x:s,width:u,y:d,height:f}=e;return"horizontal"===r&&f<0?(d+=f,f=Math.abs(f)):"vertical"===r&&u<0&&(s+=u,u=Math.abs(u)),c.createElement("rect",{x:s,y:d,width:u,height:f,opacity:t||n&&n!==i?(0,S.vZ)(t,Object.assign(Object.assign({},a),{value:l}))?o:.3:o})})(e,ee,en,C),onClick:ei})})):c.createElement(w.Z,{noDataText:H})))});k.displayName="BarChart"},14042:function(e,t,n){"use strict";n.d(t,{Z:function(){return ez}});var r=n(5853),o=n(7084),i=n(26898),a=n(65954),l=n(1153),c=n(2265),s=n(60474),u=n(47625),d=n(93765),f=n(86757),p=n.n(f),h=n(9841),m=n(81889),g=n(61994),v=n(82944),y=["points","className","baseLinePoints","connectNulls"];function b(){return(b=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&void 0!==arguments[0]?arguments[0]:[],t=[[]];return e.forEach(function(e){S(e)?t[t.length-1].push(e):t[t.length-1].length>0&&t.push([])}),S(e[0])&&t[t.length-1].push(e[0]),t[t.length-1].length<=0&&(t=t.slice(0,-1)),t},E=function(e,t){var n=k(e);t&&(n=[n.reduce(function(e,t){return[].concat(x(e),x(t))},[])]);var r=n.map(function(e){return e.reduce(function(e,t,n){return"".concat(e).concat(0===n?"M":"L").concat(t.x,",").concat(t.y)},"")}).join("");return 1===n.length?"".concat(r,"Z"):r},C=function(e,t,n){var r=E(e,n);return"".concat("Z"===r.slice(-1)?r.slice(0,-1):r,"L").concat(E(t.reverse(),n).slice(1))},O=function(e){var t=e.points,n=e.className,r=e.baseLinePoints,o=e.connectNulls,i=function(e,t){if(null==e)return{};var n,r,o=function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(e,y);if(!t||!t.length)return null;var a=(0,g.Z)("recharts-polygon",n);if(r&&r.length){var l=i.stroke&&"none"!==i.stroke,s=C(t,r,o);return c.createElement("g",{className:a},c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"Z"===s.slice(-1)?i.fill:"none",stroke:"none",d:s})),l?c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"none",d:E(t,o)})):null,l?c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"none",d:E(r,o)})):null)}var u=E(t,o);return 
c.createElement("path",b({},(0,v.L6)(i,!0),{fill:"Z"===u.slice(-1)?i.fill:"none",className:a,d:u}))},j=n(58811),P=n(41637),N=n(39206);function M(e){return(M="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function I(){return(I=Object.assign?Object.assign.bind():function(e){for(var t=1;t1e-5?"outer"===t?"start":"end":n<-.00001?"outer"===t?"end":"start":"middle"}},{key:"renderAxisLine",value:function(){var e=this.props,t=e.cx,n=e.cy,r=e.radius,o=e.axisLine,i=e.axisLineType,a=T(T({},(0,v.L6)(this.props,!1)),{},{fill:"none"},(0,v.L6)(o,!1));if("circle"===i)return c.createElement(m.o,I({className:"recharts-polar-angle-axis-line"},a,{cx:t,cy:n,r:r}));var l=this.props.ticks.map(function(e){return(0,N.op)(t,n,r,e.coordinate)});return c.createElement(O,I({className:"recharts-polar-angle-axis-line"},a,{points:l}))}},{key:"renderTicks",value:function(){var e=this,t=this.props,n=t.ticks,r=t.tick,o=t.tickLine,a=t.tickFormatter,l=t.stroke,s=(0,v.L6)(this.props,!1),u=(0,v.L6)(r,!1),d=T(T({},s),{},{fill:"none"},(0,v.L6)(o,!1)),f=n.map(function(t,n){var f=e.getTickLineCoord(t),p=T(T(T({textAnchor:e.getTickTextAnchor(t)},s),{},{stroke:"none",fill:l},u),{},{index:n,payload:t,x:f.x2,y:f.y2});return c.createElement(h.m,I({className:"recharts-polar-angle-axis-tick",key:"tick-".concat(t.coordinate)},(0,P.bw)(e.props,t,n)),o&&c.createElement("line",I({className:"recharts-polar-angle-axis-tick-line"},d,f)),r&&i.renderTickItem(r,p,a?a(t.value,n):t.value))});return c.createElement(h.m,{className:"recharts-polar-angle-axis-ticks"},f)}},{key:"render",value:function(){var e=this.props,t=e.ticks,n=e.radius,r=e.axisLine;return!(n<=0)&&t&&t.length?c.createElement(h.m,{className:"recharts-polar-angle-axis"},r&&this.renderAxisLine(),this.renderTicks()):null}}],r=[{key:"renderTickItem",value:function(e,t,n){return c.isValidElement(e)?c.cloneElement(e,t):p()(e)?e(t):c.createElement(j.x,I({},t,{className:"recharts-polar-angle-axis-tick-value"}),n)}}],n&&A(i.prototype,n),r&&A(i,r),Object.defineProperty(i,"prototype",{writable:!1}),i}(c.PureComponent);Z(B,"displayName","PolarAngleAxis"),Z(B,"axisType","angleAxis"),Z(B,"defaultProps",{type:"category",angleAxisId:0,scale:"auto",cx:0,cy:0,orientation:"outer",axisLine:!0,tickLine:!0,tickSize:8,tick:!0,hide:!1,allowDuplicatedCategory:!0});var F=n(35802),H=n.n(F),q=n(37891),W=n.n(q),K=n(26680),V=["cx","cy","angle","ticks","axisLine"],U=["ticks","tick","angle","tickFormatter","stroke"];function G(e){return(G="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function X(){return(X=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}function J(e,t){for(var n=0;n0?el()(e,"paddingAngle",0):0;if(n){var l=(0,eg.k4)(n.endAngle-n.startAngle,e.endAngle-e.startAngle),c=ek(ek({},e),{},{startAngle:i+a,endAngle:i+l(r)+a});o.push(c),i=c.endAngle}else{var s=e.endAngle,d=e.startAngle,f=(0,eg.k4)(0,s-d)(r),p=ek(ek({},e),{},{startAngle:i+a,endAngle:i+f+a});o.push(p),i=p.endAngle}}),c.createElement(h.m,null,e.renderSectorsStatically(o))})}},{key:"attachKeyboardHandlers",value:function(e){var 
t=this;e.onkeydown=function(e){if(!e.altKey)switch(e.key){case"ArrowLeft":var n=++t.state.sectorToFocus%t.sectorRefs.length;t.sectorRefs[n].focus(),t.setState({sectorToFocus:n});break;case"ArrowRight":var r=--t.state.sectorToFocus<0?t.sectorRefs.length-1:t.state.sectorToFocus%t.sectorRefs.length;t.sectorRefs[r].focus(),t.setState({sectorToFocus:r});break;case"Escape":t.sectorRefs[t.state.sectorToFocus].blur(),t.setState({sectorToFocus:0})}}}},{key:"renderSectors",value:function(){var e=this.props,t=e.sectors,n=e.isAnimationActive,r=this.state.prevSectors;return n&&t&&t.length&&(!r||!es()(r,t))?this.renderSectorsWithAnimation():this.renderSectorsStatically(t)}},{key:"componentDidMount",value:function(){this.pieRef&&this.attachKeyboardHandlers(this.pieRef)}},{key:"render",value:function(){var e=this,t=this.props,n=t.hide,r=t.sectors,o=t.className,i=t.label,a=t.cx,l=t.cy,s=t.innerRadius,u=t.outerRadius,d=t.isAnimationActive,f=this.state.isAnimationFinished;if(n||!r||!r.length||!(0,eg.hj)(a)||!(0,eg.hj)(l)||!(0,eg.hj)(s)||!(0,eg.hj)(u))return null;var p=(0,g.Z)("recharts-pie",o);return c.createElement(h.m,{tabIndex:this.props.rootTabIndex,className:p,ref:function(t){e.pieRef=t}},this.renderSectors(),i&&this.renderLabels(r),K._.renderCallByParent(this.props,null,!1),(!d||f)&&ep.e.renderCallByParent(this.props,r,!1))}}],r=[{key:"getDerivedStateFromProps",value:function(e,t){return t.prevIsAnimationActive!==e.isAnimationActive?{prevIsAnimationActive:e.isAnimationActive,prevAnimationId:e.animationId,curSectors:e.sectors,prevSectors:[],isAnimationFinished:!0}:e.isAnimationActive&&e.animationId!==t.prevAnimationId?{prevAnimationId:e.animationId,curSectors:e.sectors,prevSectors:t.curSectors,isAnimationFinished:!0}:e.sectors!==t.curSectors?{curSectors:e.sectors,isAnimationFinished:!0}:null}},{key:"getTextAnchor",value:function(e,t){return e>t?"start":e=360?x:x-1)*u,S=i.reduce(function(e,t){var n=(0,ev.F$)(t,b,0);return e+((0,eg.hj)(n)?n:0)},0);return S>0&&(t=i.map(function(e,t){var r,o=(0,ev.F$)(e,b,0),i=(0,ev.F$)(e,f,t),a=((0,eg.hj)(o)?o:0)/S,s=(r=t?n.endAngle+(0,eg.uY)(v)*u*(0!==o?1:0):c)+(0,eg.uY)(v)*((0!==o?m:0)+a*w),d=(r+s)/2,p=(g.innerRadius+g.outerRadius)/2,y=[{name:i,value:o,payload:e,dataKey:b,type:h}],x=(0,N.op)(g.cx,g.cy,p,d);return n=ek(ek(ek({percent:a,cornerRadius:l,name:i,tooltipPayload:y,midAngle:d,middleRadius:p,tooltipPosition:x},e),g),{},{value:(0,ev.F$)(e,b),startAngle:r,endAngle:s,payload:e,paddingAngle:(0,eg.uY)(v)*u})})),ek(ek({},g),{},{sectors:t,data:i})});var eI=(0,d.z)({chartName:"PieChart",GraphicalChild:eM,validateTooltipEventTypes:["item"],defaultTooltipEventType:"item",legendContent:"children",axisComponents:[{axisType:"angleAxis",AxisComp:B},{axisType:"radiusAxis",AxisComp:eo}],formatAxisMap:N.t9,defaultProps:{layout:"centric",startAngle:0,endAngle:360,cx:"50%",cy:"50%",innerRadius:0,outerRadius:"80%"}}),eR=n(8147),eT=n(69448),eA=n(98593);let e_=e=>{let{active:t,payload:n,valueFormatter:r}=e;if(t&&(null==n?void 0:n[0])){let e=null==n?void 0:n[0];return c.createElement(eA.$B,null,c.createElement("div",{className:(0,a.q)("px-4 py-2")},c.createElement(eA.zX,{value:r(e.value),name:e.name,color:e.payload.color})))}return null},eD=(e,t)=>e.map((e,n)=>{let r=ne||t((0,l.vP)(n.map(e=>e[r]))),eL=e=>{let{cx:t,cy:n,innerRadius:r,outerRadius:o,startAngle:i,endAngle:a,className:l}=e;return 
c.createElement("g",null,c.createElement(s.L,{cx:t,cy:n,innerRadius:r,outerRadius:o,startAngle:i,endAngle:a,className:l,fill:"",opacity:.3,style:{outline:"none"}}))},ez=c.forwardRef((e,t)=>{let{data:n=[],category:s="value",index:d="name",colors:f=i.s,variant:p="donut",valueFormatter:h=l.Cj,label:m,showLabel:g=!0,animationDuration:v=900,showAnimation:y=!1,showTooltip:b=!0,noDataText:x,onValueChange:w,customTooltip:S,className:k}=e,E=(0,r._T)(e,["data","category","index","colors","variant","valueFormatter","label","showLabel","animationDuration","showAnimation","showTooltip","noDataText","onValueChange","customTooltip","className"]),C="donut"==p,O=eZ(m,h,n,s),[j,P]=c.useState(void 0),N=!!w;return(0,c.useEffect)(()=>{let e=document.querySelectorAll(".recharts-pie-sector");e&&e.forEach(e=>{e.setAttribute("style","outline: none")})},[j]),c.createElement("div",Object.assign({ref:t,className:(0,a.q)("w-full h-40",k)},E),c.createElement(u.h,{className:"h-full w-full"},(null==n?void 0:n.length)?c.createElement(eI,{onClick:N&&j?()=>{P(void 0),null==w||w(null)}:void 0,margin:{top:0,left:0,right:0,bottom:0}},g&&C?c.createElement("text",{className:(0,a.q)("fill-tremor-content-emphasis","dark:fill-dark-tremor-content-emphasis"),x:"50%",y:"50%",textAnchor:"middle",dominantBaseline:"middle"},O):null,c.createElement(eM,{className:(0,a.q)("stroke-tremor-background dark:stroke-dark-tremor-background",w?"cursor-pointer":"cursor-default"),data:eD(n,f),cx:"50%",cy:"50%",startAngle:90,endAngle:-270,innerRadius:C?"75%":"0%",outerRadius:"100%",stroke:"",strokeLinejoin:"round",dataKey:s,nameKey:d,isAnimationActive:y,animationDuration:v,onClick:function(e,t,n){n.stopPropagation(),N&&(j===t?(P(void 0),null==w||w(null)):(P(t),null==w||w(Object.assign({eventType:"slice"},e.payload.payload))))},activeIndex:j,inactiveShape:eL,style:{outline:"none"}}),c.createElement(eR.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,content:b?e=>{var t;let{active:n,payload:r}=e;return S?c.createElement(S,{payload:null==r?void 0:r.map(e=>{var t,n,i;return Object.assign(Object.assign({},e),{color:null!==(i=null===(n=null===(t=null==r?void 0:r[0])||void 0===t?void 0:t.payload)||void 0===n?void 0:n.color)&&void 0!==i?i:o.fr.Gray})}),active:n,label:null===(t=null==r?void 0:r[0])||void 0===t?void 0:t.name}):c.createElement(e_,{active:n,payload:r,valueFormatter:h})}:c.createElement(c.Fragment,null)})):c.createElement(eT.Z,{noDataText:x})))});ez.displayName="DonutChart"},59664:function(e,t,n){"use strict";n.d(t,{Z:function(){return E}});var r=n(5853),o=n(2265),i=n(47625),a=n(93765),l=n(54061),c=n(97059),s=n(62994),u=n(25311),d=(0,a.z)({chartName:"LineChart",GraphicalChild:l.x,axisComponents:[{axisType:"xAxis",AxisComp:c.K},{axisType:"yAxis",AxisComp:s.B}],formatAxisMap:u.t9}),f=n(56940),p=n(8147),h=n(22190),m=n(81889),g=n(65278),v=n(98593),y=n(69448),b=n(32644),x=n(7084),w=n(26898),S=n(65954),k=n(1153);let 
E=o.forwardRef((e,t)=>{let{data:n=[],categories:a=[],index:u,colors:E=w.s,valueFormatter:C=k.Cj,startEndOnly:O=!1,showXAxis:j=!0,showYAxis:P=!0,yAxisWidth:N=56,intervalType:M="equidistantPreserveStart",animationDuration:I=900,showAnimation:R=!1,showTooltip:T=!0,showLegend:A=!0,showGridLines:_=!0,autoMinValue:D=!1,curveType:Z="linear",minValue:L,maxValue:z,connectNulls:B=!1,allowDecimals:F=!0,noDataText:H,className:q,onValueChange:W,enableLegendSlider:K=!1,customTooltip:V,rotateLabelX:U,tickGap:G=5}=e,X=(0,r._T)(e,["data","categories","index","colors","valueFormatter","startEndOnly","showXAxis","showYAxis","yAxisWidth","intervalType","animationDuration","showAnimation","showTooltip","showLegend","showGridLines","autoMinValue","curveType","minValue","maxValue","connectNulls","allowDecimals","noDataText","className","onValueChange","enableLegendSlider","customTooltip","rotateLabelX","tickGap"]),$=j||P?20:0,[Y,Q]=(0,o.useState)(60),[J,ee]=(0,o.useState)(void 0),[et,en]=(0,o.useState)(void 0),er=(0,b.me)(a,E),eo=(0,b.i4)(D,L,z),ei=!!W;function ea(e){ei&&(e===et&&!J||(0,b.FB)(n,e)&&J&&J.dataKey===e?(en(void 0),null==W||W(null)):(en(e),null==W||W({eventType:"category",categoryClicked:e})),ee(void 0))}return o.createElement("div",Object.assign({ref:t,className:(0,S.q)("w-full h-80",q)},X),o.createElement(i.h,{className:"h-full w-full"},(null==n?void 0:n.length)?o.createElement(d,{data:n,onClick:ei&&(et||J)?()=>{ee(void 0),en(void 0),null==W||W(null)}:void 0},_?o.createElement(f.q,{className:(0,S.q)("stroke-1","stroke-tremor-border","dark:stroke-dark-tremor-border"),horizontal:!0,vertical:!1}):null,o.createElement(c.K,{padding:{left:$,right:$},hide:!j,dataKey:u,interval:O?"preserveStartEnd":M,tick:{transform:"translate(0, 6)"},ticks:O?[n[0][u],n[n.length-1][u]]:void 0,fill:"",stroke:"",className:(0,S.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickLine:!1,axisLine:!1,minTickGap:G,angle:null==U?void 0:U.angle,dy:null==U?void 0:U.verticalShift,height:null==U?void 0:U.xAxisHeight}),o.createElement(s.B,{width:N,hide:!P,axisLine:!1,tickLine:!1,type:"number",domain:eo,tick:{transform:"translate(-3, 0)"},fill:"",stroke:"",className:(0,S.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickFormatter:C,allowDecimals:F}),o.createElement(p.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,cursor:{stroke:"#d1d5db",strokeWidth:1},content:T?e=>{let{active:t,payload:n,label:r}=e;return V?o.createElement(V,{payload:null==n?void 0:n.map(e=>{var t;return Object.assign(Object.assign({},e),{color:null!==(t=er.get(e.dataKey))&&void 0!==t?t:x.fr.Gray})}),active:t,label:r}):o.createElement(v.ZP,{active:t,payload:n,label:r,valueFormatter:C,categoryColors:er})}:o.createElement(o.Fragment,null),position:{y:0}}),A?o.createElement(h.D,{verticalAlign:"top",height:Y,content:e=>{let{payload:t}=e;return(0,g.Z)({payload:t},er,Q,et,ei?e=>ea(e):void 0,K)}}):null,a.map(e=>{var t;return o.createElement(l.x,{className:(0,S.q)((0,k.bM)(null!==(t=er.get(e))&&void 0!==t?t:x.fr.Gray,w.K.text).strokeColor),strokeOpacity:J||et&&et!==e?.3:1,activeDot:e=>{var t;let{cx:r,cy:i,stroke:a,strokeLinecap:l,strokeLinejoin:c,strokeWidth:s,dataKey:u}=e;return o.createElement(m.o,{className:(0,S.q)("stroke-tremor-background dark:stroke-dark-tremor-background",W?"cursor-pointer":"",(0,k.bM)(null!==(t=er.get(u))&&void 
0!==t?t:x.fr.Gray,w.K.text).fillColor),cx:r,cy:i,r:5,fill:"",stroke:a,strokeLinecap:l,strokeLinejoin:c,strokeWidth:s,onClick:(t,r)=>{r.stopPropagation(),ei&&(e.index===(null==J?void 0:J.index)&&e.dataKey===(null==J?void 0:J.dataKey)||(0,b.FB)(n,e.dataKey)&&et&&et===e.dataKey?(en(void 0),ee(void 0),null==W||W(null)):(en(e.dataKey),ee({index:e.index,dataKey:e.dataKey}),null==W||W(Object.assign({eventType:"dot",categoryClicked:e.dataKey},e.payload))))}})},dot:t=>{var r;let{stroke:i,strokeLinecap:a,strokeLinejoin:l,strokeWidth:c,cx:s,cy:u,dataKey:d,index:f}=t;return(0,b.FB)(n,e)&&!(J||et&&et!==e)||(null==J?void 0:J.index)===f&&(null==J?void 0:J.dataKey)===e?o.createElement(m.o,{key:f,cx:s,cy:u,r:5,stroke:i,fill:"",strokeLinecap:a,strokeLinejoin:l,strokeWidth:c,className:(0,S.q)("stroke-tremor-background dark:stroke-dark-tremor-background",W?"cursor-pointer":"",(0,k.bM)(null!==(r=er.get(d))&&void 0!==r?r:x.fr.Gray,w.K.text).fillColor)}):o.createElement(o.Fragment,{key:f})},key:e,name:e,type:Z,dataKey:e,stroke:"",strokeWidth:2,strokeLinejoin:"round",strokeLinecap:"round",isAnimationActive:R,animationDuration:I,connectNulls:B})}),W?a.map(e=>o.createElement(l.x,{className:(0,S.q)("cursor-pointer"),strokeOpacity:0,key:e,name:e,type:Z,dataKey:e,stroke:"transparent",fill:"transparent",legendType:"none",tooltipType:"none",strokeWidth:12,connectNulls:B,onClick:(e,t)=>{t.stopPropagation();let{name:n}=e;ea(n)}})):null):o.createElement(y.Z,{noDataText:H})))});E.displayName="LineChart"},65278:function(e,t,n){"use strict";n.d(t,{Z:function(){return m}});var r=n(2265);let o=(e,t)=>{let[n,o]=(0,r.useState)(t);(0,r.useEffect)(()=>{let t=()=>{o(window.innerWidth),e()};return t(),window.addEventListener("resize",t),()=>window.removeEventListener("resize",t)},[e,n])};var i=n(5853),a=n(26898),l=n(65954),c=n(1153);let s=e=>{var t=(0,i._T)(e,[]);return r.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"}),r.createElement("path",{d:"M8 12L14 6V18L8 12Z"}))},u=e=>{var t=(0,i._T)(e,[]);return r.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"}),r.createElement("path",{d:"M16 12L10 18V6L16 12Z"}))},d=(0,c.fn)("Legend"),f=e=>{let{name:t,color:n,onClick:o,activeLegend:i}=e,s=!!o;return r.createElement("li",{className:(0,l.q)(d("legendItem"),"group inline-flex items-center px-2 py-0.5 rounded-tremor-small transition whitespace-nowrap",s?"cursor-pointer":"cursor-default","text-tremor-content",s?"hover:bg-tremor-background-subtle":"","dark:text-dark-tremor-content",s?"dark:hover:bg-dark-tremor-background-subtle":""),onClick:e=>{e.stopPropagation(),null==o||o(t,n)}},r.createElement("svg",{className:(0,l.q)("flex-none h-2 w-2 mr-1.5",(0,c.bM)(n,a.K.text).textColor,i&&i!==t?"opacity-40":"opacity-100"),fill:"currentColor",viewBox:"0 0 8 8"},r.createElement("circle",{cx:4,cy:4,r:4})),r.createElement("p",{className:(0,l.q)("whitespace-nowrap truncate text-tremor-default","text-tremor-content",s?"group-hover:text-tremor-content-emphasis":"","dark:text-dark-tremor-content",i&&i!==t?"opacity-40":"opacity-100",s?"dark:group-hover:text-dark-tremor-content-emphasis":"")},t))},p=e=>{let{icon:t,onClick:n,disabled:o}=e,[i,a]=r.useState(!1),c=r.useRef(null);return 
r.useEffect(()=>(i?c.current=setInterval(()=>{null==n||n()},300):clearInterval(c.current),()=>clearInterval(c.current)),[i,n]),(0,r.useEffect)(()=>{o&&(clearInterval(c.current),a(!1))},[o]),r.createElement("button",{type:"button",className:(0,l.q)(d("legendSliderButton"),"w-5 group inline-flex items-center truncate rounded-tremor-small transition",o?"cursor-not-allowed":"cursor-pointer",o?"text-tremor-content-subtle":"text-tremor-content hover:text-tremor-content-emphasis hover:bg-tremor-background-subtle",o?"dark:text-dark-tremor-subtle":"dark:text-dark-tremor dark:hover:text-tremor-content-emphasis dark:hover:bg-dark-tremor-background-subtle"),disabled:o,onClick:e=>{e.stopPropagation(),null==n||n()},onMouseDown:e=>{e.stopPropagation(),a(!0)},onMouseUp:e=>{e.stopPropagation(),a(!1)}},r.createElement(t,{className:"w-full"}))},h=r.forwardRef((e,t)=>{var n,o;let{categories:c,colors:h=a.s,className:m,onClickLegendItem:g,activeLegend:v,enableLegendSlider:y=!1}=e,b=(0,i._T)(e,["categories","colors","className","onClickLegendItem","activeLegend","enableLegendSlider"]),x=r.useRef(null),[w,S]=r.useState(null),[k,E]=r.useState(null),C=r.useRef(null),O=(0,r.useCallback)(()=>{let e=null==x?void 0:x.current;e&&S({left:e.scrollLeft>0,right:e.scrollWidth-e.clientWidth>e.scrollLeft})},[S]),j=(0,r.useCallback)(e=>{var t;let n=null==x?void 0:x.current,r=null!==(t=null==n?void 0:n.clientWidth)&&void 0!==t?t:0;n&&y&&(n.scrollTo({left:"left"===e?n.scrollLeft-r:n.scrollLeft+r,behavior:"smooth"}),setTimeout(()=>{O()},400))},[y,O]);r.useEffect(()=>{let e=e=>{"ArrowLeft"===e?j("left"):"ArrowRight"===e&&j("right")};return k?(e(k),C.current=setInterval(()=>{e(k)},300)):clearInterval(C.current),()=>clearInterval(C.current)},[k,j]);let P=e=>{e.stopPropagation(),"ArrowLeft"!==e.key&&"ArrowRight"!==e.key||(e.preventDefault(),E(e.key))},N=e=>{e.stopPropagation(),E(null)};return r.useEffect(()=>{let e=null==x?void 0:x.current;return y&&(O(),null==e||e.addEventListener("keydown",P),null==e||e.addEventListener("keyup",N)),()=>{null==e||e.removeEventListener("keydown",P),null==e||e.removeEventListener("keyup",N)}},[O,y]),r.createElement("ol",Object.assign({ref:t,className:(0,l.q)(d("root"),"relative overflow-hidden",m)},b),r.createElement("div",{ref:x,tabIndex:0,className:(0,l.q)("h-full flex",y?(null==w?void 0:w.right)||(null==w?void 0:w.left)?"pl-4 pr-12 items-center overflow-auto snap-mandatory [&::-webkit-scrollbar]:hidden [scrollbar-width:none]":"":"flex-wrap")},c.map((e,t)=>r.createElement(f,{key:"item-".concat(t),name:e,color:h[t],onClick:g,activeLegend:v}))),y&&((null==w?void 0:w.right)||(null==w?void 0:w.left))?r.createElement(r.Fragment,null,r.createElement("div",{className:(0,l.q)("from-tremor-background","dark:from-dark-tremor-background","absolute top-0 bottom-0 left-0 w-4 bg-gradient-to-r to-transparent pointer-events-none")}),r.createElement("div",{className:(0,l.q)("to-tremor-background","dark:to-dark-tremor-background","absolute top-0 bottom-0 right-10 w-4 bg-gradient-to-r from-transparent pointer-events-none")}),r.createElement("div",{className:(0,l.q)("bg-tremor-background","dark:bg-dark-tremor-background","absolute flex top-0 pr-1 bottom-0 right-0 items-center justify-center h-full")},r.createElement(p,{icon:s,onClick:()=>{E(null),j("left")},disabled:!(null==w?void 0:w.left)}),r.createElement(p,{icon:u,onClick:()=>{E(null),j("right")},disabled:!(null==w?void 0:w.right)}))):null)});h.displayName="Legend";let m=(e,t,n,i,a,l)=>{let{payload:c}=e,s=(0,r.useRef)(null);o(()=>{var 
e,t;n((t=null===(e=s.current)||void 0===e?void 0:e.clientHeight)?Number(t)+20:60)});let u=c.filter(e=>"none"!==e.type);return r.createElement("div",{ref:s,className:"flex items-center justify-end"},r.createElement(h,{categories:u.map(e=>e.value),colors:u.map(e=>t.get(e.value)),onClickLegendItem:a,activeLegend:i,enableLegendSlider:l}))}},98593:function(e,t,n){"use strict";n.d(t,{$B:function(){return c},ZP:function(){return u},zX:function(){return s}});var r=n(2265),o=n(7084),i=n(26898),a=n(65954),l=n(1153);let c=e=>{let{children:t}=e;return r.createElement("div",{className:(0,a.q)("rounded-tremor-default text-tremor-default border","bg-tremor-background shadow-tremor-dropdown border-tremor-border","dark:bg-dark-tremor-background dark:shadow-dark-tremor-dropdown dark:border-dark-tremor-border")},t)},s=e=>{let{value:t,name:n,color:o}=e;return r.createElement("div",{className:"flex items-center justify-between space-x-8"},r.createElement("div",{className:"flex items-center space-x-2"},r.createElement("span",{className:(0,a.q)("shrink-0 rounded-tremor-full border-2 h-3 w-3","border-tremor-background shadow-tremor-card","dark:border-dark-tremor-background dark:shadow-dark-tremor-card",(0,l.bM)(o,i.K.background).bgColor)}),r.createElement("p",{className:(0,a.q)("text-right whitespace-nowrap","text-tremor-content","dark:text-dark-tremor-content")},n)),r.createElement("p",{className:(0,a.q)("font-medium tabular-nums text-right whitespace-nowrap","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis")},t))},u=e=>{let{active:t,payload:n,label:i,categoryColors:l,valueFormatter:u}=e;if(t&&n){let e=n.filter(e=>"none"!==e.type);return r.createElement(c,null,r.createElement("div",{className:(0,a.q)("border-tremor-border border-b px-4 py-2","dark:border-dark-tremor-border")},r.createElement("p",{className:(0,a.q)("font-medium","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis")},i)),r.createElement("div",{className:(0,a.q)("px-4 py-2 space-y-1")},e.map((e,t)=>{var n;let{value:i,name:a}=e;return r.createElement(s,{key:"id-".concat(t),value:u(i),name:a,color:null!==(n=l.get(a))&&void 0!==n?n:o.fr.Blue})})))}return null}},69448:function(e,t,n){"use strict";n.d(t,{Z:function(){return f}});var r=n(65954),o=n(2265),i=n(5853);let a=(0,n(1153).fn)("Flex"),l={start:"justify-start",end:"justify-end",center:"justify-center",between:"justify-between",around:"justify-around",evenly:"justify-evenly"},c={start:"items-start",end:"items-end",center:"items-center",baseline:"items-baseline",stretch:"items-stretch"},s={row:"flex-row",col:"flex-col","row-reverse":"flex-row-reverse","col-reverse":"flex-col-reverse"},u=o.forwardRef((e,t)=>{let{flexDirection:n="row",justifyContent:u="between",alignItems:d="center",children:f,className:p}=e,h=(0,i._T)(e,["flexDirection","justifyContent","alignItems","children","className"]);return o.createElement("div",Object.assign({ref:t,className:(0,r.q)(a("root"),"flex w-full",s[n],l[u],c[d],p)},h),f)});u.displayName="Flex";var d=n(84264);let f=e=>{let{noDataText:t="No data"}=e;return o.createElement(u,{alignItems:"center",justifyContent:"center",className:(0,r.q)("w-full h-full border border-dashed rounded-tremor-default","border-tremor-border","dark:border-dark-tremor-border")},o.createElement(d.Z,{className:(0,r.q)("text-tremor-content","dark:text-dark-tremor-content")},t))}},32644:function(e,t,n){"use strict";n.d(t,{FB:function(){return i},i4:function(){return o},me:function(){return r},vZ:function(){return function 
e(t,n){if(t===n)return!0;if("object"!=typeof t||"object"!=typeof n||null===t||null===n)return!1;let r=Object.keys(t),o=Object.keys(n);if(r.length!==o.length)return!1;for(let i of r)if(!o.includes(i)||!e(t[i],n[i]))return!1;return!0}}});let r=(e,t)=>{let n=new Map;return e.forEach((e,r)=>{n.set(e,t[r])}),n},o=(e,t,n)=>[e?"auto":null!=t?t:0,null!=n?n:"auto"];function i(e,t){let n=[];for(let r of e)if(Object.prototype.hasOwnProperty.call(r,t)&&(n.push(r[t]),n.length>1))return!1;return!0}},41649:function(e,t,n){"use strict";n.d(t,{Z:function(){return p}});var r=n(5853),o=n(2265),i=n(1526),a=n(7084),l=n(26898),c=n(65954),s=n(1153);let u={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-0.5",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-0.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-0.5",fontSize:"text-lg"},xl:{paddingX:"px-4",paddingY:"py-1",fontSize:"text-xl"}},d={xs:{height:"h-4",width:"w-4"},sm:{height:"h-4",width:"w-4"},md:{height:"h-4",width:"w-4"},lg:{height:"h-5",width:"w-5"},xl:{height:"h-6",width:"w-6"}},f=(0,s.fn)("Badge"),p=o.forwardRef((e,t)=>{let{color:n,icon:p,size:h=a.u8.SM,tooltip:m,className:g,children:v}=e,y=(0,r._T)(e,["color","icon","size","tooltip","className","children"]),b=p||null,{tooltipProps:x,getReferenceProps:w}=(0,i.l)();return o.createElement("span",Object.assign({ref:(0,s.lq)([t,x.refs.setReference]),className:(0,c.q)(f("root"),"w-max flex-shrink-0 inline-flex justify-center items-center cursor-default rounded-tremor-full",n?(0,c.q)((0,s.bM)(n,l.K.background).bgColor,(0,s.bM)(n,l.K.text).textColor,"bg-opacity-20 dark:bg-opacity-25"):(0,c.q)("bg-tremor-brand-muted text-tremor-brand-emphasis","dark:bg-dark-tremor-brand-muted dark:text-dark-tremor-brand-emphasis"),u[h].paddingX,u[h].paddingY,u[h].fontSize,g)},w,y),o.createElement(i.Z,Object.assign({text:m},x)),b?o.createElement(b,{className:(0,c.q)(f("icon"),"shrink-0 -ml-1 mr-1.5",d[h].height,d[h].width)}):null,o.createElement("p",{className:(0,c.q)(f("text"),"text-sm whitespace-nowrap")},v))});p.displayName="Badge"},47323:function(e,t,n){"use strict";n.d(t,{Z:function(){return m}});var r=n(5853),o=n(2265),i=n(1526),a=n(7084),l=n(65954),c=n(1153),s=n(26898);let u={xs:{paddingX:"px-1.5",paddingY:"py-1.5"},sm:{paddingX:"px-1.5",paddingY:"py-1.5"},md:{paddingX:"px-2",paddingY:"py-2"},lg:{paddingX:"px-2",paddingY:"py-2"},xl:{paddingX:"px-2.5",paddingY:"py-2.5"}},d={xs:{height:"h-3",width:"w-3"},sm:{height:"h-5",width:"w-5"},md:{height:"h-5",width:"w-5"},lg:{height:"h-7",width:"w-7"},xl:{height:"h-9",width:"w-9"}},f={simple:{rounded:"",border:"",ring:"",shadow:""},light:{rounded:"rounded-tremor-default",border:"",ring:"",shadow:""},shadow:{rounded:"rounded-tremor-default",border:"border",ring:"",shadow:"shadow-tremor-card dark:shadow-dark-tremor-card"},solid:{rounded:"rounded-tremor-default",border:"border-2",ring:"ring-1",shadow:""},outlined:{rounded:"rounded-tremor-default",border:"border",ring:"ring-2",shadow:""}},p=(e,t)=>{switch(e){case"simple":return{textColor:t?(0,c.bM)(t,s.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:"",borderColor:"",ringColor:""};case"light":return{textColor:t?(0,c.bM)(t,s.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,c.bM)(t,s.K.background).bgColor,"bg-opacity-20"):"bg-tremor-brand-muted dark:bg-dark-tremor-brand-muted",borderColor:"",ringColor:""};case"shadow":return{textColor:t?(0,c.bM)(t,s.K.text).textColor:"text-tremor-brand 
dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,c.bM)(t,s.K.background).bgColor,"bg-opacity-20"):"bg-tremor-background dark:bg-dark-tremor-background",borderColor:"border-tremor-border dark:border-dark-tremor-border",ringColor:""};case"solid":return{textColor:t?(0,c.bM)(t,s.K.text).textColor:"text-tremor-brand-inverted dark:text-dark-tremor-brand-inverted",bgColor:t?(0,l.q)((0,c.bM)(t,s.K.background).bgColor,"bg-opacity-20"):"bg-tremor-brand dark:bg-dark-tremor-brand",borderColor:"border-tremor-brand-inverted dark:border-dark-tremor-brand-inverted",ringColor:"ring-tremor-ring dark:ring-dark-tremor-ring"};case"outlined":return{textColor:t?(0,c.bM)(t,s.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,c.bM)(t,s.K.background).bgColor,"bg-opacity-20"):"bg-tremor-background dark:bg-dark-tremor-background",borderColor:t?(0,c.bM)(t,s.K.ring).borderColor:"border-tremor-brand-subtle dark:border-dark-tremor-brand-subtle",ringColor:t?(0,l.q)((0,c.bM)(t,s.K.ring).ringColor,"ring-opacity-40"):"ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted"}}},h=(0,c.fn)("Icon"),m=o.forwardRef((e,t)=>{let{icon:n,variant:s="simple",tooltip:m,size:g=a.u8.SM,color:v,className:y}=e,b=(0,r._T)(e,["icon","variant","tooltip","size","color","className"]),x=p(s,v),{tooltipProps:w,getReferenceProps:S}=(0,i.l)();return o.createElement("span",Object.assign({ref:(0,c.lq)([t,w.refs.setReference]),className:(0,l.q)(h("root"),"inline-flex flex-shrink-0 items-center",x.bgColor,x.textColor,x.borderColor,x.ringColor,f[s].rounded,f[s].border,f[s].shadow,f[s].ring,u[g].paddingX,u[g].paddingY,y)},S,b),o.createElement(i.Z,Object.assign({text:m},w)),o.createElement(n,{className:(0,l.q)(h("icon"),"shrink-0",d[g].height,d[g].width)}))});m.displayName="Icon"},53003:function(e,t,n){"use strict";let r,o,i;n.d(t,{Z:function(){return nF}});var a,l,c,s,u=n(5853),d=n(2265),f=n(54887),p=n(13323),h=n(64518),m=n(96822),g=n(40293);function v(){for(var e=arguments.length,t=Array(e),n=0;n(0,g.r)(...t),[...t])}var y=n(72238),b=n(93689);let x=(0,d.createContext)(!1);var w=n(61424),S=n(27847);let k=d.Fragment,E=d.Fragment,C=(0,d.createContext)(null),O=(0,d.createContext)(null);Object.assign((0,S.yV)(function(e,t){var n;let r,o,i=(0,d.useRef)(null),a=(0,b.T)((0,b.h)(e=>{i.current=e}),t),l=v(i),c=function(e){let t=(0,d.useContext)(x),n=(0,d.useContext)(C),r=v(e),[o,i]=(0,d.useState)(()=>{if(!t&&null!==n||w.O.isServer)return null;let e=null==r?void 0:r.getElementById("headlessui-portal-root");if(e)return e;if(null===r)return null;let o=r.createElement("div");return o.setAttribute("id","headlessui-portal-root"),r.body.appendChild(o)});return(0,d.useEffect)(()=>{null!==o&&(null!=r&&r.body.contains(o)||null==r||r.body.appendChild(o))},[o,r]),(0,d.useEffect)(()=>{t||null!==n&&i(n.current)},[n,i,t]),o}(i),[s]=(0,d.useState)(()=>{var e;return w.O.isServer?null:null!=(e=null==l?void 0:l.createElement("div"))?e:null}),u=(0,d.useContext)(O),g=(0,y.H)();return(0,h.e)(()=>{!c||!s||c.contains(s)||(s.setAttribute("data-headlessui-portal",""),c.appendChild(s))},[c,s]),(0,h.e)(()=>{if(s&&u)return u.register(s)},[u,s]),n=()=>{var e;c&&s&&(s instanceof 
Node&&c.contains(s)&&c.removeChild(s),c.childNodes.length<=0&&(null==(e=c.parentElement)||e.removeChild(c)))},r=(0,p.z)(n),o=(0,d.useRef)(!1),(0,d.useEffect)(()=>(o.current=!1,()=>{o.current=!0,(0,m.Y)(()=>{o.current&&r()})}),[r]),g&&c&&s?(0,f.createPortal)((0,S.sY)({ourProps:{ref:a},theirProps:e,defaultTag:k,name:"Portal"}),s):null}),{Group:(0,S.yV)(function(e,t){let{target:n,...r}=e,o={ref:(0,b.T)(t)};return d.createElement(C.Provider,{value:n},(0,S.sY)({ourProps:o,theirProps:r,defaultTag:E,name:"Popover.Group"}))})});var j=n(31948),P=n(17684),N=n(98505),M=n(80004),I=n(38198),R=n(3141),T=((r=T||{})[r.Forwards=0]="Forwards",r[r.Backwards=1]="Backwards",r);function A(){let e=(0,d.useRef)(0);return(0,R.s)("keydown",t=>{"Tab"===t.key&&(e.current=t.shiftKey?1:0)},!0),e}var _=n(37863),D=n(47634),Z=n(37105),L=n(24536),z=n(37388),B=((o=B||{})[o.Open=0]="Open",o[o.Closed=1]="Closed",o),F=((i=F||{})[i.TogglePopover=0]="TogglePopover",i[i.ClosePopover=1]="ClosePopover",i[i.SetButton=2]="SetButton",i[i.SetButtonId=3]="SetButtonId",i[i.SetPanel=4]="SetPanel",i[i.SetPanelId=5]="SetPanelId",i);let H={0:e=>{let t={...e,popoverState:(0,L.E)(e.popoverState,{0:1,1:0})};return 0===t.popoverState&&(t.__demoMode=!1),t},1:e=>1===e.popoverState?e:{...e,popoverState:1},2:(e,t)=>e.button===t.button?e:{...e,button:t.button},3:(e,t)=>e.buttonId===t.buttonId?e:{...e,buttonId:t.buttonId},4:(e,t)=>e.panel===t.panel?e:{...e,panel:t.panel},5:(e,t)=>e.panelId===t.panelId?e:{...e,panelId:t.panelId}},q=(0,d.createContext)(null);function W(e){let t=(0,d.useContext)(q);if(null===t){let t=Error("<".concat(e," /> is missing a parent component."));throw Error.captureStackTrace&&Error.captureStackTrace(t,W),t}return t}q.displayName="PopoverContext";let K=(0,d.createContext)(null);function V(e){let t=(0,d.useContext)(K);if(null===t){let t=Error("<".concat(e," /> is missing a parent component."));throw Error.captureStackTrace&&Error.captureStackTrace(t,V),t}return t}K.displayName="PopoverAPIContext";let U=(0,d.createContext)(null);function G(){return(0,d.useContext)(U)}U.displayName="PopoverGroupContext";let X=(0,d.createContext)(null);function $(e,t){return(0,L.E)(t.type,H,e,t)}X.displayName="PopoverPanelContext";let Y=S.AN.RenderStrategy|S.AN.Static,Q=S.AN.RenderStrategy|S.AN.Static,J=Object.assign((0,S.yV)(function(e,t){var n,r,o,i;let a,l,c,s,u,f;let{__demoMode:h=!1,...m}=e,g=(0,d.useRef)(null),y=(0,b.T)(t,(0,b.h)(e=>{g.current=e})),x=(0,d.useRef)([]),w=(0,d.useReducer)($,{__demoMode:h,popoverState:h?0:1,buttons:x,button:null,buttonId:null,panel:null,panelId:null,beforePanelSentinel:(0,d.createRef)(),afterPanelSentinel:(0,d.createRef)()}),[{popoverState:k,button:E,buttonId:C,panel:P,panelId:M,beforePanelSentinel:R,afterPanelSentinel:T},A]=w,D=v(null!=(n=g.current)?n:E),z=(0,d.useMemo)(()=>{if(!E||!P)return!1;for(let e of document.querySelectorAll("body > *"))if(Number(null==e?void 0:e.contains(E))^Number(null==e?void 0:e.contains(P)))return!0;let e=(0,Z.GO)(),t=e.indexOf(E),n=(t+e.length-1)%e.length,r=(t+1)%e.length,o=e[n],i=e[r];return!P.contains(o)&&!P.contains(i)},[E,P]),B=(0,j.E)(C),F=(0,j.E)(M),H=(0,d.useMemo)(()=>({buttonId:B,panelId:F,close:()=>A({type:1})}),[B,F,A]),W=G(),V=null==W?void 0:W.registerPopover,U=(0,p.z)(()=>{var e;return null!=(e=null==W?void 0:W.isFocusWithinPopoverGroup())?e:(null==D?void 0:D.activeElement)&&((null==E?void 0:E.contains(D.activeElement))||(null==P?void 0:P.contains(D.activeElement)))});(0,d.useEffect)(()=>null==V?void 
0:V(H),[V,H]);let[Y,Q]=(a=(0,d.useContext)(O),l=(0,d.useRef)([]),c=(0,p.z)(e=>(l.current.push(e),a&&a.register(e),()=>s(e))),s=(0,p.z)(e=>{let t=l.current.indexOf(e);-1!==t&&l.current.splice(t,1),a&&a.unregister(e)}),u=(0,d.useMemo)(()=>({register:c,unregister:s,portals:l}),[c,s,l]),[l,(0,d.useMemo)(()=>function(e){let{children:t}=e;return d.createElement(O.Provider,{value:u},t)},[u])]),J=function(){var e;let{defaultContainers:t=[],portals:n,mainTreeNodeRef:r}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},o=(0,d.useRef)(null!=(e=null==r?void 0:r.current)?e:null),i=v(o),a=(0,p.z)(()=>{var e,r,a;let l=[];for(let e of t)null!==e&&(e instanceof HTMLElement?l.push(e):"current"in e&&e.current instanceof HTMLElement&&l.push(e.current));if(null!=n&&n.current)for(let e of n.current)l.push(e);for(let t of null!=(e=null==i?void 0:i.querySelectorAll("html > *, body > *"))?e:[])t!==document.body&&t!==document.head&&t instanceof HTMLElement&&"headlessui-portal-root"!==t.id&&(t.contains(o.current)||t.contains(null==(a=null==(r=o.current)?void 0:r.getRootNode())?void 0:a.host)||l.some(e=>t.contains(e))||l.push(t));return l});return{resolveContainers:a,contains:(0,p.z)(e=>a().some(t=>t.contains(e))),mainTreeNodeRef:o,MainTreeNode:(0,d.useMemo)(()=>function(){return null!=r?null:d.createElement(I._,{features:I.A.Hidden,ref:o})},[o,r])}}({mainTreeNodeRef:null==W?void 0:W.mainTreeNodeRef,portals:Y,defaultContainers:[E,P]});r=null==D?void 0:D.defaultView,o="focus",i=e=>{var t,n,r,o;e.target!==window&&e.target instanceof HTMLElement&&0===k&&(U()||E&&P&&(J.contains(e.target)||null!=(n=null==(t=R.current)?void 0:t.contains)&&n.call(t,e.target)||null!=(o=null==(r=T.current)?void 0:r.contains)&&o.call(r,e.target)||A({type:1})))},f=(0,j.E)(i),(0,d.useEffect)(()=>{function e(e){f.current(e)}return(r=null!=r?r:window).addEventListener(o,e,!0),()=>r.removeEventListener(o,e,!0)},[r,o,!0]),(0,N.O)(J.resolveContainers,(e,t)=>{A({type:1}),(0,Z.sP)(t,Z.tJ.Loose)||(e.preventDefault(),null==E||E.focus())},0===k);let ee=(0,p.z)(e=>{A({type:1});let t=e?e instanceof HTMLElement?e:"current"in e&&e.current instanceof HTMLElement?e.current:E:E;null==t||t.focus()}),et=(0,d.useMemo)(()=>({close:ee,isPortalled:z}),[ee,z]),en=(0,d.useMemo)(()=>({open:0===k,close:ee}),[k,ee]);return d.createElement(X.Provider,{value:null},d.createElement(q.Provider,{value:w},d.createElement(K.Provider,{value:et},d.createElement(_.up,{value:(0,L.E)(k,{0:_.ZM.Open,1:_.ZM.Closed})},d.createElement(Q,null,(0,S.sY)({ourProps:{ref:y},theirProps:m,slot:en,defaultTag:"div",name:"Popover"}),d.createElement(J.MainTreeNode,null))))))}),{Button:(0,S.yV)(function(e,t){let n=(0,P.M)(),{id:r="headlessui-popover-button-".concat(n),...o}=e,[i,a]=W("Popover.Button"),{isPortalled:l}=V("Popover.Button"),c=(0,d.useRef)(null),s="headlessui-focus-sentinel-".concat((0,P.M)()),u=G(),f=null==u?void 0:u.closeOthers,h=null!==(0,d.useContext)(X);(0,d.useEffect)(()=>{if(!h)return a({type:3,buttonId:r}),()=>{a({type:3,buttonId:null})}},[h,r,a]);let[m]=(0,d.useState)(()=>Symbol()),g=(0,b.T)(c,t,h?null:e=>{if(e)i.buttons.current.push(m);else{let e=i.buttons.current.indexOf(m);-1!==e&&i.buttons.current.splice(e,1)}i.buttons.current.length>1&&console.warn("You are already using a but only 1 is supported."),e&&a({type:2,button:e})}),y=(0,b.T)(c,t),x=v(c),w=(0,p.z)(e=>{var t,n,r;if(h){if(1===i.popoverState)return;switch(e.key){case z.R.Space:case z.R.Enter:e.preventDefault(),null==(n=(t=e.target).click)||n.call(t),a({type:1}),null==(r=i.button)||r.focus()}}else 
switch(e.key){case z.R.Space:case z.R.Enter:e.preventDefault(),e.stopPropagation(),1===i.popoverState&&(null==f||f(i.buttonId)),a({type:0});break;case z.R.Escape:if(0!==i.popoverState)return null==f?void 0:f(i.buttonId);if(!c.current||null!=x&&x.activeElement&&!c.current.contains(x.activeElement))return;e.preventDefault(),e.stopPropagation(),a({type:1})}}),k=(0,p.z)(e=>{h||e.key===z.R.Space&&e.preventDefault()}),E=(0,p.z)(t=>{var n,r;(0,D.P)(t.currentTarget)||e.disabled||(h?(a({type:1}),null==(n=i.button)||n.focus()):(t.preventDefault(),t.stopPropagation(),1===i.popoverState&&(null==f||f(i.buttonId)),a({type:0}),null==(r=i.button)||r.focus()))}),C=(0,p.z)(e=>{e.preventDefault(),e.stopPropagation()}),O=0===i.popoverState,j=(0,d.useMemo)(()=>({open:O}),[O]),N=(0,M.f)(e,c),R=h?{ref:y,type:N,onKeyDown:w,onClick:E}:{ref:g,id:i.buttonId,type:N,"aria-expanded":0===i.popoverState,"aria-controls":i.panel?i.panelId:void 0,onKeyDown:w,onKeyUp:k,onClick:E,onMouseDown:C},_=A(),B=(0,p.z)(()=>{let e=i.panel;e&&(0,L.E)(_.current,{[T.Forwards]:()=>(0,Z.jA)(e,Z.TO.First),[T.Backwards]:()=>(0,Z.jA)(e,Z.TO.Last)})===Z.fE.Error&&(0,Z.jA)((0,Z.GO)().filter(e=>"true"!==e.dataset.headlessuiFocusGuard),(0,L.E)(_.current,{[T.Forwards]:Z.TO.Next,[T.Backwards]:Z.TO.Previous}),{relativeTo:i.button})});return d.createElement(d.Fragment,null,(0,S.sY)({ourProps:R,theirProps:o,slot:j,defaultTag:"button",name:"Popover.Button"}),O&&!h&&l&&d.createElement(I._,{id:s,features:I.A.Focusable,"data-headlessui-focus-guard":!0,as:"button",type:"button",onFocus:B}))}),Overlay:(0,S.yV)(function(e,t){let n=(0,P.M)(),{id:r="headlessui-popover-overlay-".concat(n),...o}=e,[{popoverState:i},a]=W("Popover.Overlay"),l=(0,b.T)(t),c=(0,_.oJ)(),s=null!==c?(c&_.ZM.Open)===_.ZM.Open:0===i,u=(0,p.z)(e=>{if((0,D.P)(e.currentTarget))return e.preventDefault();a({type:1})}),f=(0,d.useMemo)(()=>({open:0===i}),[i]);return(0,S.sY)({ourProps:{ref:l,id:r,"aria-hidden":!0,onClick:u},theirProps:o,slot:f,defaultTag:"div",features:Y,visible:s,name:"Popover.Overlay"})}),Panel:(0,S.yV)(function(e,t){let n=(0,P.M)(),{id:r="headlessui-popover-panel-".concat(n),focus:o=!1,...i}=e,[a,l]=W("Popover.Panel"),{close:c,isPortalled:s}=V("Popover.Panel"),u="headlessui-focus-sentinel-before-".concat((0,P.M)()),f="headlessui-focus-sentinel-after-".concat((0,P.M)()),m=(0,d.useRef)(null),g=(0,b.T)(m,t,e=>{l({type:4,panel:e})}),y=v(m),x=(0,S.Y2)();(0,h.e)(()=>(l({type:5,panelId:r}),()=>{l({type:5,panelId:null})}),[r,l]);let w=(0,_.oJ)(),k=null!==w?(w&_.ZM.Open)===_.ZM.Open:0===a.popoverState,E=(0,p.z)(e=>{var t;if(e.key===z.R.Escape){if(0!==a.popoverState||!m.current||null!=y&&y.activeElement&&!m.current.contains(y.activeElement))return;e.preventDefault(),e.stopPropagation(),l({type:1}),null==(t=a.button)||t.focus()}});(0,d.useEffect)(()=>{var t;e.static||1===a.popoverState&&(null==(t=e.unmount)||t)&&l({type:4,panel:null})},[a.popoverState,e.unmount,e.static,l]),(0,d.useEffect)(()=>{if(a.__demoMode||!o||0!==a.popoverState||!m.current)return;let e=null==y?void 0:y.activeElement;m.current.contains(e)||(0,Z.jA)(m.current,Z.TO.First)},[a.__demoMode,o,m,a.popoverState]);let C=(0,d.useMemo)(()=>({open:0===a.popoverState,close:c}),[a,c]),O={ref:g,id:r,onKeyDown:E,onBlur:o&&0===a.popoverState?e=>{var t,n,r,o,i;let c=e.relatedTarget;c&&m.current&&(null!=(t=m.current)&&t.contains(c)||(l({type:1}),(null!=(r=null==(n=a.beforePanelSentinel.current)?void 0:n.contains)&&r.call(n,c)||null!=(i=null==(o=a.afterPanelSentinel.current)?void 
0:o.contains)&&i.call(o,c))&&c.focus({preventScroll:!0})))}:void 0,tabIndex:-1},j=A(),N=(0,p.z)(()=>{let e=m.current;e&&(0,L.E)(j.current,{[T.Forwards]:()=>{var t;(0,Z.jA)(e,Z.TO.First)===Z.fE.Error&&(null==(t=a.afterPanelSentinel.current)||t.focus())},[T.Backwards]:()=>{var e;null==(e=a.button)||e.focus({preventScroll:!0})}})}),M=(0,p.z)(()=>{let e=m.current;e&&(0,L.E)(j.current,{[T.Forwards]:()=>{var e;if(!a.button)return;let t=(0,Z.GO)(),n=t.indexOf(a.button),r=t.slice(0,n+1),o=[...t.slice(n+1),...r];for(let t of o.slice())if("true"===t.dataset.headlessuiFocusGuard||null!=(e=a.panel)&&e.contains(t)){let e=o.indexOf(t);-1!==e&&o.splice(e,1)}(0,Z.jA)(o,Z.TO.First,{sorted:!1})},[T.Backwards]:()=>{var t;(0,Z.jA)(e,Z.TO.Previous)===Z.fE.Error&&(null==(t=a.button)||t.focus())}})});return d.createElement(X.Provider,{value:r},k&&s&&d.createElement(I._,{id:u,ref:a.beforePanelSentinel,features:I.A.Focusable,"data-headlessui-focus-guard":!0,as:"button",type:"button",onFocus:N}),(0,S.sY)({mergeRefs:x,ourProps:O,theirProps:i,slot:C,defaultTag:"div",features:Q,visible:k,name:"Popover.Panel"}),k&&s&&d.createElement(I._,{id:f,ref:a.afterPanelSentinel,features:I.A.Focusable,"data-headlessui-focus-guard":!0,as:"button",type:"button",onFocus:M}))}),Group:(0,S.yV)(function(e,t){let n;let r=(0,d.useRef)(null),o=(0,b.T)(r,t),[i,a]=(0,d.useState)([]),l={mainTreeNodeRef:n=(0,d.useRef)(null),MainTreeNode:(0,d.useMemo)(()=>function(){return d.createElement(I._,{features:I.A.Hidden,ref:n})},[n])},c=(0,p.z)(e=>{a(t=>{let n=t.indexOf(e);if(-1!==n){let e=t.slice();return e.splice(n,1),e}return t})}),s=(0,p.z)(e=>(a(t=>[...t,e]),()=>c(e))),u=(0,p.z)(()=>{var e;let t=(0,g.r)(r);if(!t)return!1;let n=t.activeElement;return!!(null!=(e=r.current)&&e.contains(n))||i.some(e=>{var r,o;return(null==(r=t.getElementById(e.buttonId.current))?void 0:r.contains(n))||(null==(o=t.getElementById(e.panelId.current))?void 0:o.contains(n))})}),f=(0,p.z)(e=>{for(let t of i)t.buttonId.current!==e&&t.close()}),h=(0,d.useMemo)(()=>({registerPopover:s,unregisterPopover:c,isFocusWithinPopoverGroup:u,closeOthers:f,mainTreeNodeRef:l.mainTreeNodeRef}),[s,c,u,f,l.mainTreeNodeRef]),m=(0,d.useMemo)(()=>({}),[]);return d.createElement(U.Provider,{value:h},(0,S.sY)({ourProps:{ref:o},theirProps:e,slot:m,defaultTag:"div",name:"Popover.Group"}),d.createElement(l.MainTreeNode,null))})});var ee=n(33044),et=n(28517);let en=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 20 20",fill:"currentColor"}),d.createElement("path",{fillRule:"evenodd",d:"M6 2a1 1 0 00-1 1v1H4a2 2 0 00-2 2v10a2 2 0 002 2h12a2 2 0 002-2V6a2 2 0 00-2-2h-1V3a1 1 0 10-2 0v1H7V3a1 1 0 00-1-1zm0 5a1 1 0 000 2h8a1 1 0 100-2H6z",clipRule:"evenodd"}))};var er=n(4537),eo=n(99735),ei=n(7656);function ea(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e);return t.setHours(0,0,0,0),t}function el(){return ea(Date.now())}function ec(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e);return t.setDate(1),t.setHours(0,0,0,0),t}var es=n(65954),eu=n(96398),ed=n(41154);function ef(e){var t,n;if((0,ei.Z)(1,arguments),e&&"function"==typeof e.forEach)t=e;else{if("object"!==(0,ed.Z)(e)||null===e)return new Date(NaN);t=Array.prototype.slice.call(e)}return t.forEach(function(e){var t=(0,eo.Z)(e);(void 0===n||nt||isNaN(t.getDate()))&&(n=t)}),n||new Date(NaN)}var eh=n(25721),em=n(47869);function eg(e,t){(0,ei.Z)(2,arguments);var n=(0,em.Z)(t);return(0,eh.Z)(e,-n)}var ev=n(55463);function ey(e,t){if((0,ei.Z)(2,arguments),!t||"object"!==(0,ed.Z)(t))return 
new Date(NaN);var n=t.years?(0,em.Z)(t.years):0,r=t.months?(0,em.Z)(t.months):0,o=t.weeks?(0,em.Z)(t.weeks):0,i=t.days?(0,em.Z)(t.days):0,a=t.hours?(0,em.Z)(t.hours):0,l=t.minutes?(0,em.Z)(t.minutes):0,c=t.seconds?(0,em.Z)(t.seconds):0;return new Date(eg(function(e,t){(0,ei.Z)(2,arguments);var n=(0,em.Z)(t);return(0,ev.Z)(e,-n)}(e,r+12*n),i+7*o).getTime()-1e3*(c+60*(l+60*a)))}function eb(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=new Date(0);return n.setFullYear(t.getFullYear(),0,1),n.setHours(0,0,0,0),n}function ex(e){return(0,ei.Z)(1,arguments),e instanceof Date||"object"===(0,ed.Z)(e)&&"[object Date]"===Object.prototype.toString.call(e)}function ew(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getUTCDay();return t.setUTCDate(t.getUTCDate()-((n<1?7:0)+n-1)),t.setUTCHours(0,0,0,0),t}function eS(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getUTCFullYear(),r=new Date(0);r.setUTCFullYear(n+1,0,4),r.setUTCHours(0,0,0,0);var o=ew(r),i=new Date(0);i.setUTCFullYear(n,0,4),i.setUTCHours(0,0,0,0);var a=ew(i);return t.getTime()>=o.getTime()?n+1:t.getTime()>=a.getTime()?n:n-1}var ek={};function eE(e,t){(0,ei.Z)(1,arguments);var n,r,o,i,a,l,c,s,u=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.weekStartsOn)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.weekStartsOn)&&void 0!==o?o:ek.weekStartsOn)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 0:null===(s=c.options)||void 0===s?void 0:s.weekStartsOn)&&void 0!==n?n:0);if(!(u>=0&&u<=6))throw RangeError("weekStartsOn must be between 0 and 6 inclusively");var d=(0,eo.Z)(e),f=d.getUTCDay();return d.setUTCDate(d.getUTCDate()-((f=1&&f<=7))throw RangeError("firstWeekContainsDate must be between 1 and 7 inclusively");var p=new Date(0);p.setUTCFullYear(d+1,0,f),p.setUTCHours(0,0,0,0);var h=eE(p,t),m=new Date(0);m.setUTCFullYear(d,0,f),m.setUTCHours(0,0,0,0);var g=eE(m,t);return u.getTime()>=h.getTime()?d+1:u.getTime()>=g.getTime()?d:d-1}function eO(e,t){for(var n=Math.abs(e).toString();n.length0?n:1-n;return eO("yy"===t?r%100:r,t.length)},M:function(e,t){var n=e.getUTCMonth();return"M"===t?String(n+1):eO(n+1,2)},d:function(e,t){return eO(e.getUTCDate(),t.length)},h:function(e,t){return eO(e.getUTCHours()%12||12,t.length)},H:function(e,t){return eO(e.getUTCHours(),t.length)},m:function(e,t){return eO(e.getUTCMinutes(),t.length)},s:function(e,t){return eO(e.getUTCSeconds(),t.length)},S:function(e,t){var n=t.length;return eO(Math.floor(e.getUTCMilliseconds()*Math.pow(10,n-3)),t.length)}},eP={midnight:"midnight",noon:"noon",morning:"morning",afternoon:"afternoon",evening:"evening",night:"night"};function eN(e,t){var n=e>0?"-":"+",r=Math.abs(e),o=Math.floor(r/60),i=r%60;return 0===i?n+String(o):n+String(o)+(t||"")+eO(i,2)}function eM(e,t){return e%60==0?(e>0?"-":"+")+eO(Math.abs(e)/60,2):eI(e,t)}function eI(e,t){var n=Math.abs(e);return(e>0?"-":"+")+eO(Math.floor(n/60),2)+(t||"")+eO(n%60,2)}var eR={G:function(e,t,n){var r=e.getUTCFullYear()>0?1:0;switch(t){case"G":case"GG":case"GGG":return n.era(r,{width:"abbreviated"});case"GGGGG":return n.era(r,{width:"narrow"});default:return n.era(r,{width:"wide"})}},y:function(e,t,n){if("yo"===t){var r=e.getUTCFullYear();return n.ordinalNumber(r>0?r:1-r,{unit:"year"})}return ej.y(e,t)},Y:function(e,t,n,r){var o=eC(e,r),i=o>0?o:1-o;return"YY"===t?eO(i%100,2):"Yo"===t?n.ordinalNumber(i,{unit:"year"}):eO(i,t.length)},R:function(e,t){return eO(eS(e),t.length)},u:function(e,t){return eO(e.getUTCFullYear(),t.length)},Q:function(e,t,n){var 
r=Math.ceil((e.getUTCMonth()+1)/3);switch(t){case"Q":return String(r);case"QQ":return eO(r,2);case"Qo":return n.ordinalNumber(r,{unit:"quarter"});case"QQQ":return n.quarter(r,{width:"abbreviated",context:"formatting"});case"QQQQQ":return n.quarter(r,{width:"narrow",context:"formatting"});default:return n.quarter(r,{width:"wide",context:"formatting"})}},q:function(e,t,n){var r=Math.ceil((e.getUTCMonth()+1)/3);switch(t){case"q":return String(r);case"qq":return eO(r,2);case"qo":return n.ordinalNumber(r,{unit:"quarter"});case"qqq":return n.quarter(r,{width:"abbreviated",context:"standalone"});case"qqqqq":return n.quarter(r,{width:"narrow",context:"standalone"});default:return n.quarter(r,{width:"wide",context:"standalone"})}},M:function(e,t,n){var r=e.getUTCMonth();switch(t){case"M":case"MM":return ej.M(e,t);case"Mo":return n.ordinalNumber(r+1,{unit:"month"});case"MMM":return n.month(r,{width:"abbreviated",context:"formatting"});case"MMMMM":return n.month(r,{width:"narrow",context:"formatting"});default:return n.month(r,{width:"wide",context:"formatting"})}},L:function(e,t,n){var r=e.getUTCMonth();switch(t){case"L":return String(r+1);case"LL":return eO(r+1,2);case"Lo":return n.ordinalNumber(r+1,{unit:"month"});case"LLL":return n.month(r,{width:"abbreviated",context:"standalone"});case"LLLLL":return n.month(r,{width:"narrow",context:"standalone"});default:return n.month(r,{width:"wide",context:"standalone"})}},w:function(e,t,n,r){var o=function(e,t){(0,ei.Z)(1,arguments);var n=(0,eo.Z)(e);return Math.round((eE(n,t).getTime()-(function(e,t){(0,ei.Z)(1,arguments);var n,r,o,i,a,l,c,s,u=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.firstWeekContainsDate)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.firstWeekContainsDate)&&void 0!==o?o:ek.firstWeekContainsDate)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 0:null===(s=c.options)||void 0===s?void 0:s.firstWeekContainsDate)&&void 0!==n?n:1),d=eC(e,t),f=new Date(0);return f.setUTCFullYear(d,0,u),f.setUTCHours(0,0,0,0),eE(f,t)})(n,t).getTime())/6048e5)+1}(e,r);return"wo"===t?n.ordinalNumber(o,{unit:"week"}):eO(o,t.length)},I:function(e,t,n){var r=function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e);return Math.round((ew(t).getTime()-(function(e){(0,ei.Z)(1,arguments);var t=eS(e),n=new Date(0);return n.setUTCFullYear(t,0,4),n.setUTCHours(0,0,0,0),ew(n)})(t).getTime())/6048e5)+1}(e);return"Io"===t?n.ordinalNumber(r,{unit:"week"}):eO(r,t.length)},d:function(e,t,n){return"do"===t?n.ordinalNumber(e.getUTCDate(),{unit:"date"}):ej.d(e,t)},D:function(e,t,n){var r=function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getTime();return t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0),Math.floor((n-t.getTime())/864e5)+1}(e);return"Do"===t?n.ordinalNumber(r,{unit:"dayOfYear"}):eO(r,t.length)},E:function(e,t,n){var r=e.getUTCDay();switch(t){case"E":case"EE":case"EEE":return n.day(r,{width:"abbreviated",context:"formatting"});case"EEEEE":return n.day(r,{width:"narrow",context:"formatting"});case"EEEEEE":return n.day(r,{width:"short",context:"formatting"});default:return n.day(r,{width:"wide",context:"formatting"})}},e:function(e,t,n,r){var o=e.getUTCDay(),i=(o-r.weekStartsOn+8)%7||7;switch(t){case"e":return String(i);case"ee":return eO(i,2);case"eo":return n.ordinalNumber(i,{unit:"day"});case"eee":return n.day(o,{width:"abbreviated",context:"formatting"});case"eeeee":return n.day(o,{width:"narrow",context:"formatting"});case"eeeeee":return 
n.day(o,{width:"short",context:"formatting"});default:return n.day(o,{width:"wide",context:"formatting"})}},c:function(e,t,n,r){var o=e.getUTCDay(),i=(o-r.weekStartsOn+8)%7||7;switch(t){case"c":return String(i);case"cc":return eO(i,t.length);case"co":return n.ordinalNumber(i,{unit:"day"});case"ccc":return n.day(o,{width:"abbreviated",context:"standalone"});case"ccccc":return n.day(o,{width:"narrow",context:"standalone"});case"cccccc":return n.day(o,{width:"short",context:"standalone"});default:return n.day(o,{width:"wide",context:"standalone"})}},i:function(e,t,n){var r=e.getUTCDay(),o=0===r?7:r;switch(t){case"i":return String(o);case"ii":return eO(o,t.length);case"io":return n.ordinalNumber(o,{unit:"day"});case"iii":return n.day(r,{width:"abbreviated",context:"formatting"});case"iiiii":return n.day(r,{width:"narrow",context:"formatting"});case"iiiiii":return n.day(r,{width:"short",context:"formatting"});default:return n.day(r,{width:"wide",context:"formatting"})}},a:function(e,t,n){var r=e.getUTCHours()/12>=1?"pm":"am";switch(t){case"a":case"aa":return n.dayPeriod(r,{width:"abbreviated",context:"formatting"});case"aaa":return n.dayPeriod(r,{width:"abbreviated",context:"formatting"}).toLowerCase();case"aaaaa":return n.dayPeriod(r,{width:"narrow",context:"formatting"});default:return n.dayPeriod(r,{width:"wide",context:"formatting"})}},b:function(e,t,n){var r,o=e.getUTCHours();switch(r=12===o?eP.noon:0===o?eP.midnight:o/12>=1?"pm":"am",t){case"b":case"bb":return n.dayPeriod(r,{width:"abbreviated",context:"formatting"});case"bbb":return n.dayPeriod(r,{width:"abbreviated",context:"formatting"}).toLowerCase();case"bbbbb":return n.dayPeriod(r,{width:"narrow",context:"formatting"});default:return n.dayPeriod(r,{width:"wide",context:"formatting"})}},B:function(e,t,n){var r,o=e.getUTCHours();switch(r=o>=17?eP.evening:o>=12?eP.afternoon:o>=4?eP.morning:eP.night,t){case"B":case"BB":case"BBB":return n.dayPeriod(r,{width:"abbreviated",context:"formatting"});case"BBBBB":return n.dayPeriod(r,{width:"narrow",context:"formatting"});default:return n.dayPeriod(r,{width:"wide",context:"formatting"})}},h:function(e,t,n){if("ho"===t){var r=e.getUTCHours()%12;return 0===r&&(r=12),n.ordinalNumber(r,{unit:"hour"})}return ej.h(e,t)},H:function(e,t,n){return"Ho"===t?n.ordinalNumber(e.getUTCHours(),{unit:"hour"}):ej.H(e,t)},K:function(e,t,n){var r=e.getUTCHours()%12;return"Ko"===t?n.ordinalNumber(r,{unit:"hour"}):eO(r,t.length)},k:function(e,t,n){var r=e.getUTCHours();return(0===r&&(r=24),"ko"===t)?n.ordinalNumber(r,{unit:"hour"}):eO(r,t.length)},m:function(e,t,n){return"mo"===t?n.ordinalNumber(e.getUTCMinutes(),{unit:"minute"}):ej.m(e,t)},s:function(e,t,n){return"so"===t?n.ordinalNumber(e.getUTCSeconds(),{unit:"second"}):ej.s(e,t)},S:function(e,t){return ej.S(e,t)},X:function(e,t,n,r){var o=(r._originalDate||e).getTimezoneOffset();if(0===o)return"Z";switch(t){case"X":return eM(o);case"XXXX":case"XX":return eI(o);default:return eI(o,":")}},x:function(e,t,n,r){var o=(r._originalDate||e).getTimezoneOffset();switch(t){case"x":return eM(o);case"xxxx":case"xx":return eI(o);default:return eI(o,":")}},O:function(e,t,n,r){var o=(r._originalDate||e).getTimezoneOffset();switch(t){case"O":case"OO":case"OOO":return"GMT"+eN(o,":");default:return"GMT"+eI(o,":")}},z:function(e,t,n,r){var o=(r._originalDate||e).getTimezoneOffset();switch(t){case"z":case"zz":case"zzz":return"GMT"+eN(o,":");default:return"GMT"+eI(o,":")}},t:function(e,t,n,r){return 
eO(Math.floor((r._originalDate||e).getTime()/1e3),t.length)},T:function(e,t,n,r){return eO((r._originalDate||e).getTime(),t.length)}},eT=function(e,t){switch(e){case"P":return t.date({width:"short"});case"PP":return t.date({width:"medium"});case"PPP":return t.date({width:"long"});default:return t.date({width:"full"})}},eA=function(e,t){switch(e){case"p":return t.time({width:"short"});case"pp":return t.time({width:"medium"});case"ppp":return t.time({width:"long"});default:return t.time({width:"full"})}},e_={p:eA,P:function(e,t){var n,r=e.match(/(P+)(p+)?/)||[],o=r[1],i=r[2];if(!i)return eT(e,t);switch(o){case"P":n=t.dateTime({width:"short"});break;case"PP":n=t.dateTime({width:"medium"});break;case"PPP":n=t.dateTime({width:"long"});break;default:n=t.dateTime({width:"full"})}return n.replace("{{date}}",eT(o,t)).replace("{{time}}",eA(i,t))}};function eD(e){var t=new Date(Date.UTC(e.getFullYear(),e.getMonth(),e.getDate(),e.getHours(),e.getMinutes(),e.getSeconds(),e.getMilliseconds()));return t.setUTCFullYear(e.getFullYear()),e.getTime()-t.getTime()}var eZ=["D","DD"],eL=["YY","YYYY"];function ez(e,t,n){if("YYYY"===e)throw RangeError("Use `yyyy` instead of `YYYY` (in `".concat(t,"`) for formatting years to the input `").concat(n,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"));if("YY"===e)throw RangeError("Use `yy` instead of `YY` (in `".concat(t,"`) for formatting years to the input `").concat(n,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"));if("D"===e)throw RangeError("Use `d` instead of `D` (in `".concat(t,"`) for formatting days of the month to the input `").concat(n,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"));if("DD"===e)throw RangeError("Use `dd` instead of `DD` (in `".concat(t,"`) for formatting days of the month to the input `").concat(n,"`; see: https://github.com/date-fns/date-fns/blob/master/docs/unicodeTokens.md"))}var eB={lessThanXSeconds:{one:"less than a second",other:"less than {{count}} seconds"},xSeconds:{one:"1 second",other:"{{count}} seconds"},halfAMinute:"half a minute",lessThanXMinutes:{one:"less than a minute",other:"less than {{count}} minutes"},xMinutes:{one:"1 minute",other:"{{count}} minutes"},aboutXHours:{one:"about 1 hour",other:"about {{count}} hours"},xHours:{one:"1 hour",other:"{{count}} hours"},xDays:{one:"1 day",other:"{{count}} days"},aboutXWeeks:{one:"about 1 week",other:"about {{count}} weeks"},xWeeks:{one:"1 week",other:"{{count}} weeks"},aboutXMonths:{one:"about 1 month",other:"about {{count}} months"},xMonths:{one:"1 month",other:"{{count}} months"},aboutXYears:{one:"about 1 year",other:"about {{count}} years"},xYears:{one:"1 year",other:"{{count}} years"},overXYears:{one:"over 1 year",other:"over {{count}} years"},almostXYears:{one:"almost 1 year",other:"almost {{count}} years"}};function eF(e){return function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},n=t.width?String(t.width):e.defaultWidth;return e.formats[n]||e.formats[e.defaultWidth]}}var eH={date:eF({formats:{full:"EEEE, MMMM do, y",long:"MMMM do, y",medium:"MMM d, y",short:"MM/dd/yyyy"},defaultWidth:"full"}),time:eF({formats:{full:"h:mm:ss a zzzz",long:"h:mm:ss a z",medium:"h:mm:ss a",short:"h:mm a"},defaultWidth:"full"}),dateTime:eF({formats:{full:"{{date}} 'at' {{time}}",long:"{{date}} 'at' {{time}}",medium:"{{date}}, {{time}}",short:"{{date}}, {{time}}"},defaultWidth:"full"})},eq={lastWeek:"'last' eeee 'at' p",yesterday:"'yesterday at' p",today:"'today at' 
p",tomorrow:"'tomorrow at' p",nextWeek:"eeee 'at' p",other:"P"};function eW(e){return function(t,n){var r;if("formatting"===(null!=n&&n.context?String(n.context):"standalone")&&e.formattingValues){var o=e.defaultFormattingWidth||e.defaultWidth,i=null!=n&&n.width?String(n.width):o;r=e.formattingValues[i]||e.formattingValues[o]}else{var a=e.defaultWidth,l=null!=n&&n.width?String(n.width):e.defaultWidth;r=e.values[l]||e.values[a]}return r[e.argumentCallback?e.argumentCallback(t):t]}}function eK(e){return function(t){var n,r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},o=r.width,i=o&&e.matchPatterns[o]||e.matchPatterns[e.defaultMatchWidth],a=t.match(i);if(!a)return null;var l=a[0],c=o&&e.parsePatterns[o]||e.parsePatterns[e.defaultParseWidth],s=Array.isArray(c)?function(e,t){for(var n=0;n0?"in "+r:r+" ago":r},formatLong:eH,formatRelative:function(e,t,n,r){return eq[e]},localize:{ordinalNumber:function(e,t){var n=Number(e),r=n%100;if(r>20||r<10)switch(r%10){case 1:return n+"st";case 2:return n+"nd";case 3:return n+"rd"}return n+"th"},era:eW({values:{narrow:["B","A"],abbreviated:["BC","AD"],wide:["Before Christ","Anno Domini"]},defaultWidth:"wide"}),quarter:eW({values:{narrow:["1","2","3","4"],abbreviated:["Q1","Q2","Q3","Q4"],wide:["1st quarter","2nd quarter","3rd quarter","4th quarter"]},defaultWidth:"wide",argumentCallback:function(e){return e-1}}),month:eW({values:{narrow:["J","F","M","A","M","J","J","A","S","O","N","D"],abbreviated:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],wide:["January","February","March","April","May","June","July","August","September","October","November","December"]},defaultWidth:"wide"}),day:eW({values:{narrow:["S","M","T","W","T","F","S"],short:["Su","Mo","Tu","We","Th","Fr","Sa"],abbreviated:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],wide:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},defaultWidth:"wide"}),dayPeriod:eW({values:{narrow:{am:"a",pm:"p",midnight:"mi",noon:"n",morning:"morning",afternoon:"afternoon",evening:"evening",night:"night"},abbreviated:{am:"AM",pm:"PM",midnight:"midnight",noon:"noon",morning:"morning",afternoon:"afternoon",evening:"evening",night:"night"},wide:{am:"a.m.",pm:"p.m.",midnight:"midnight",noon:"noon",morning:"morning",afternoon:"afternoon",evening:"evening",night:"night"}},defaultWidth:"wide",formattingValues:{narrow:{am:"a",pm:"p",midnight:"mi",noon:"n",morning:"in the morning",afternoon:"in the afternoon",evening:"in the evening",night:"at night"},abbreviated:{am:"AM",pm:"PM",midnight:"midnight",noon:"noon",morning:"in the morning",afternoon:"in the afternoon",evening:"in the evening",night:"at night"},wide:{am:"a.m.",pm:"p.m.",midnight:"midnight",noon:"noon",morning:"in the morning",afternoon:"in the afternoon",evening:"in the evening",night:"at night"}},defaultFormattingWidth:"wide"})},match:{ordinalNumber:(a={matchPattern:/^(\d+)(th|st|nd|rd)?/i,parsePattern:/\d+/i,valueCallback:function(e){return parseInt(e,10)}},function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=e.match(a.matchPattern);if(!n)return null;var r=n[0],o=e.match(a.parsePattern);if(!o)return null;var i=a.valueCallback?a.valueCallback(o[0]):o[0];return{value:i=t.valueCallback?t.valueCallback(i):i,rest:e.slice(r.length)}}),era:eK({matchPatterns:{narrow:/^(b|a)/i,abbreviated:/^(b\.?\s?c\.?|b\.?\s?c\.?\s?e\.?|a\.?\s?d\.?|c\.?\s?e\.?)/i,wide:/^(before christ|before common era|anno domini|common 
era)/i},defaultMatchWidth:"wide",parsePatterns:{any:[/^b/i,/^(a|c)/i]},defaultParseWidth:"any"}),quarter:eK({matchPatterns:{narrow:/^[1234]/i,abbreviated:/^q[1234]/i,wide:/^[1234](th|st|nd|rd)? quarter/i},defaultMatchWidth:"wide",parsePatterns:{any:[/1/i,/2/i,/3/i,/4/i]},defaultParseWidth:"any",valueCallback:function(e){return e+1}}),month:eK({matchPatterns:{narrow:/^[jfmasond]/i,abbreviated:/^(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)/i,wide:/^(january|february|march|april|may|june|july|august|september|october|november|december)/i},defaultMatchWidth:"wide",parsePatterns:{narrow:[/^j/i,/^f/i,/^m/i,/^a/i,/^m/i,/^j/i,/^j/i,/^a/i,/^s/i,/^o/i,/^n/i,/^d/i],any:[/^ja/i,/^f/i,/^mar/i,/^ap/i,/^may/i,/^jun/i,/^jul/i,/^au/i,/^s/i,/^o/i,/^n/i,/^d/i]},defaultParseWidth:"any"}),day:eK({matchPatterns:{narrow:/^[smtwf]/i,short:/^(su|mo|tu|we|th|fr|sa)/i,abbreviated:/^(sun|mon|tue|wed|thu|fri|sat)/i,wide:/^(sunday|monday|tuesday|wednesday|thursday|friday|saturday)/i},defaultMatchWidth:"wide",parsePatterns:{narrow:[/^s/i,/^m/i,/^t/i,/^w/i,/^t/i,/^f/i,/^s/i],any:[/^su/i,/^m/i,/^tu/i,/^w/i,/^th/i,/^f/i,/^sa/i]},defaultParseWidth:"any"}),dayPeriod:eK({matchPatterns:{narrow:/^(a|p|mi|n|(in the|at) (morning|afternoon|evening|night))/i,any:/^([ap]\.?\s?m\.?|midnight|noon|(in the|at) (morning|afternoon|evening|night))/i},defaultMatchWidth:"any",parsePatterns:{any:{am:/^a/i,pm:/^p/i,midnight:/^mi/i,noon:/^no/i,morning:/morning/i,afternoon:/afternoon/i,evening:/evening/i,night:/night/i}},defaultParseWidth:"any"})},options:{weekStartsOn:0,firstWeekContainsDate:1}},eU=/[yYQqMLwIdDecihHKkms]o|(\w)\1*|''|'(''|[^'])+('|$)|./g,eG=/P+p+|P+|p+|''|'(''|[^'])+('|$)|./g,eX=/^'([^]*?)'?$/,e$=/''/g,eY=/[a-zA-Z]/;function eQ(e,t,n){(0,ei.Z)(2,arguments);var r,o,i,a,l,c,s,u,d,f,p,h,m,g,v,y,b,x,w=String(t),S=null!==(r=null!==(o=null==n?void 0:n.locale)&&void 0!==o?o:ek.locale)&&void 0!==r?r:eV,k=(0,em.Z)(null!==(i=null!==(a=null!==(l=null!==(c=null==n?void 0:n.firstWeekContainsDate)&&void 0!==c?c:null==n?void 0:null===(s=n.locale)||void 0===s?void 0:null===(u=s.options)||void 0===u?void 0:u.firstWeekContainsDate)&&void 0!==l?l:ek.firstWeekContainsDate)&&void 0!==a?a:null===(d=ek.locale)||void 0===d?void 0:null===(f=d.options)||void 0===f?void 0:f.firstWeekContainsDate)&&void 0!==i?i:1);if(!(k>=1&&k<=7))throw RangeError("firstWeekContainsDate must be between 1 and 7 inclusively");var E=(0,em.Z)(null!==(p=null!==(h=null!==(m=null!==(g=null==n?void 0:n.weekStartsOn)&&void 0!==g?g:null==n?void 0:null===(v=n.locale)||void 0===v?void 0:null===(y=v.options)||void 0===y?void 0:y.weekStartsOn)&&void 0!==m?m:ek.weekStartsOn)&&void 0!==h?h:null===(b=ek.locale)||void 0===b?void 0:null===(x=b.options)||void 0===x?void 0:x.weekStartsOn)&&void 0!==p?p:0);if(!(E>=0&&E<=6))throw RangeError("weekStartsOn must be between 0 and 6 inclusively");if(!S.localize)throw RangeError("locale must contain localize property");if(!S.formatLong)throw RangeError("locale must contain formatLong property");var C=(0,eo.Z)(e);if(!function(e){return(0,ei.Z)(1,arguments),(!!ex(e)||"number"==typeof e)&&!isNaN(Number((0,eo.Z)(e)))}(C))throw RangeError("Invalid time value");var O=eD(C),j=function(e,t){return(0,ei.Z)(2,arguments),function(e,t){return(0,ei.Z)(2,arguments),new Date((0,eo.Z)(e).getTime()+(0,em.Z)(t))}(e,-(0,em.Z)(t))}(C,O),P={firstWeekContainsDate:k,weekStartsOn:E,locale:S,_originalDate:C};return w.match(eG).map(function(e){var t=e[0];return"p"===t||"P"===t?(0,e_[t])(e,S.formatLong):e}).join("").match(eU).map(function(r){if("''"===r)return"'";var 
o,i=r[0];if("'"===i)return(o=r.match(eX))?o[1].replace(e$,"'"):r;var a=eR[i];if(a)return null!=n&&n.useAdditionalWeekYearTokens||-1===eL.indexOf(r)||ez(r,t,String(e)),null!=n&&n.useAdditionalDayOfYearTokens||-1===eZ.indexOf(r)||ez(r,t,String(e)),a(j,r,S.localize,P);if(i.match(eY))throw RangeError("Format string contains an unescaped latin alphabet character `"+i+"`");return r}).join("")}var eJ=n(1153);let e0=(0,eJ.fn)("DateRangePicker"),e1=(e,t,n,r)=>{var o;if(n&&(e=null===(o=r.get(n))||void 0===o?void 0:o.from),e)return ea(e&&!t?e:ef([e,t]))},e2=(e,t,n,r)=>{var o,i;if(n&&(e=ea(null!==(i=null===(o=r.get(n))||void 0===o?void 0:o.to)&&void 0!==i?i:el())),e)return ea(e&&!t?e:ep([e,t]))},e6=[{value:"tdy",text:"Today",from:el()},{value:"w",text:"Last 7 days",from:ey(el(),{days:7})},{value:"t",text:"Last 30 days",from:ey(el(),{days:30})},{value:"m",text:"Month to Date",from:ec(el())},{value:"y",text:"Year to Date",from:eb(el())}],e4=(e,t,n,r)=>{let o=(null==n?void 0:n.code)||"en-US";if(!e&&!t)return"";if(e&&!t)return r?eQ(e,r):e.toLocaleDateString(o,{year:"numeric",month:"short",day:"numeric"});if(e&&t){if(function(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,eo.Z)(t);return n.getTime()===r.getTime()}(e,t))return r?eQ(e,r):e.toLocaleDateString(o,{year:"numeric",month:"short",day:"numeric"});if(e.getMonth()===t.getMonth()&&e.getFullYear()===t.getFullYear())return r?"".concat(eQ(e,r)," - ").concat(eQ(t,r)):"".concat(e.toLocaleDateString(o,{month:"short",day:"numeric"})," - \n ").concat(t.getDate(),", ").concat(t.getFullYear());{if(r)return"".concat(eQ(e,r)," - ").concat(eQ(t,r));let n={year:"numeric",month:"short",day:"numeric"};return"".concat(e.toLocaleDateString(o,n)," - \n ").concat(t.toLocaleDateString(o,n))}}return""};function e3(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getMonth();return t.setFullYear(t.getFullYear(),n+1,0),t.setHours(23,59,59,999),t}function e5(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,em.Z)(t),o=n.getFullYear(),i=n.getDate(),a=new Date(0);a.setFullYear(o,r,15),a.setHours(0,0,0,0);var l=function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getFullYear(),r=t.getMonth(),o=new Date(0);return o.setFullYear(n,r+1,0),o.setHours(0,0,0,0),o.getDate()}(a);return n.setMonth(r,Math.min(i,l)),n}function e8(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,em.Z)(t);return isNaN(n.getTime())?new Date(NaN):(n.setFullYear(r),n)}function e7(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,eo.Z)(t);return 12*(n.getFullYear()-r.getFullYear())+(n.getMonth()-r.getMonth())}function e9(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,eo.Z)(t);return n.getFullYear()===r.getFullYear()&&n.getMonth()===r.getMonth()}function te(e,t){(0,ei.Z)(2,arguments);var n=(0,eo.Z)(e),r=(0,eo.Z)(t);return n.getTime()=0&&u<=6))throw RangeError("weekStartsOn must be between 0 and 6 inclusively");var d=(0,eo.Z)(e),f=d.getDay();return d.setDate(d.getDate()-((fr.getTime()}function ti(e,t){(0,ei.Z)(2,arguments);var n=ea(e),r=ea(t);return Math.round((n.getTime()-eD(n)-(r.getTime()-eD(r)))/864e5)}function ta(e,t){(0,ei.Z)(2,arguments);var n=(0,em.Z)(t);return(0,eh.Z)(e,7*n)}function tl(e,t){(0,ei.Z)(2,arguments);var n=(0,em.Z)(t);return(0,ev.Z)(e,12*n)}function tc(e,t){(0,ei.Z)(1,arguments);var n,r,o,i,a,l,c,s,u=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.weekStartsOn)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.weekStartsOn)&&void 0!==o?o:ek.weekStartsOn)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 
0:null===(s=c.options)||void 0===s?void 0:s.weekStartsOn)&&void 0!==n?n:0);if(!(u>=0&&u<=6))throw RangeError("weekStartsOn must be between 0 and 6 inclusively");var d=(0,eo.Z)(e),f=d.getDay();return d.setDate(d.getDate()+((fe7(l,a)&&(a=(0,ev.Z)(l,-1*((void 0===s?1:s)-1))),c&&0>e7(a,c)&&(a=c),u=ec(a),f=t.month,h=(p=(0,d.useState)(u))[0],m=[void 0===f?h:f,p[1]])[0],v=m[1],[g,function(e){if(!t.disableNavigation){var n,r=ec(e);v(r),null===(n=t.onMonthChange)||void 0===n||n.call(t,r)}}]),x=b[0],w=b[1],S=function(e,t){for(var n=t.reverseMonths,r=t.numberOfMonths,o=ec(e),i=e7(ec((0,ev.Z)(o,r)),o),a=[],l=0;l=e7(i,n)))return(0,ev.Z)(i,-(r?void 0===o?1:o:1))}}(x,y),C=function(e){return S.some(function(t){return e9(e,t)})};return th.jsx(tN.Provider,{value:{currentMonth:x,displayMonths:S,goToMonth:w,goToDate:function(e,t){C(e)||(t&&te(e,t)?w((0,ev.Z)(e,1+-1*y.numberOfMonths)):w(e))},previousMonth:E,nextMonth:k,isDateDisplayed:C},children:e.children})}function tI(){var e=(0,d.useContext)(tN);if(!e)throw Error("useNavigation must be used within a NavigationProvider");return e}function tR(e){var t,n=tk(),r=n.classNames,o=n.styles,i=n.components,a=tI().goToMonth,l=function(t){a((0,ev.Z)(t,e.displayIndex?-e.displayIndex:0))},c=null!==(t=null==i?void 0:i.CaptionLabel)&&void 0!==t?t:tE,s=th.jsx(c,{id:e.id,displayMonth:e.displayMonth});return th.jsxs("div",{className:r.caption_dropdowns,style:o.caption_dropdowns,children:[th.jsx("div",{className:r.vhidden,children:s}),th.jsx(tj,{onChange:l,displayMonth:e.displayMonth}),th.jsx(tP,{onChange:l,displayMonth:e.displayMonth})]})}function tT(e){return th.jsx("svg",tu({width:"16px",height:"16px",viewBox:"0 0 120 120"},e,{children:th.jsx("path",{d:"M69.490332,3.34314575 C72.6145263,0.218951416 77.6798462,0.218951416 80.8040405,3.34314575 C83.8617626,6.40086786 83.9268205,11.3179931 80.9992143,14.4548388 L80.8040405,14.6568542 L35.461,60 L80.8040405,105.343146 C83.8617626,108.400868 83.9268205,113.317993 80.9992143,116.454839 L80.8040405,116.656854 C77.7463184,119.714576 72.8291931,119.779634 69.6923475,116.852028 L69.490332,116.656854 L18.490332,65.6568542 C15.4326099,62.5991321 15.367552,57.6820069 18.2951583,54.5451612 L18.490332,54.3431458 L69.490332,3.34314575 Z",fill:"currentColor",fillRule:"nonzero"})}))}function tA(e){return th.jsx("svg",tu({width:"16px",height:"16px",viewBox:"0 0 120 120"},e,{children:th.jsx("path",{d:"M49.8040405,3.34314575 C46.6798462,0.218951416 41.6145263,0.218951416 38.490332,3.34314575 C35.4326099,6.40086786 35.367552,11.3179931 38.2951583,14.4548388 L38.490332,14.6568542 L83.8333725,60 L38.490332,105.343146 C35.4326099,108.400868 35.367552,113.317993 38.2951583,116.454839 L38.490332,116.656854 C41.5480541,119.714576 46.4651794,119.779634 49.602025,116.852028 L49.8040405,116.656854 L100.804041,65.6568542 C103.861763,62.5991321 103.926821,57.6820069 100.999214,54.5451612 L100.804041,54.3431458 L49.8040405,3.34314575 Z",fill:"currentColor"})}))}var t_=(0,d.forwardRef)(function(e,t){var n=tk(),r=n.classNames,o=n.styles,i=[r.button_reset,r.button];e.className&&i.push(e.className);var a=i.join(" "),l=tu(tu({},o.button_reset),o.button);return e.style&&Object.assign(l,e.style),th.jsx("button",tu({},e,{ref:t,type:"button",className:a,style:l}))});function tD(e){var t,n,r=tk(),o=r.dir,i=r.locale,a=r.classNames,l=r.styles,c=r.labels,s=c.labelPrevious,u=c.labelNext,d=r.components;if(!e.nextMonth&&!e.previousMonth)return th.jsx(th.Fragment,{});var f=s(e.previousMonth,{locale:i}),p=[a.nav_button,a.nav_button_previous].join(" 
"),h=u(e.nextMonth,{locale:i}),m=[a.nav_button,a.nav_button_next].join(" "),g=null!==(t=null==d?void 0:d.IconRight)&&void 0!==t?t:tA,v=null!==(n=null==d?void 0:d.IconLeft)&&void 0!==n?n:tT;return th.jsxs("div",{className:a.nav,style:l.nav,children:[!e.hidePrevious&&th.jsx(t_,{name:"previous-month","aria-label":f,className:p,style:l.nav_button_previous,disabled:!e.previousMonth,onClick:e.onPreviousClick,children:"rtl"===o?th.jsx(g,{className:a.nav_icon,style:l.nav_icon}):th.jsx(v,{className:a.nav_icon,style:l.nav_icon})}),!e.hideNext&&th.jsx(t_,{name:"next-month","aria-label":h,className:m,style:l.nav_button_next,disabled:!e.nextMonth,onClick:e.onNextClick,children:"rtl"===o?th.jsx(v,{className:a.nav_icon,style:l.nav_icon}):th.jsx(g,{className:a.nav_icon,style:l.nav_icon})})]})}function tZ(e){var t=tk().numberOfMonths,n=tI(),r=n.previousMonth,o=n.nextMonth,i=n.goToMonth,a=n.displayMonths,l=a.findIndex(function(t){return e9(e.displayMonth,t)}),c=0===l,s=l===a.length-1;return th.jsx(tD,{displayMonth:e.displayMonth,hideNext:t>1&&(c||!s),hidePrevious:t>1&&(s||!c),nextMonth:o,previousMonth:r,onPreviousClick:function(){r&&i(r)},onNextClick:function(){o&&i(o)}})}function tL(e){var t,n,r=tk(),o=r.classNames,i=r.disableNavigation,a=r.styles,l=r.captionLayout,c=r.components,s=null!==(t=null==c?void 0:c.CaptionLabel)&&void 0!==t?t:tE;return n=i?th.jsx(s,{id:e.id,displayMonth:e.displayMonth}):"dropdown"===l?th.jsx(tR,{displayMonth:e.displayMonth,id:e.id}):"dropdown-buttons"===l?th.jsxs(th.Fragment,{children:[th.jsx(tR,{displayMonth:e.displayMonth,displayIndex:e.displayIndex,id:e.id}),th.jsx(tZ,{displayMonth:e.displayMonth,displayIndex:e.displayIndex,id:e.id})]}):th.jsxs(th.Fragment,{children:[th.jsx(s,{id:e.id,displayMonth:e.displayMonth,displayIndex:e.displayIndex}),th.jsx(tZ,{displayMonth:e.displayMonth,id:e.id})]}),th.jsx("div",{className:o.caption,style:a.caption,children:n})}function tz(e){var t=tk(),n=t.footer,r=t.styles,o=t.classNames.tfoot;return n?th.jsx("tfoot",{className:o,style:r.tfoot,children:th.jsx("tr",{children:th.jsx("td",{colSpan:8,children:n})})}):th.jsx(th.Fragment,{})}function tB(){var e=tk(),t=e.classNames,n=e.styles,r=e.showWeekNumber,o=e.locale,i=e.weekStartsOn,a=e.ISOWeek,l=e.formatters.formatWeekdayName,c=e.labels.labelWeekday,s=function(e,t,n){for(var r=n?tn(new Date):tt(new Date,{locale:e,weekStartsOn:t}),o=[],i=0;i<7;i++){var a=(0,eh.Z)(r,i);o.push(a)}return o}(o,i,a);return th.jsxs("tr",{style:n.head_row,className:t.head_row,children:[r&&th.jsx("td",{style:n.head_cell,className:t.head_cell}),s.map(function(e,r){return th.jsx("th",{scope:"col",className:t.head_cell,style:n.head_cell,"aria-label":c(e,{locale:o}),children:l(e,{locale:o})},r)})]})}function tF(){var e,t=tk(),n=t.classNames,r=t.styles,o=t.components,i=null!==(e=null==o?void 0:o.HeadRow)&&void 0!==e?e:tB;return th.jsx("thead",{style:r.head,className:n.head,children:th.jsx(i,{})})}function tH(e){var t=tk(),n=t.locale,r=t.formatters.formatDay;return th.jsx(th.Fragment,{children:r(e.date,{locale:n})})}var tq=(0,d.createContext)(void 0);function tW(e){return tm(e.initialProps)?th.jsx(tK,{initialProps:e.initialProps,children:e.children}):th.jsx(tq.Provider,{value:{selected:void 0,modifiers:{disabled:[]}},children:e.children})}function tK(e){var t=e.initialProps,n=e.children,r=t.selected,o=t.min,i=t.max,a={disabled:[]};return r&&a.disabled.push(function(e){var t=i&&r.length>i-1,n=r.some(function(t){return 
tr(t,e)});return!!(t&&!n)}),th.jsx(tq.Provider,{value:{selected:r,onDayClick:function(e,n,a){if(null===(l=t.onDayClick)||void 0===l||l.call(t,e,n,a),(!n.selected||!o||(null==r?void 0:r.length)!==o)&&(n.selected||!i||(null==r?void 0:r.length)!==i)){var l,c,s=r?td([],r,!0):[];if(n.selected){var u=s.findIndex(function(t){return tr(e,t)});s.splice(u,1)}else s.push(e);null===(c=t.onSelect)||void 0===c||c.call(t,s,e,n,a)}},modifiers:a},children:n})}function tV(){var e=(0,d.useContext)(tq);if(!e)throw Error("useSelectMultiple must be used within a SelectMultipleProvider");return e}var tU=(0,d.createContext)(void 0);function tG(e){return tg(e.initialProps)?th.jsx(tX,{initialProps:e.initialProps,children:e.children}):th.jsx(tU.Provider,{value:{selected:void 0,modifiers:{range_start:[],range_end:[],range_middle:[],disabled:[]}},children:e.children})}function tX(e){var t=e.initialProps,n=e.children,r=t.selected,o=r||{},i=o.from,a=o.to,l=t.min,c=t.max,s={range_start:[],range_end:[],range_middle:[],disabled:[]};if(i?(s.range_start=[i],a?(s.range_end=[a],tr(i,a)||(s.range_middle=[{after:i,before:a}])):s.range_end=[i]):a&&(s.range_start=[a],s.range_end=[a]),l&&(i&&!a&&s.disabled.push({after:eg(i,l-1),before:(0,eh.Z)(i,l-1)}),i&&a&&s.disabled.push({after:i,before:(0,eh.Z)(i,l-1)}),!i&&a&&s.disabled.push({after:eg(a,l-1),before:(0,eh.Z)(a,l-1)})),c){if(i&&!a&&(s.disabled.push({before:(0,eh.Z)(i,-c+1)}),s.disabled.push({after:(0,eh.Z)(i,c-1)})),i&&a){var u=c-(ti(a,i)+1);s.disabled.push({before:eg(i,u)}),s.disabled.push({after:(0,eh.Z)(a,u)})}!i&&a&&(s.disabled.push({before:(0,eh.Z)(a,-c+1)}),s.disabled.push({after:(0,eh.Z)(a,c-1)}))}return th.jsx(tU.Provider,{value:{selected:r,onDayClick:function(e,n,o){null===(c=t.onDayClick)||void 0===c||c.call(t,e,n,o);var i,a,l,c,s,u=(a=(i=r||{}).from,l=i.to,a&&l?tr(l,e)&&tr(a,e)?void 0:tr(l,e)?{from:l,to:void 0}:tr(a,e)?void 0:to(a,e)?{from:e,to:l}:{from:a,to:e}:l?to(e,l)?{from:l,to:e}:{from:e,to:l}:a?te(e,a)?{from:e,to:a}:{from:a,to:e}:{from:e,to:void 0});null===(s=t.onSelect)||void 0===s||s.call(t,u,e,n,o)},modifiers:s},children:n})}function t$(){var e=(0,d.useContext)(tU);if(!e)throw Error("useSelectRange must be used within a SelectRangeProvider");return e}function tY(e){return Array.isArray(e)?td([],e,!0):void 0!==e?[e]:[]}(l=s||(s={})).Outside="outside",l.Disabled="disabled",l.Selected="selected",l.Hidden="hidden",l.Today="today",l.RangeStart="range_start",l.RangeEnd="range_end",l.RangeMiddle="range_middle";var tQ=s.Selected,tJ=s.Disabled,t0=s.Hidden,t1=s.Today,t2=s.RangeEnd,t6=s.RangeMiddle,t4=s.RangeStart,t3=s.Outside,t5=(0,d.createContext)(void 0);function t8(e){var t,n,r,o=tk(),i=tV(),a=t$(),l=((t={})[tQ]=tY(o.selected),t[tJ]=tY(o.disabled),t[t0]=tY(o.hidden),t[t1]=[o.today],t[t2]=[],t[t6]=[],t[t4]=[],t[t3]=[],o.fromDate&&t[tJ].push({before:o.fromDate}),o.toDate&&t[tJ].push({after:o.toDate}),tm(o)?t[tJ]=t[tJ].concat(i.modifiers[tJ]):tg(o)&&(t[tJ]=t[tJ].concat(a.modifiers[tJ]),t[t4]=a.modifiers[t4],t[t6]=a.modifiers[t6],t[t2]=a.modifiers[t2]),t),c=(n=o.modifiers,r={},Object.entries(n).forEach(function(e){var t=e[0],n=e[1];r[t]=tY(n)}),r),s=tu(tu({},l),c);return th.jsx(t5.Provider,{value:s,children:e.children})}function t7(){var e=(0,d.useContext)(t5);if(!e)throw Error("useModifiers must be used within a ModifiersProvider");return e}function t9(e,t,n){var r=Object.keys(t).reduce(function(n,r){return t[r].some(function(t){if("boolean"==typeof t)return t;if(ex(t))return tr(e,t);if(Array.isArray(t)&&t.every(ex))return t.includes(e);if(t&&"object"==typeof 
t&&"from"in t)return r=t.from,o=t.to,r&&o?(0>ti(o,r)&&(r=(n=[o,r])[0],o=n[1]),ti(e,r)>=0&&ti(o,e)>=0):o?tr(o,e):!!r&&tr(r,e);if(t&&"object"==typeof t&&"dayOfWeek"in t)return t.dayOfWeek.includes(e.getDay());if(t&&"object"==typeof t&&"before"in t&&"after"in t){var n,r,o,i=ti(t.before,e),a=ti(t.after,e),l=i>0,c=a<0;return to(t.before,t.after)?c&&l:l||c}return t&&"object"==typeof t&&"after"in t?ti(e,t.after)>0:t&&"object"==typeof t&&"before"in t?ti(t.before,e)>0:"function"==typeof t&&t(e)})&&n.push(r),n},[]),o={};return r.forEach(function(e){return o[e]=!0}),n&&!e9(e,n)&&(o.outside=!0),o}var ne=(0,d.createContext)(void 0);function nt(e){var t=tI(),n=t7(),r=(0,d.useState)(),o=r[0],i=r[1],a=(0,d.useState)(),l=a[0],c=a[1],s=function(e,t){for(var n,r,o=ec(e[0]),i=e3(e[e.length-1]),a=o;a<=i;){var l=t9(a,t);if(!(!l.disabled&&!l.hidden)){a=(0,eh.Z)(a,1);continue}if(l.selected)return a;l.today&&!r&&(r=a),n||(n=a),a=(0,eh.Z)(a,1)}return r||n}(t.displayMonths,n),u=(null!=o?o:l&&t.isDateDisplayed(l))?l:s,f=function(e){i(e)},p=tk(),h=function(e,r){if(o){var i=function e(t,n){var r=n.moveBy,o=n.direction,i=n.context,a=n.modifiers,l=n.retry,c=void 0===l?{count:0,lastFocused:t}:l,s=i.weekStartsOn,u=i.fromDate,d=i.toDate,f=i.locale,p=({day:eh.Z,week:ta,month:ev.Z,year:tl,startOfWeek:function(e){return i.ISOWeek?tn(e):tt(e,{locale:f,weekStartsOn:s})},endOfWeek:function(e){return i.ISOWeek?ts(e):tc(e,{locale:f,weekStartsOn:s})}})[r](t,"after"===o?1:-1);"before"===o&&u?p=ef([u,p]):"after"===o&&d&&(p=ep([d,p]));var h=!0;if(a){var m=t9(p,a);h=!m.disabled&&!m.hidden}return h?p:c.count>365?c.lastFocused:e(p,{moveBy:r,direction:o,context:i,modifiers:a,retry:tu(tu({},c),{count:c.count+1})})}(o,{moveBy:e,direction:r,context:p,modifiers:n});tr(o,i)||(t.goToDate(i,o),f(i))}};return th.jsx(ne.Provider,{value:{focusedDay:o,focusTarget:u,blur:function(){c(o),i(void 0)},focus:f,focusDayAfter:function(){return h("day","after")},focusDayBefore:function(){return h("day","before")},focusWeekAfter:function(){return h("week","after")},focusWeekBefore:function(){return h("week","before")},focusMonthBefore:function(){return h("month","before")},focusMonthAfter:function(){return h("month","after")},focusYearBefore:function(){return h("year","before")},focusYearAfter:function(){return h("year","after")},focusStartOfWeek:function(){return h("startOfWeek","before")},focusEndOfWeek:function(){return h("endOfWeek","after")}},children:e.children})}function nn(){var e=(0,d.useContext)(ne);if(!e)throw Error("useFocusContext must be used within a FocusProvider");return e}var nr=(0,d.createContext)(void 0);function no(e){return tv(e.initialProps)?th.jsx(ni,{initialProps:e.initialProps,children:e.children}):th.jsx(nr.Provider,{value:{selected:void 0},children:e.children})}function ni(e){var t=e.initialProps,n=e.children,r={selected:t.selected,onDayClick:function(e,n,r){var o,i,a;if(null===(o=t.onDayClick)||void 0===o||o.call(t,e,n,r),n.selected&&!t.required){null===(i=t.onSelect)||void 0===i||i.call(t,void 0,e,n,r);return}null===(a=t.onSelect)||void 0===a||a.call(t,e,e,n,r)}};return th.jsx(nr.Provider,{value:r,children:n})}function na(){var e=(0,d.useContext)(nr);if(!e)throw Error("useSelectSingle must be used within a SelectSingleProvider");return e}function nl(e){var 
t,n,r,o,i,a,l,c,u,f,p,h,m,g,v,y,b,x,w,S,k,E,C,O,j,P,N,M,I,R,T,A,_,D,Z,L,z,B,F,H,q,W,K=(0,d.useRef)(null),V=(t=e.date,n=e.displayMonth,a=tk(),l=nn(),c=t9(t,t7(),n),u=tk(),f=na(),p=tV(),h=t$(),g=(m=nn()).focusDayAfter,v=m.focusDayBefore,y=m.focusWeekAfter,b=m.focusWeekBefore,x=m.blur,w=m.focus,S=m.focusMonthBefore,k=m.focusMonthAfter,E=m.focusYearBefore,C=m.focusYearAfter,O=m.focusStartOfWeek,j=m.focusEndOfWeek,P={onClick:function(e){var n,r,o,i;tv(u)?null===(n=f.onDayClick)||void 0===n||n.call(f,t,c,e):tm(u)?null===(r=p.onDayClick)||void 0===r||r.call(p,t,c,e):tg(u)?null===(o=h.onDayClick)||void 0===o||o.call(h,t,c,e):null===(i=u.onDayClick)||void 0===i||i.call(u,t,c,e)},onFocus:function(e){var n;w(t),null===(n=u.onDayFocus)||void 0===n||n.call(u,t,c,e)},onBlur:function(e){var n;x(),null===(n=u.onDayBlur)||void 0===n||n.call(u,t,c,e)},onKeyDown:function(e){var n;switch(e.key){case"ArrowLeft":e.preventDefault(),e.stopPropagation(),"rtl"===u.dir?g():v();break;case"ArrowRight":e.preventDefault(),e.stopPropagation(),"rtl"===u.dir?v():g();break;case"ArrowDown":e.preventDefault(),e.stopPropagation(),y();break;case"ArrowUp":e.preventDefault(),e.stopPropagation(),b();break;case"PageUp":e.preventDefault(),e.stopPropagation(),e.shiftKey?E():S();break;case"PageDown":e.preventDefault(),e.stopPropagation(),e.shiftKey?C():k();break;case"Home":e.preventDefault(),e.stopPropagation(),O();break;case"End":e.preventDefault(),e.stopPropagation(),j()}null===(n=u.onDayKeyDown)||void 0===n||n.call(u,t,c,e)},onKeyUp:function(e){var n;null===(n=u.onDayKeyUp)||void 0===n||n.call(u,t,c,e)},onMouseEnter:function(e){var n;null===(n=u.onDayMouseEnter)||void 0===n||n.call(u,t,c,e)},onMouseLeave:function(e){var n;null===(n=u.onDayMouseLeave)||void 0===n||n.call(u,t,c,e)},onPointerEnter:function(e){var n;null===(n=u.onDayPointerEnter)||void 0===n||n.call(u,t,c,e)},onPointerLeave:function(e){var n;null===(n=u.onDayPointerLeave)||void 0===n||n.call(u,t,c,e)},onTouchCancel:function(e){var n;null===(n=u.onDayTouchCancel)||void 0===n||n.call(u,t,c,e)},onTouchEnd:function(e){var n;null===(n=u.onDayTouchEnd)||void 0===n||n.call(u,t,c,e)},onTouchMove:function(e){var n;null===(n=u.onDayTouchMove)||void 0===n||n.call(u,t,c,e)},onTouchStart:function(e){var n;null===(n=u.onDayTouchStart)||void 0===n||n.call(u,t,c,e)}},N=tk(),M=na(),I=tV(),R=t$(),T=tv(N)?M.selected:tm(N)?I.selected:tg(N)?R.selected:void 0,A=!!(a.onDayClick||"default"!==a.mode),(0,d.useEffect)(function(){var e;!c.outside&&l.focusedDay&&A&&tr(l.focusedDay,t)&&(null===(e=K.current)||void 0===e||e.focus())},[l.focusedDay,t,K,A,c.outside]),D=(_=[a.classNames.day],Object.keys(c).forEach(function(e){var t=a.modifiersClassNames[e];if(t)_.push(t);else if(Object.values(s).includes(e)){var n=a.classNames["day_".concat(e)];n&&_.push(n)}}),_).join(" "),Z=tu({},a.styles.day),Object.keys(c).forEach(function(e){var t;Z=tu(tu({},Z),null===(t=a.modifiersStyles)||void 0===t?void 0:t[e])}),L=Z,z=!!(c.outside&&!a.showOutsideDays||c.hidden),B=null!==(i=null===(o=a.components)||void 0===o?void 0:o.DayContent)&&void 0!==i?i:tH,F={style:L,className:D,children:th.jsx(B,{date:t,displayMonth:n,activeModifiers:c}),role:"gridcell"},H=l.focusTarget&&tr(l.focusTarget,t)&&!c.outside,q=l.focusedDay&&tr(l.focusedDay,t),W=tu(tu(tu({},F),((r={disabled:c.disabled,role:"gridcell"})["aria-selected"]=c.selected,r.tabIndex=q||H?0:-1,r)),P),{isButton:A,isHidden:z,activeModifiers:c,selectedDays:T,buttonProps:W,divProps:F});return 
V.isHidden?th.jsx("div",{role:"gridcell"}):V.isButton?th.jsx(t_,tu({name:"day",ref:K},V.buttonProps)):th.jsx("div",tu({},V.divProps))}function nc(e){var t=e.number,n=e.dates,r=tk(),o=r.onWeekNumberClick,i=r.styles,a=r.classNames,l=r.locale,c=r.labels.labelWeekNumber,s=(0,r.formatters.formatWeekNumber)(Number(t),{locale:l});if(!o)return th.jsx("span",{className:a.weeknumber,style:i.weeknumber,children:s});var u=c(Number(t),{locale:l});return th.jsx(t_,{name:"week-number","aria-label":u,className:a.weeknumber,style:i.weeknumber,onClick:function(e){o(t,n,e)},children:s})}function ns(e){var t,n,r,o=tk(),i=o.styles,a=o.classNames,l=o.showWeekNumber,c=o.components,s=null!==(t=null==c?void 0:c.Day)&&void 0!==t?t:nl,u=null!==(n=null==c?void 0:c.WeekNumber)&&void 0!==n?n:nc;return l&&(r=th.jsx("td",{className:a.cell,style:i.cell,children:th.jsx(u,{number:e.weekNumber,dates:e.dates})})),th.jsxs("tr",{className:a.row,style:i.row,children:[r,e.dates.map(function(t){return th.jsx("td",{className:a.cell,style:i.cell,role:"presentation",children:th.jsx(s,{displayMonth:e.displayMonth,date:t})},function(e){return(0,ei.Z)(1,arguments),Math.floor(function(e){return(0,ei.Z)(1,arguments),(0,eo.Z)(e).getTime()}(e)/1e3)}(t))})]})}function nu(e,t,n){for(var r=(null==n?void 0:n.ISOWeek)?ts(t):tc(t,n),o=(null==n?void 0:n.ISOWeek)?tn(e):tt(e,n),i=ti(r,o),a=[],l=0;l<=i;l++)a.push((0,eh.Z)(o,l));return a.reduce(function(e,t){var r=(null==n?void 0:n.ISOWeek)?function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e);return Math.round((tn(t).getTime()-(function(e){(0,ei.Z)(1,arguments);var t=function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getFullYear(),r=new Date(0);r.setFullYear(n+1,0,4),r.setHours(0,0,0,0);var o=tn(r),i=new Date(0);i.setFullYear(n,0,4),i.setHours(0,0,0,0);var a=tn(i);return t.getTime()>=o.getTime()?n+1:t.getTime()>=a.getTime()?n:n-1}(e),n=new Date(0);return n.setFullYear(t,0,4),n.setHours(0,0,0,0),tn(n)})(t).getTime())/6048e5)+1}(t):function(e,t){(0,ei.Z)(1,arguments);var n=(0,eo.Z)(e);return Math.round((tt(n,t).getTime()-(function(e,t){(0,ei.Z)(1,arguments);var n,r,o,i,a,l,c,s,u=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.firstWeekContainsDate)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.firstWeekContainsDate)&&void 0!==o?o:ek.firstWeekContainsDate)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 0:null===(s=c.options)||void 0===s?void 0:s.firstWeekContainsDate)&&void 0!==n?n:1),d=function(e,t){(0,ei.Z)(1,arguments);var n,r,o,i,a,l,c,s,u=(0,eo.Z)(e),d=u.getFullYear(),f=(0,em.Z)(null!==(n=null!==(r=null!==(o=null!==(i=null==t?void 0:t.firstWeekContainsDate)&&void 0!==i?i:null==t?void 0:null===(a=t.locale)||void 0===a?void 0:null===(l=a.options)||void 0===l?void 0:l.firstWeekContainsDate)&&void 0!==o?o:ek.firstWeekContainsDate)&&void 0!==r?r:null===(c=ek.locale)||void 0===c?void 0:null===(s=c.options)||void 0===s?void 0:s.firstWeekContainsDate)&&void 0!==n?n:1);if(!(f>=1&&f<=7))throw RangeError("firstWeekContainsDate must be between 1 and 7 inclusively");var p=new Date(0);p.setFullYear(d+1,0,f),p.setHours(0,0,0,0);var h=tt(p,t),m=new Date(0);m.setFullYear(d,0,f),m.setHours(0,0,0,0);var g=tt(m,t);return u.getTime()>=h.getTime()?d+1:u.getTime()>=g.getTime()?d:d-1}(e,t),f=new Date(0);return f.setFullYear(d,0,u),f.setHours(0,0,0,0),tt(f,t)})(n,t).getTime())/6048e5)+1}(t,n),o=e.find(function(e){return e.weekNumber===r});return o?o.dates.push(t):e.push({weekNumber:r,dates:[t]}),e},[])}function nd(e){var 
t,n,r,o=tk(),i=o.locale,a=o.classNames,l=o.styles,c=o.hideHead,s=o.fixedWeeks,u=o.components,d=o.weekStartsOn,f=o.firstWeekContainsDate,p=o.ISOWeek,h=function(e,t){var n=nu(ec(e),e3(e),t);if(null==t?void 0:t.useFixedWeeks){var r=function(e,t){return(0,ei.Z)(1,arguments),function(e,t,n){(0,ei.Z)(2,arguments);var r=tt(e,n),o=tt(t,n);return Math.round((r.getTime()-eD(r)-(o.getTime()-eD(o)))/6048e5)}(function(e){(0,ei.Z)(1,arguments);var t=(0,eo.Z)(e),n=t.getMonth();return t.setFullYear(t.getFullYear(),n+1,0),t.setHours(0,0,0,0),t}(e),ec(e),t)+1}(e,t);if(r<6){var o=n[n.length-1],i=o.dates[o.dates.length-1],a=ta(i,6-r),l=nu(ta(i,1),a,t);n.push.apply(n,l)}}return n}(e.displayMonth,{useFixedWeeks:!!s,ISOWeek:p,locale:i,weekStartsOn:d,firstWeekContainsDate:f}),m=null!==(t=null==u?void 0:u.Head)&&void 0!==t?t:tF,g=null!==(n=null==u?void 0:u.Row)&&void 0!==n?n:ns,v=null!==(r=null==u?void 0:u.Footer)&&void 0!==r?r:tz;return th.jsxs("table",{id:e.id,className:a.table,style:l.table,role:"grid","aria-labelledby":e["aria-labelledby"],children:[!c&&th.jsx(m,{}),th.jsx("tbody",{className:a.tbody,style:l.tbody,children:h.map(function(t){return th.jsx(g,{displayMonth:e.displayMonth,dates:t.dates,weekNumber:t.weekNumber},t.weekNumber)})}),th.jsx(v,{displayMonth:e.displayMonth})]})}var nf="undefined"!=typeof window&&window.document&&window.document.createElement?d.useLayoutEffect:d.useEffect,np=!1,nh=0;function nm(){return"react-day-picker-".concat(++nh)}function ng(e){var t,n,r,o,i,a,l,c,s=tk(),u=s.dir,f=s.classNames,p=s.styles,h=s.components,m=tI().displayMonths,g=(r=null!=(t=s.id?"".concat(s.id,"-").concat(e.displayIndex):void 0)?t:np?nm():null,i=(o=(0,d.useState)(r))[0],a=o[1],nf(function(){null===i&&a(nm())},[]),(0,d.useEffect)(function(){!1===np&&(np=!0)},[]),null!==(n=null!=t?t:i)&&void 0!==n?n:void 0),v=s.id?"".concat(s.id,"-grid-").concat(e.displayIndex):void 0,y=[f.month],b=p.month,x=0===e.displayIndex,w=e.displayIndex===m.length-1,S=!x&&!w;"rtl"===u&&(w=(l=[x,w])[0],x=l[1]),x&&(y.push(f.caption_start),b=tu(tu({},b),p.caption_start)),w&&(y.push(f.caption_end),b=tu(tu({},b),p.caption_end)),S&&(y.push(f.caption_between),b=tu(tu({},b),p.caption_between));var k=null!==(c=null==h?void 0:h.Caption)&&void 0!==c?c:tL;return th.jsxs("div",{className:y.join(" "),style:b,children:[th.jsx(k,{id:g,displayMonth:e.displayMonth,displayIndex:e.displayIndex}),th.jsx(nd,{id:v,"aria-labelledby":g,displayMonth:e.displayMonth})]},e.displayIndex)}function nv(e){var t=tk(),n=t.classNames,r=t.styles;return th.jsx("div",{className:n.months,style:r.months,children:e.children})}function ny(e){var t,n,r=e.initialProps,o=tk(),i=nn(),a=tI(),l=(0,d.useState)(!1),c=l[0],s=l[1];(0,d.useEffect)(function(){o.initialFocus&&i.focusTarget&&(c||(i.focus(i.focusTarget),s(!0)))},[o.initialFocus,c,i.focus,i.focusTarget,i]);var u=[o.classNames.root,o.className];o.numberOfMonths>1&&u.push(o.classNames.multiple_months),o.showWeekNumber&&u.push(o.classNames.with_weeknumber);var f=tu(tu({},o.styles.root),o.style),p=Object.keys(r).filter(function(e){return e.startsWith("data-")}).reduce(function(e,t){var n;return tu(tu({},e),((n={})[t]=r[t],n))},{}),h=null!==(n=null===(t=r.components)||void 0===t?void 0:t.Months)&&void 0!==n?n:nv;return th.jsx("div",tu({className:u.join(" "),style:f,dir:o.dir,id:o.id,nonce:r.nonce,title:r.title,lang:r.lang},p,{children:th.jsx(h,{children:a.displayMonths.map(function(e,t){return th.jsx(ng,{displayIndex:t,displayMonth:e},t)})})}))}function nb(e){var t=e.children,n=function(e,t){var n={};for(var r in 
e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n}(e,["children"]);return th.jsx(tS,{initialProps:n,children:th.jsx(tM,{children:th.jsx(no,{initialProps:n,children:th.jsx(tW,{initialProps:n,children:th.jsx(tG,{initialProps:n,children:th.jsx(t8,{children:th.jsx(nt,{children:t})})})})})})})}function nx(e){return th.jsx(nb,tu({},e,{children:th.jsx(ny,{initialProps:e})}))}let nw=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M10.8284 12.0007L15.7782 16.9504L14.364 18.3646L8 12.0007L14.364 5.63672L15.7782 7.05093L10.8284 12.0007Z"}))},nS=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M13.1717 12.0007L8.22192 7.05093L9.63614 5.63672L16.0001 12.0007L9.63614 18.3646L8.22192 16.9504L13.1717 12.0007Z"}))},nk=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M4.83582 12L11.0429 18.2071L12.4571 16.7929L7.66424 12L12.4571 7.20712L11.0429 5.79291L4.83582 12ZM10.4857 12L16.6928 18.2071L18.107 16.7929L13.3141 12L18.107 7.20712L16.6928 5.79291L10.4857 12Z"}))},nE=e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M19.1642 12L12.9571 5.79291L11.5429 7.20712L16.3358 12L11.5429 16.7929L12.9571 18.2071L19.1642 12ZM13.5143 12L7.30722 5.79291L5.89301 7.20712L10.6859 12L5.89301 16.7929L7.30722 18.2071L13.5143 12Z"}))};var nC=n(84264);n(41649);var nO=n(1526),nj=n(7084),nP=n(26898);let nN={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-1",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-1.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-1.5",fontSize:"text-lg"},xl:{paddingX:"px-3.5",paddingY:"py-1.5",fontSize:"text-xl"}},nM={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-0.5",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-0.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-0.5",fontSize:"text-lg"},xl:{paddingX:"px-4",paddingY:"py-1",fontSize:"text-xl"}},nI={xs:{height:"h-4",width:"w-4"},sm:{height:"h-4",width:"w-4"},md:{height:"h-4",width:"w-4"},lg:{height:"h-5",width:"w-5"},xl:{height:"h-6",width:"w-6"}},nR={[nj.wu.Increase]:{bgColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.text).textColor},[nj.wu.ModerateIncrease]:{bgColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Emerald,nP.K.text).textColor},[nj.wu.Decrease]:{bgColor:(0,eJ.bM)(nj.fr.Rose,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Rose,nP.K.text).textColor},[nj.wu.ModerateDecrease]:{bgColor:(0,eJ.bM)(nj.fr.Rose,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Rose,nP.K.text).textColor},[nj.wu.Unchanged]:{bgColor:(0,eJ.bM)(nj.fr.Orange,nP.K.background).bgColor,textColor:(0,eJ.bM)(nj.fr.Orange,nP.K.text).textColor}},nT={[nj.wu.Increase]:e=>{var t=(0,u._T)(e,[]);return 
d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M13.0001 7.82843V20H11.0001V7.82843L5.63614 13.1924L4.22192 11.7782L12.0001 4L19.7783 11.7782L18.3641 13.1924L13.0001 7.82843Z"}))},[nj.wu.ModerateIncrease]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M16.0037 9.41421L7.39712 18.0208L5.98291 16.6066L14.5895 8H7.00373V6H18.0037V17H16.0037V9.41421Z"}))},[nj.wu.Decrease]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M13.0001 16.1716L18.3641 10.8076L19.7783 12.2218L12.0001 20L4.22192 12.2218L5.63614 10.8076L11.0001 16.1716V4H13.0001V16.1716Z"}))},[nj.wu.ModerateDecrease]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M14.5895 16.0032L5.98291 7.39664L7.39712 5.98242L16.0037 14.589V7.00324H18.0037V18.0032H7.00373V16.0032H14.5895Z"}))},[nj.wu.Unchanged]:e=>{var t=(0,u._T)(e,[]);return d.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),d.createElement("path",{d:"M16.1716 10.9999L10.8076 5.63589L12.2218 4.22168L20 11.9999L12.2218 19.778L10.8076 18.3638L16.1716 12.9999H4V10.9999H16.1716Z"}))}},nA=(0,eJ.fn)("BadgeDelta");d.forwardRef((e,t)=>{let{deltaType:n=nj.wu.Increase,isIncreasePositive:r=!0,size:o=nj.u8.SM,tooltip:i,children:a,className:l}=e,c=(0,u._T)(e,["deltaType","isIncreasePositive","size","tooltip","children","className"]),s=nT[n],f=(0,eJ.Fo)(n,r),p=a?nM:nN,{tooltipProps:h,getReferenceProps:m}=(0,nO.l)();return d.createElement("span",Object.assign({ref:(0,eJ.lq)([t,h.refs.setReference]),className:(0,es.q)(nA("root"),"w-max flex-shrink-0 inline-flex justify-center items-center cursor-default rounded-tremor-full bg-opacity-20 dark:bg-opacity-25",nR[f].bgColor,nR[f].textColor,p[o].paddingX,p[o].paddingY,p[o].fontSize,l)},m,c),d.createElement(nO.Z,Object.assign({text:i},h)),d.createElement(s,{className:(0,es.q)(nA("icon"),"shrink-0",a?(0,es.q)("-ml-1 mr-1.5"):nI[o].height,nI[o].width)}),a?d.createElement("p",{className:(0,es.q)(nA("text"),"text-sm whitespace-nowrap")},a):null)}).displayName="BadgeDelta";var n_=n(47323);let nD=e=>{var{onClick:t,icon:n}=e,r=(0,u._T)(e,["onClick","icon"]);return d.createElement("button",Object.assign({type:"button",className:(0,es.q)("flex items-center justify-center p-1 h-7 w-7 outline-none focus:ring-2 transition duration-100 border border-tremor-border dark:border-dark-tremor-border hover:bg-tremor-background-muted dark:hover:bg-dark-tremor-background-muted rounded-tremor-small focus:border-tremor-brand-subtle select-none dark:focus:border-dark-tremor-brand-subtle focus:ring-tremor-brand-muted dark:focus:ring-dark-tremor-brand-muted text-tremor-content-subtle dark:text-dark-tremor-content-subtle hover:text-tremor-content dark:hover:text-dark-tremor-content")},r),d.createElement(n_.Z,{onClick:t,icon:n,variant:"simple",color:"slate",size:"sm"}))};function nZ(e){var{mode:t,defaultMonth:n,selected:r,onSelect:o,locale:i,disabled:a,enableYearNavigation:l,classNames:c,weekStartsOn:s=0}=e,f=(0,u._T)(e,["mode","defaultMonth","selected","onSelect","locale","disabled","enableYearNavigation","classNames","weekStartsOn"]);return 
d.createElement(nx,Object.assign({showOutsideDays:!0,mode:t,defaultMonth:n,selected:r,onSelect:o,locale:i,disabled:a,weekStartsOn:s,classNames:Object.assign({months:"flex flex-col sm:flex-row space-y-4 sm:space-x-4 sm:space-y-0",month:"space-y-4",caption:"flex justify-center pt-2 relative items-center",caption_label:"text-tremor-default text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis font-medium",nav:"space-x-1 flex items-center",nav_button:"flex items-center justify-center p-1 h-7 w-7 outline-none focus:ring-2 transition duration-100 border border-tremor-border dark:border-dark-tremor-border hover:bg-tremor-background-muted dark:hover:bg-dark-tremor-background-muted rounded-tremor-small focus:border-tremor-brand-subtle dark:focus:border-dark-tremor-brand-subtle focus:ring-tremor-brand-muted dark:focus:ring-dark-tremor-brand-muted text-tremor-content-subtle dark:text-dark-tremor-content-subtle hover:text-tremor-content dark:hover:text-dark-tremor-content",nav_button_previous:"absolute left-1",nav_button_next:"absolute right-1",table:"w-full border-collapse space-y-1",head_row:"flex",head_cell:"w-9 font-normal text-center text-tremor-content-subtle dark:text-dark-tremor-content-subtle",row:"flex w-full mt-0.5",cell:"text-center p-0 relative focus-within:relative text-tremor-default text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis",day:"h-9 w-9 p-0 hover:bg-tremor-background-subtle dark:hover:bg-dark-tremor-background-subtle outline-tremor-brand dark:outline-dark-tremor-brand rounded-tremor-default",day_today:"font-bold",day_selected:"aria-selected:bg-tremor-background-emphasis aria-selected:text-tremor-content-inverted dark:aria-selected:bg-dark-tremor-background-emphasis dark:aria-selected:text-dark-tremor-content-inverted ",day_disabled:"text-tremor-content-subtle dark:text-dark-tremor-content-subtle disabled:hover:bg-transparent",day_outside:"text-tremor-content-subtle dark:text-dark-tremor-content-subtle"},c),components:{IconLeft:e=>{var t=(0,u._T)(e,[]);return d.createElement(nw,Object.assign({className:"h-4 w-4"},t))},IconRight:e=>{var t=(0,u._T)(e,[]);return d.createElement(nS,Object.assign({className:"h-4 w-4"},t))},Caption:e=>{var t=(0,u._T)(e,[]);let{goToMonth:n,nextMonth:r,previousMonth:o,currentMonth:a}=tI();return d.createElement("div",{className:"flex justify-between items-center"},d.createElement("div",{className:"flex items-center space-x-1"},l&&d.createElement(nD,{onClick:()=>a&&n(tl(a,-1)),icon:nk}),d.createElement(nD,{onClick:()=>o&&n(o),icon:nw})),d.createElement(nC.Z,{className:"text-tremor-default tabular-nums capitalize text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis font-medium"},eQ(t.displayMonth,"LLLL yyy",{locale:i})),d.createElement("div",{className:"flex items-center space-x-1"},d.createElement(nD,{onClick:()=>r&&n(r),icon:nS}),l&&d.createElement(nD,{onClick:()=>a&&n(tl(a,1)),icon:nE})))}}},f))}nZ.displayName="DateRangePicker",n(27281);var nL=n(57365),nz=n(44140);let nB=el(),nF=d.forwardRef((e,t)=>{var n,r;let{value:o,defaultValue:i,onValueChange:a,enableSelect:l=!0,minDate:c,maxDate:s,placeholder:f="Select range",selectPlaceholder:p="Select 
range",disabled:h=!1,locale:m=eV,enableClear:g=!0,displayFormat:v,children:y,className:b,enableYearNavigation:x=!1,weekStartsOn:w=0,disabledDates:S}=e,k=(0,u._T)(e,["value","defaultValue","onValueChange","enableSelect","minDate","maxDate","placeholder","selectPlaceholder","disabled","locale","enableClear","displayFormat","children","className","enableYearNavigation","weekStartsOn","disabledDates"]),[E,C]=(0,nz.Z)(i,o),[O,j]=(0,d.useState)(!1),[P,N]=(0,d.useState)(!1),M=(0,d.useMemo)(()=>{let e=[];return c&&e.push({before:c}),s&&e.push({after:s}),[...e,...null!=S?S:[]]},[c,s,S]),I=(0,d.useMemo)(()=>{let e=new Map;return y?d.Children.forEach(y,t=>{var n;e.set(t.props.value,{text:null!==(n=(0,eu.qg)(t))&&void 0!==n?n:t.props.value,from:t.props.from,to:t.props.to})}):e6.forEach(t=>{e.set(t.value,{text:t.text,from:t.from,to:nB})}),e},[y]),R=(0,d.useMemo)(()=>{if(y)return(0,eu.sl)(y);let e=new Map;return e6.forEach(t=>e.set(t.value,t.text)),e},[y]),T=(null==E?void 0:E.selectValue)||"",A=e1(null==E?void 0:E.from,c,T,I),_=e2(null==E?void 0:E.to,s,T,I),D=A||_?e4(A,_,m,v):f,Z=ec(null!==(r=null!==(n=null!=_?_:A)&&void 0!==n?n:s)&&void 0!==r?r:nB),L=g&&!h;return d.createElement("div",Object.assign({ref:t,className:(0,es.q)("w-full min-w-[10rem] relative flex justify-between text-tremor-default max-w-sm shadow-tremor-input dark:shadow-dark-tremor-input rounded-tremor-default",b)},k),d.createElement(J,{as:"div",className:(0,es.q)("w-full",l?"rounded-l-tremor-default":"rounded-tremor-default",O&&"ring-2 ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted z-10")},d.createElement("div",{className:"relative w-full"},d.createElement(J.Button,{onFocus:()=>j(!0),onBlur:()=>j(!1),disabled:h,className:(0,es.q)("w-full outline-none text-left whitespace-nowrap truncate focus:ring-2 transition duration-100 rounded-l-tremor-default flex flex-nowrap border pl-3 py-2","rounded-l-tremor-default border-tremor-border text-tremor-content-emphasis focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:text-dark-tremor-content-emphasis dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",l?"rounded-l-tremor-default":"rounded-tremor-default",L?"pr-8":"pr-4",(0,eu.um)((0,eu.Uh)(A||_),h))},d.createElement(en,{className:(0,es.q)(e0("calendarIcon"),"flex-none shrink-0 h-5 w-5 -ml-0.5 mr-2","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle"),"aria-hidden":"true"}),d.createElement("p",{className:"truncate"},D)),L&&A?d.createElement("button",{type:"button",className:(0,es.q)("absolute outline-none inset-y-0 right-0 flex items-center transition duration-100 mr-4"),onClick:e=>{e.preventDefault(),null==a||a({}),C({})}},d.createElement(er.Z,{className:(0,es.q)(e0("clearIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null),d.createElement(ee.u,{className:"absolute z-10 min-w-min left-0",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},d.createElement(J.Panel,{focus:!0,className:(0,es.q)("divide-y overflow-y-auto outline-none rounded-tremor-default p-3 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border 
dark:shadow-dark-tremor-dropdown")},d.createElement(nZ,Object.assign({mode:"range",showOutsideDays:!0,defaultMonth:Z,selected:{from:A,to:_},onSelect:e=>{null==a||a({from:null==e?void 0:e.from,to:null==e?void 0:e.to}),C({from:null==e?void 0:e.from,to:null==e?void 0:e.to})},locale:m,disabled:M,enableYearNavigation:x,classNames:{day_range_middle:(0,es.q)("!rounded-none aria-selected:!bg-tremor-background-subtle aria-selected:dark:!bg-dark-tremor-background-subtle aria-selected:!text-tremor-content aria-selected:dark:!bg-dark-tremor-background-subtle"),day_range_start:"rounded-r-none rounded-l-tremor-small aria-selected:text-tremor-brand-inverted dark:aria-selected:text-dark-tremor-brand-inverted",day_range_end:"rounded-l-none rounded-r-tremor-small aria-selected:text-tremor-brand-inverted dark:aria-selected:text-dark-tremor-brand-inverted"},weekStartsOn:w},e))))),l&&d.createElement(et.R,{as:"div",className:(0,es.q)("w-48 -ml-px rounded-r-tremor-default",P&&"ring-2 ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted z-10"),value:T,onChange:e=>{let{from:t,to:n}=I.get(e),r=null!=n?n:nB;null==a||a({from:t,to:r,selectValue:e}),C({from:t,to:r,selectValue:e})},disabled:h},e=>{var t;let{value:n}=e;return d.createElement(d.Fragment,null,d.createElement(et.R.Button,{onFocus:()=>N(!0),onBlur:()=>N(!1),className:(0,es.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-r-tremor-default transition duration-100 border px-4 py-2","border-tremor-border shadow-tremor-input text-tremor-content-emphasis focus:border-tremor-brand-subtle","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:text-dark-tremor-content-emphasis dark:focus:border-dark-tremor-brand-subtle",(0,eu.um)((0,eu.Uh)(n),h))},n&&null!==(t=R.get(n))&&void 0!==t?t:p),d.createElement(ee.u,{className:"absolute z-10 w-full inset-x-0 right-0",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},d.createElement(et.R.Options,{className:(0,es.q)("divide-y overflow-y-auto outline-none border my-1","shadow-tremor-dropdown bg-tremor-background border-tremor-border divide-tremor-border rounded-tremor-default","dark:shadow-dark-tremor-dropdown dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border")},null!=y?y:e6.map(e=>d.createElement(nL.Z,{key:e.value,value:e.value},e.text)))))}))});nF.displayName="DateRangePicker"},92414:function(e,t,n){"use strict";n.d(t,{Z:function(){return v}});var r=n(5853),o=n(2265);n(42698),n(64016),n(8710);var i=n(33232),a=n(44140),l=n(58747);let c=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M18.031 16.6168L22.3137 20.8995L20.8995 22.3137L16.6168 18.031C15.0769 19.263 13.124 20 11 20C6.032 20 2 15.968 2 11C2 6.032 6.032 2 11 2C15.968 2 20 6.032 20 11C20 13.124 19.263 15.0769 18.031 16.6168ZM16.0247 15.8748C17.2475 14.6146 18 12.8956 18 11C18 7.1325 14.8675 4 11 4C7.1325 4 4 7.1325 4 11C4 14.8675 7.1325 18 11 18C12.8956 18 14.6146 17.2475 15.8748 16.0247L16.0247 15.8748Z"}))};var s=n(4537),u=n(28517),d=n(33044);let f=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",width:"100%",height:"100%",fill:"none",viewBox:"0 0 24 
24",stroke:"currentColor",strokeWidth:"2",strokeLinecap:"round",strokeLinejoin:"round"},t),o.createElement("line",{x1:"18",y1:"6",x2:"6",y2:"18"}),o.createElement("line",{x1:"6",y1:"6",x2:"18",y2:"18"}))};var p=n(65954),h=n(1153),m=n(96398);let g=(0,h.fn)("MultiSelect"),v=o.forwardRef((e,t)=>{let{defaultValue:n,value:h,onValueChange:v,placeholder:y="Select...",placeholderSearch:b="Search",disabled:x=!1,icon:w,children:S,className:k}=e,E=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","placeholderSearch","disabled","icon","children","className"]),[C,O]=(0,a.Z)(n,h),{reactElementChildren:j,optionsAvailable:P}=(0,o.useMemo)(()=>{let e=o.Children.toArray(S).filter(o.isValidElement);return{reactElementChildren:e,optionsAvailable:(0,m.n0)("",e)}},[S]),[N,M]=(0,o.useState)(""),I=(null!=C?C:[]).length>0,R=(0,o.useMemo)(()=>N?(0,m.n0)(N,j):P,[N,j,P]),T=()=>{M("")};return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:C,value:C,onChange:e=>{null==v||v(e),O(e)},disabled:x,className:(0,p.q)("w-full min-w-[10rem] relative text-tremor-default",k)},E,{multiple:!0}),e=>{let{value:t}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,p.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-1.5","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",w?"pl-11 -ml-0.5":"pl-3",(0,m.um)(t.length>0,x))},w&&o.createElement("span",{className:(0,p.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(w,{className:(0,p.q)(g("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("div",{className:"h-6 flex items-center"},t.length>0?o.createElement("div",{className:"flex flex-nowrap overflow-x-scroll [&::-webkit-scrollbar]:hidden [scrollbar-width:none] gap-x-1 mr-5 -ml-1.5 relative"},P.filter(e=>t.includes(e.props.value)).map((e,n)=>{var r;return o.createElement("div",{key:n,className:(0,p.q)("max-w-[100px] lg:max-w-[200px] flex justify-center items-center pl-2 pr-1.5 py-1 font-medium","rounded-tremor-small","bg-tremor-background-muted dark:bg-dark-tremor-background-muted","bg-tremor-background-subtle dark:bg-dark-tremor-background-subtle","text-tremor-content-default dark:text-dark-tremor-content-default","text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis")},o.createElement("div",{className:"text-xs truncate "},null!==(r=e.props.children)&&void 0!==r?r:e.props.value),o.createElement("div",{onClick:n=>{n.preventDefault();let r=t.filter(t=>t!==e.props.value);null==v||v(r),O(r)}},o.createElement(f,{className:(0,p.q)(g("clearIconItem"),"cursor-pointer rounded-tremor-full w-3.5 h-3.5 ml-2","text-tremor-content-subtle hover:text-tremor-content","dark:text-dark-tremor-content-subtle dark:hover:text-tremor-content")})))})):o.createElement("span",null,y)),o.createElement("span",{className:(0,p.q)("absolute inset-y-0 right-0 flex items-center mr-2.5")},o.createElement(l.Z,{className:(0,p.q)(g("arrowDownIcon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),I&&!x?o.createElement("button",{type:"button",className:(0,p.q)("absolute inset-y-0 right-0 flex items-center 
mr-8"),onClick:e=>{e.preventDefault(),O([]),null==v||v([])}},o.createElement(s.Z,{className:(0,p.q)(g("clearIconAllItems"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,p.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},o.createElement("div",{className:(0,p.q)("flex items-center w-full px-2.5","bg-tremor-background-muted","dark:bg-dark-tremor-background-muted")},o.createElement("span",null,o.createElement(c,{className:(0,p.q)("flex-none w-4 h-4 mr-2","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("input",{name:"search",type:"input",autoComplete:"off",placeholder:b,className:(0,p.q)("w-full focus:outline-none focus:ring-none bg-transparent text-tremor-default py-2","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis"),onKeyDown:e=>{"Space"===e.code&&""!==e.target.value&&e.stopPropagation()},onChange:e=>M(e.target.value),value:N})),o.createElement(i.Z.Provider,Object.assign({},{onBlur:{handleResetSearch:T}},{value:{selectedValue:t}}),R))))})});v.displayName="MultiSelect"},46030:function(e,t,n){"use strict";n.d(t,{Z:function(){return u}});var r=n(5853);n(42698),n(64016),n(8710);var o=n(33232),i=n(2265),a=n(65954),l=n(1153),c=n(28517);let s=(0,l.fn)("MultiSelectItem"),u=i.forwardRef((e,t)=>{let{value:n,className:u,children:d}=e,f=(0,r._T)(e,["value","className","children"]),{selectedValue:p}=(0,i.useContext)(o.Z),h=(0,l.NZ)(n,p);return i.createElement(c.R.Option,Object.assign({className:(0,a.q)(s("root"),"flex justify-start items-center cursor-default text-tremor-default p-2.5","ui-active:bg-tremor-background-muted ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",u),ref:t,key:n,value:n},f),i.createElement("input",{type:"checkbox",className:(0,a.q)(s("checkbox"),"flex-none focus:ring-none focus:outline-none cursor-pointer mr-2.5","accent-tremor-brand","dark:accent-dark-tremor-brand"),checked:h,readOnly:!0}),i.createElement("span",{className:"whitespace-nowrap truncate"},null!=d?d:n))});u.displayName="MultiSelectItem"},30150:function(e,t,n){"use strict";n.d(t,{Z:function(){return f}});var r=n(5853),o=n(2265);let i=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2.5"}),o.createElement("path",{d:"M12 4v16m8-8H4"}))},a=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2.5"}),o.createElement("path",{d:"M20 12H4"}))};var l=n(65954),c=n(1153),s=n(69262);let u="flex 
mx-auto text-tremor-content-subtle dark:text-dark-tremor-content-subtle",d="cursor-pointer hover:text-tremor-content dark:hover:text-dark-tremor-content",f=o.forwardRef((e,t)=>{let{onSubmit:n,enableStepper:f=!0,disabled:p,onValueChange:h,onChange:m}=e,g=(0,r._T)(e,["onSubmit","enableStepper","disabled","onValueChange","onChange"]),v=(0,o.useRef)(null),[y,b]=o.useState(!1),x=o.useCallback(()=>{b(!0)},[]),w=o.useCallback(()=>{b(!1)},[]),[S,k]=o.useState(!1),E=o.useCallback(()=>{k(!0)},[]),C=o.useCallback(()=>{k(!1)},[]);return o.createElement(s.Z,Object.assign({type:"number",ref:(0,c.lq)([v,t]),disabled:p,makeInputClassName:(0,c.fn)("NumberInput"),onKeyDown:e=>{var t;if("Enter"===e.key&&!e.ctrlKey&&!e.altKey&&!e.shiftKey){let e=null===(t=v.current)||void 0===t?void 0:t.value;null==n||n(parseFloat(null!=e?e:""))}"ArrowDown"===e.key&&x(),"ArrowUp"===e.key&&E()},onKeyUp:e=>{"ArrowDown"===e.key&&w(),"ArrowUp"===e.key&&C()},onChange:e=>{p||(null==h||h(parseFloat(e.target.value)),null==m||m(e))},stepper:f?o.createElement("div",{className:(0,l.q)("flex justify-center align-middle")},o.createElement("div",{tabIndex:-1,onClick:e=>e.preventDefault(),onMouseDown:e=>e.preventDefault(),onTouchStart:e=>{e.cancelable&&e.preventDefault()},onMouseUp:()=>{var e,t;p||(null===(e=v.current)||void 0===e||e.stepDown(),null===(t=v.current)||void 0===t||t.dispatchEvent(new Event("input",{bubbles:!0})))},className:(0,l.q)(!p&&d,u,"group py-[10px] px-2.5 border-l border-tremor-border dark:border-dark-tremor-border")},o.createElement(a,{"data-testid":"step-down",className:(y?"scale-95":"")+" h-4 w-4 duration-75 transition group-active:scale-95"})),o.createElement("div",{tabIndex:-1,onClick:e=>e.preventDefault(),onMouseDown:e=>e.preventDefault(),onTouchStart:e=>{e.cancelable&&e.preventDefault()},onMouseUp:()=>{var e,t;p||(null===(e=v.current)||void 0===e||e.stepUp(),null===(t=v.current)||void 0===t||t.dispatchEvent(new Event("input",{bubbles:!0})))},className:(0,l.q)(!p&&d,u,"group py-[10px] px-2.5 border-l border-tremor-border dark:border-dark-tremor-border")},o.createElement(i,{"data-testid":"step-up",className:(S?"scale-95":"")+" h-4 w-4 duration-75 transition group-active:scale-95"}))):null},g))});f.displayName="NumberInput"},27281:function(e,t,n){"use strict";n.d(t,{Z:function(){return h}});var r=n(5853),o=n(2265),i=n(58747),a=n(4537),l=n(65954),c=n(1153),s=n(96398),u=n(28517),d=n(33044),f=n(44140);let p=(0,c.fn)("Select"),h=o.forwardRef((e,t)=>{let{defaultValue:n,value:c,onValueChange:h,placeholder:m="Select...",disabled:g=!1,icon:v,enableClear:y=!0,children:b,className:x}=e,w=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","disabled","icon","enableClear","children","className"]),[S,k]=(0,f.Z)(n,c),E=(0,o.useMemo)(()=>{let e=o.Children.toArray(b).filter(o.isValidElement);return(0,s.sl)(e)},[b]);return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:S,value:S,onChange:e=>{null==h||h(e),k(e)},disabled:g,className:(0,l.q)("w-full min-w-[10rem] relative text-tremor-default",x)},w),e=>{var t;let{value:n}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,l.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-2","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle 
dark:focus:ring-dark-tremor-brand-muted",v?"pl-10":"pl-3",(0,s.um)((0,s.Uh)(n),g))},v&&o.createElement("span",{className:(0,l.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(v,{className:(0,l.q)(p("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("span",{className:"w-[90%] block truncate"},n&&null!==(t=E.get(n))&&void 0!==t?t:m),o.createElement("span",{className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-3")},o.createElement(i.Z,{className:(0,l.q)(p("arrowDownIcon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),y&&S?o.createElement("button",{type:"button",className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-8"),onClick:e=>{e.preventDefault(),k(""),null==h||h("")}},o.createElement(a.Z,{className:(0,l.q)(p("clearIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,l.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},b)))})});h.displayName="Select"},57365:function(e,t,n){"use strict";n.d(t,{Z:function(){return c}});var r=n(5853),o=n(2265),i=n(28517),a=n(65954);let l=(0,n(1153).fn)("SelectItem"),c=o.forwardRef((e,t)=>{let{value:n,icon:c,className:s,children:u}=e,d=(0,r._T)(e,["value","icon","className","children"]);return o.createElement(i.R.Option,Object.assign({className:(0,a.q)(l("root"),"flex justify-start items-center cursor-default text-tremor-default px-2.5 py-2.5","ui-active:bg-tremor-background-muted ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong ui-selected:bg-tremor-background-muted text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",s),ref:t,key:n,value:n},d),c&&o.createElement(c,{className:(0,a.q)(l("icon"),"flex-none w-5 h-5 mr-1.5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}),o.createElement("span",{className:"whitespace-nowrap truncate"},null!=u?u:n))});c.displayName="SelectItem"},92858:function(e,t,n){"use strict";n.d(t,{Z:function(){return M}});var r=n(5853),o=n(2265),i=n(62963),a=n(90945),l=n(13323),c=n(17684),s=n(80004),u=n(93689),d=n(38198),f=n(47634),p=n(56314),h=n(27847),m=n(64518);let g=(0,o.createContext)(null),v=Object.assign((0,h.yV)(function(e,t){let n=(0,c.M)(),{id:r="headlessui-description-".concat(n),...i}=e,a=function e(){let t=(0,o.useContext)(g);if(null===t){let t=Error("You used a component, but it is not inside a relevant parent.");throw Error.captureStackTrace&&Error.captureStackTrace(t,e),t}return t}(),l=(0,u.T)(t);(0,m.e)(()=>a.register(r),[r,a.register]);let 
s={ref:l,...a.props,id:r};return(0,h.sY)({ourProps:s,theirProps:i,slot:a.slot||{},defaultTag:"p",name:a.name||"Description"})}),{});var y=n(37388);let b=(0,o.createContext)(null),x=Object.assign((0,h.yV)(function(e,t){let n=(0,c.M)(),{id:r="headlessui-label-".concat(n),passive:i=!1,...a}=e,l=function e(){let t=(0,o.useContext)(b);if(null===t){let t=Error("You used a component, but it is not inside a relevant parent.");throw Error.captureStackTrace&&Error.captureStackTrace(t,e),t}return t}(),s=(0,u.T)(t);(0,m.e)(()=>l.register(r),[r,l.register]);let d={ref:s,...l.props,id:r};return i&&("onClick"in d&&(delete d.htmlFor,delete d.onClick),"onClick"in a&&delete a.onClick),(0,h.sY)({ourProps:d,theirProps:a,slot:l.slot||{},defaultTag:"label",name:l.name||"Label"})}),{}),w=(0,o.createContext)(null);w.displayName="GroupContext";let S=o.Fragment,k=Object.assign((0,h.yV)(function(e,t){let n=(0,c.M)(),{id:r="headlessui-switch-".concat(n),checked:m,defaultChecked:g=!1,onChange:v,name:b,value:x,form:S,...k}=e,E=(0,o.useContext)(w),C=(0,o.useRef)(null),O=(0,u.T)(C,t,null===E?null:E.setSwitch),[j,P]=(0,i.q)(m,v,g),N=(0,l.z)(()=>null==P?void 0:P(!j)),M=(0,l.z)(e=>{if((0,f.P)(e.currentTarget))return e.preventDefault();e.preventDefault(),N()}),I=(0,l.z)(e=>{e.key===y.R.Space?(e.preventDefault(),N()):e.key===y.R.Enter&&(0,p.g)(e.currentTarget)}),R=(0,l.z)(e=>e.preventDefault()),T=(0,o.useMemo)(()=>({checked:j}),[j]),A={id:r,ref:O,role:"switch",type:(0,s.f)(e,C),tabIndex:0,"aria-checked":j,"aria-labelledby":null==E?void 0:E.labelledby,"aria-describedby":null==E?void 0:E.describedby,onClick:M,onKeyUp:I,onKeyPress:R},_=(0,a.G)();return(0,o.useEffect)(()=>{var e;let t=null==(e=C.current)?void 0:e.closest("form");t&&void 0!==g&&_.addEventListener(t,"reset",()=>{P(g)})},[C,P]),o.createElement(o.Fragment,null,null!=b&&j&&o.createElement(d._,{features:d.A.Hidden,...(0,h.oA)({as:"input",type:"checkbox",hidden:!0,readOnly:!0,form:S,checked:j,name:b,value:x})}),(0,h.sY)({ourProps:A,theirProps:k,slot:T,defaultTag:"button",name:"Switch"}))}),{Group:function(e){var t;let[n,r]=(0,o.useState)(null),[i,a]=function(){let[e,t]=(0,o.useState)([]);return[e.length>0?e.join(" "):void 0,(0,o.useMemo)(()=>function(e){let n=(0,l.z)(e=>(t(t=>[...t,e]),()=>t(t=>{let n=t.slice(),r=n.indexOf(e);return -1!==r&&n.splice(r,1),n}))),r=(0,o.useMemo)(()=>({register:n,slot:e.slot,name:e.name,props:e.props}),[n,e.slot,e.name,e.props]);return o.createElement(b.Provider,{value:r},e.children)},[t])]}(),[c,s]=function(){let[e,t]=(0,o.useState)([]);return[e.length>0?e.join(" "):void 0,(0,o.useMemo)(()=>function(e){let n=(0,l.z)(e=>(t(t=>[...t,e]),()=>t(t=>{let n=t.slice(),r=n.indexOf(e);return -1!==r&&n.splice(r,1),n}))),r=(0,o.useMemo)(()=>({register:n,slot:e.slot,name:e.name,props:e.props}),[n,e.slot,e.name,e.props]);return o.createElement(g.Provider,{value:r},e.children)},[t])]}(),u=(0,o.useMemo)(()=>({switch:n,setSwitch:r,labelledby:i,describedby:c}),[n,r,i,c]);return o.createElement(s,{name:"Switch.Description"},o.createElement(a,{name:"Switch.Label",props:{htmlFor:null==(t=u.switch)?void 0:t.id,onClick(e){n&&("LABEL"===e.currentTarget.tagName&&e.preventDefault(),n.click(),n.focus({preventScroll:!0}))}}},o.createElement(w.Provider,{value:u},(0,h.sY)({ourProps:{},theirProps:e,defaultTag:S,name:"Switch.Group"}))))},Label:x,Description:v});var E=n(44140),C=n(26898),O=n(65954),j=n(1153),P=n(1526);let 
N=(0,j.fn)("Switch"),M=o.forwardRef((e,t)=>{let{checked:n,defaultChecked:i=!1,onChange:a,color:l,name:c,error:s,errorMessage:u,disabled:d,required:f,tooltip:p,id:h}=e,m=(0,r._T)(e,["checked","defaultChecked","onChange","color","name","error","errorMessage","disabled","required","tooltip","id"]),g={bgColor:l?(0,j.bM)(l,C.K.background).bgColor:"bg-tremor-brand dark:bg-dark-tremor-brand",ringColor:l?(0,j.bM)(l,C.K.ring).ringColor:"ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted"},[v,y]=(0,E.Z)(i,n),[b,x]=(0,o.useState)(!1),{tooltipProps:w,getReferenceProps:S}=(0,P.l)(300);return o.createElement("div",{className:"flex flex-row items-center justify-start"},o.createElement(P.Z,Object.assign({text:p},w)),o.createElement("div",Object.assign({ref:(0,j.lq)([t,w.refs.setReference]),className:(0,O.q)(N("root"),"flex flex-row relative h-5")},m,S),o.createElement("input",{type:"checkbox",className:(0,O.q)(N("input"),"absolute w-5 h-5 cursor-pointer left-0 top-0 opacity-0"),name:c,required:f,checked:v,onChange:e=>{e.preventDefault()}}),o.createElement(k,{checked:v,onChange:e=>{y(e),null==a||a(e)},disabled:d,className:(0,O.q)(N("switch"),"w-10 h-5 group relative inline-flex flex-shrink-0 cursor-pointer items-center justify-center rounded-tremor-full","focus:outline-none",d?"cursor-not-allowed":""),onFocus:()=>x(!0),onBlur:()=>x(!1),id:h},o.createElement("span",{className:(0,O.q)(N("sr-only"),"sr-only")},"Switch ",v?"on":"off"),o.createElement("span",{"aria-hidden":"true",className:(0,O.q)(N("background"),v?g.bgColor:"bg-tremor-border dark:bg-dark-tremor-border","pointer-events-none absolute mx-auto h-3 w-9 rounded-tremor-full transition-colors duration-100 ease-in-out")}),o.createElement("span",{"aria-hidden":"true",className:(0,O.q)(N("round"),v?(0,O.q)(g.bgColor,"translate-x-5 border-tremor-background dark:border-dark-tremor-background"):"translate-x-0 bg-tremor-border dark:bg-dark-tremor-border border-tremor-background dark:border-dark-tremor-background","pointer-events-none absolute left-0 inline-block h-5 w-5 transform rounded-tremor-full border-2 shadow-tremor-input duration-100 ease-in-out transition",b?(0,O.q)("ring-2",g.ringColor):"")}))),s&&u?o.createElement("p",{className:(0,O.q)(N("errorMessage"),"text-sm text-red-500 mt-1 ")},u):null)});M.displayName="Switch"},87452:function(e,t,n){"use strict";n.d(t,{Z:function(){return d},r:function(){return u}});var r=n(5853),o=n(21886);n(42698),n(64016);var i=n(8710);n(33232);var a=n(65954),l=n(1153),c=n(2265);let s=(0,l.fn)("Accordion"),u=(0,c.createContext)({isOpen:!1}),d=c.forwardRef((e,t)=>{var n;let{defaultOpen:l=!1,children:d,className:f}=e,p=(0,r._T)(e,["defaultOpen","children","className"]),h=null!==(n=(0,c.useContext)(i.Z))&&void 0!==n?n:(0,a.q)("rounded-tremor-default border");return c.createElement(o.p,Object.assign({as:"div",ref:t,className:(0,a.q)(s("root"),"overflow-hidden","bg-tremor-background border-tremor-border","dark:bg-dark-tremor-background dark:border-dark-tremor-border",h,f),defaultOpen:l},p),e=>{let{open:t}=e;return c.createElement(u.Provider,{value:{isOpen:t}},d)})});d.displayName="Accordion"},88829:function(e,t,n){"use strict";n.d(t,{Z:function(){return c}});var r=n(5853),o=n(2265),i=n(21886),a=n(65954);let l=(0,n(1153).fn)("AccordionBody"),c=o.forwardRef((e,t)=>{let{children:n,className:c}=e,s=(0,r._T)(e,["children","className"]);return o.createElement(i.p.Panel,Object.assign({ref:t,className:(0,a.q)(l("root"),"w-full text-tremor-default px-4 
pb-3","text-tremor-content","dark:text-dark-tremor-content",c)},s),n)});c.displayName="AccordionBody"},72208:function(e,t,n){"use strict";n.d(t,{Z:function(){return u}});var r=n(5853),o=n(2265),i=n(21886);let a=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M11.9999 10.8284L7.0502 15.7782L5.63599 14.364L11.9999 8L18.3639 14.364L16.9497 15.7782L11.9999 10.8284Z"}))};var l=n(87452),c=n(65954);let s=(0,n(1153).fn)("AccordionHeader"),u=o.forwardRef((e,t)=>{let{children:n,className:u}=e,d=(0,r._T)(e,["children","className"]),{isOpen:f}=(0,o.useContext)(l.r);return o.createElement(i.p.Button,Object.assign({ref:t,className:(0,c.q)(s("root"),"w-full flex items-center justify-between px-4 py-3","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis",u)},d),o.createElement("div",{className:(0,c.q)(s("children"),"flex flex-1 text-inherit mr-4")},n),o.createElement("div",null,o.createElement(a,{className:(0,c.q)(s("arrowIcon"),"h-5 w-5 -mr-1","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle",f?"transition-all":"transition-all -rotate-180")})))});u.displayName="AccordionHeader"},21626:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(5853),o=n(2265),i=n(65954);let a=(0,n(1153).fn)("Table"),l=o.forwardRef((e,t)=>{let{children:n,className:l}=e,c=(0,r._T)(e,["children","className"]);return o.createElement("div",{className:(0,i.q)(a("root"),"overflow-auto",l)},o.createElement("table",Object.assign({ref:t,className:(0,i.q)(a("table"),"w-full text-tremor-default","text-tremor-content","dark:text-dark-tremor-content")},c),n))});l.displayName="Table"},97214:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(5853),o=n(2265),i=n(65954);let a=(0,n(1153).fn)("TableBody"),l=o.forwardRef((e,t)=>{let{children:n,className:l}=e,c=(0,r._T)(e,["children","className"]);return o.createElement(o.Fragment,null,o.createElement("tbody",Object.assign({ref:t,className:(0,i.q)(a("root"),"align-top divide-y","divide-tremor-border","dark:divide-dark-tremor-border",l)},c),n))});l.displayName="TableBody"},28241:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(5853),o=n(2265),i=n(65954);let a=(0,n(1153).fn)("TableCell"),l=o.forwardRef((e,t)=>{let{children:n,className:l}=e,c=(0,r._T)(e,["children","className"]);return o.createElement(o.Fragment,null,o.createElement("td",Object.assign({ref:t,className:(0,i.q)(a("root"),"align-middle whitespace-nowrap text-left p-4",l)},c),n))});l.displayName="TableCell"},58834:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(5853),o=n(2265),i=n(65954);let a=(0,n(1153).fn)("TableHead"),l=o.forwardRef((e,t)=>{let{children:n,className:l}=e,c=(0,r._T)(e,["children","className"]);return o.createElement(o.Fragment,null,o.createElement("thead",Object.assign({ref:t,className:(0,i.q)(a("root"),"text-left","text-tremor-content","dark:text-dark-tremor-content",l)},c),n))});l.displayName="TableHead"},69552:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(5853),o=n(2265),i=n(65954);let a=(0,n(1153).fn)("TableHeaderCell"),l=o.forwardRef((e,t)=>{let{children:n,className:l}=e,c=(0,r._T)(e,["children","className"]);return o.createElement(o.Fragment,null,o.createElement("th",Object.assign({ref:t,className:(0,i.q)(a("root"),"whitespace-nowrap text-left font-semibold top-0 px-4 
py-3.5","text-tremor-content","dark:text-dark-tremor-content",l)},c),n))});l.displayName="TableHeaderCell"},71876:function(e,t,n){"use strict";n.d(t,{Z:function(){return l}});var r=n(5853),o=n(2265),i=n(65954);let a=(0,n(1153).fn)("TableRow"),l=o.forwardRef((e,t)=>{let{children:n,className:l}=e,c=(0,r._T)(e,["children","className"]);return o.createElement(o.Fragment,null,o.createElement("tr",Object.assign({ref:t,className:(0,i.q)(a("row"),l)},c),n))});l.displayName="TableRow"},97765:function(e,t,n){"use strict";n.d(t,{Z:function(){return c}});var r=n(5853),o=n(26898),i=n(65954),a=n(1153),l=n(2265);let c=l.forwardRef((e,t)=>{let{color:n,children:c,className:s}=e,u=(0,r._T)(e,["color","children","className"]);return l.createElement("p",Object.assign({ref:t,className:(0,i.q)(n?(0,a.bM)(n,o.K.lightText).textColor:"text-tremor-content-subtle dark:text-dark-tremor-content-subtle",s)},u),c)});c.displayName="Subtitle"},96889:function(e,t,n){"use strict";n.d(t,{Z:function(){return s}});var r=n(5853),o=n(2265),i=n(26898),a=n(65954),l=n(1153);let c=(0,l.fn)("BarList"),s=o.forwardRef((e,t)=>{var n;let s;let{data:u=[],color:d,valueFormatter:f=l.Cj,showAnimation:p=!1,className:h}=e,m=(0,r._T)(e,["data","color","valueFormatter","showAnimation","className"]),g=(n=u.map(e=>e.value),s=-1/0,n.forEach(e=>{s=Math.max(s,e)}),n.map(e=>0===e?0:Math.max(e/s*100,1)));return o.createElement("div",Object.assign({ref:t,className:(0,a.q)(c("root"),"flex justify-between space-x-6",h)},m),o.createElement("div",{className:(0,a.q)(c("bars"),"relative w-full")},u.map((e,t)=>{var n,r,s;let f=e.icon;return o.createElement("div",{key:null!==(n=e.key)&&void 0!==n?n:e.name,className:(0,a.q)(c("bar"),"flex items-center rounded-tremor-small bg-opacity-30","h-9",e.color||d?(0,l.bM)(null!==(r=e.color)&&void 0!==r?r:d,i.K.background).bgColor:"bg-tremor-brand-subtle dark:bg-dark-tremor-brand-subtle dark:bg-opacity-30",t===u.length-1?"mb-0":"mb-2"),style:{width:"".concat(g[t],"%"),transition:p?"all 1s":""}},o.createElement("div",{className:(0,a.q)("absolute max-w-full flex left-2")},f?o.createElement(f,{className:(0,a.q)(c("barIcon"),"flex-none h-5 w-5 mr-2","text-tremor-content","dark:text-dark-tremor-content")}):null,e.href?o.createElement("a",{href:e.href,target:null!==(s=e.target)&&void 0!==s?s:"_blank",rel:"noreferrer",className:(0,a.q)(c("barLink"),"whitespace-nowrap hover:underline truncate text-tremor-default","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis")},e.name):o.createElement("p",{className:(0,a.q)(c("barText"),"whitespace-nowrap truncate text-tremor-default","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis")},e.name)))})),o.createElement("div",{className:"text-right min-w-min"},u.map((e,t)=>{var n;return o.createElement("div",{key:null!==(n=e.key)&&void 0!==n?n:e.name,className:(0,a.q)(c("labelWrapper"),"flex justify-end items-center","h-9",t===u.length-1?"mb-0":"mb-2")},o.createElement("p",{className:(0,a.q)(c("labelText"),"whitespace-nowrap truncate text-tremor-default","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis")},f(e.value)))})))});s.displayName="BarList"},44140:function(e,t,n){"use strict";n.d(t,{Z:function(){return o}});var r=n(2265);let o=(e,t)=>{let n=void 0!==t,[o,i]=(0,r.useState)(e);return[n?t:o,e=>{n||i(e)}]}},51646:function(e,t,n){"use strict";n.d(t,{Z:function(){return o}});var r=n(2265);function o(){let[,e]=r.useReducer(e=>e+1,0);return e}},12757:function(e,t,n){"use strict";n.d(t,{F:function(){return a},Z:function(){return i}});var 
r=n(36760),o=n.n(r);function i(e,t,n){return o()({["".concat(e,"-status-success")]:"success"===t,["".concat(e,"-status-warning")]:"warning"===t,["".concat(e,"-status-error")]:"error"===t,["".concat(e,"-status-validating")]:"validating"===t,["".concat(e,"-has-feedback")]:n})}let a=(e,t)=>t||e},67960:function(e,t,n){"use strict";n.d(t,{Z:function(){return e8}});var r=n(2265),o=n(36760),i=n.n(o),a=n(18694),l=n(71744),c=n(33759),s=e=>{let{prefixCls:t,className:n,style:o,size:a,shape:l}=e,c=i()({["".concat(t,"-lg")]:"large"===a,["".concat(t,"-sm")]:"small"===a}),s=i()({["".concat(t,"-circle")]:"circle"===l,["".concat(t,"-square")]:"square"===l,["".concat(t,"-round")]:"round"===l}),u=r.useMemo(()=>"number"==typeof a?{width:a,height:a,lineHeight:"".concat(a,"px")}:{},[a]);return r.createElement("span",{className:i()(t,c,s,n),style:Object.assign(Object.assign({},u),o)})},u=n(352),d=n(80669),f=n(3104);let p=new u.E4("ant-skeleton-loading",{"0%":{backgroundPosition:"100% 50%"},"100%":{backgroundPosition:"0 50%"}}),h=e=>({height:e,lineHeight:(0,u.bf)(e)}),m=e=>Object.assign({width:e},h(e)),g=e=>({background:e.skeletonLoadingBackground,backgroundSize:"400% 100%",animationName:p,animationDuration:e.skeletonLoadingMotionDuration,animationTimingFunction:"ease",animationIterationCount:"infinite"}),v=(e,t)=>Object.assign({width:t(e).mul(5).equal(),minWidth:t(e).mul(5).equal()},h(e)),y=e=>{let{skeletonAvatarCls:t,gradientFromColor:n,controlHeight:r,controlHeightLG:o,controlHeightSM:i}=e;return{["".concat(t)]:Object.assign({display:"inline-block",verticalAlign:"top",background:n},m(r)),["".concat(t).concat(t,"-circle")]:{borderRadius:"50%"},["".concat(t).concat(t,"-lg")]:Object.assign({},m(o)),["".concat(t).concat(t,"-sm")]:Object.assign({},m(i))}},b=e=>{let{controlHeight:t,borderRadiusSM:n,skeletonInputCls:r,controlHeightLG:o,controlHeightSM:i,gradientFromColor:a,calc:l}=e;return{["".concat(r)]:Object.assign({display:"inline-block",verticalAlign:"top",background:a,borderRadius:n},v(t,l)),["".concat(r,"-lg")]:Object.assign({},v(o,l)),["".concat(r,"-sm")]:Object.assign({},v(i,l))}},x=e=>Object.assign({width:e},h(e)),w=e=>{let{skeletonImageCls:t,imageSizeBase:n,gradientFromColor:r,borderRadiusSM:o,calc:i}=e;return{["".concat(t)]:Object.assign(Object.assign({display:"flex",alignItems:"center",justifyContent:"center",verticalAlign:"top",background:r,borderRadius:o},x(i(n).mul(2).equal())),{["".concat(t,"-path")]:{fill:"#bfbfbf"},["".concat(t,"-svg")]:Object.assign(Object.assign({},x(n)),{maxWidth:i(n).mul(4).equal(),maxHeight:i(n).mul(4).equal()}),["".concat(t,"-svg").concat(t,"-svg-circle")]:{borderRadius:"50%"}}),["".concat(t).concat(t,"-circle")]:{borderRadius:"50%"}}},S=(e,t,n)=>{let{skeletonButtonCls:r}=e;return{["".concat(n).concat(r,"-circle")]:{width:t,minWidth:t,borderRadius:"50%"},["".concat(n).concat(r,"-round")]:{borderRadius:t}}},k=(e,t)=>Object.assign({width:t(e).mul(2).equal(),minWidth:t(e).mul(2).equal()},h(e)),E=e=>{let{borderRadiusSM:t,skeletonButtonCls:n,controlHeight:r,controlHeightLG:o,controlHeightSM:i,gradientFromColor:a,calc:l}=e;return 
Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({["".concat(n)]:Object.assign({display:"inline-block",verticalAlign:"top",background:a,borderRadius:t,width:l(r).mul(2).equal(),minWidth:l(r).mul(2).equal()},k(r,l))},S(e,r,n)),{["".concat(n,"-lg")]:Object.assign({},k(o,l))}),S(e,o,"".concat(n,"-lg"))),{["".concat(n,"-sm")]:Object.assign({},k(i,l))}),S(e,i,"".concat(n,"-sm")))},C=e=>{let{componentCls:t,skeletonAvatarCls:n,skeletonTitleCls:r,skeletonParagraphCls:o,skeletonButtonCls:i,skeletonInputCls:a,skeletonImageCls:l,controlHeight:c,controlHeightLG:s,controlHeightSM:u,gradientFromColor:d,padding:f,marginSM:p,borderRadius:h,titleHeight:v,blockRadius:x,paragraphLiHeight:S,controlHeightXS:k,paragraphMarginTop:C}=e;return{["".concat(t)]:{display:"table",width:"100%",["".concat(t,"-header")]:{display:"table-cell",paddingInlineEnd:f,verticalAlign:"top",["".concat(n)]:Object.assign({display:"inline-block",verticalAlign:"top",background:d},m(c)),["".concat(n,"-circle")]:{borderRadius:"50%"},["".concat(n,"-lg")]:Object.assign({},m(s)),["".concat(n,"-sm")]:Object.assign({},m(u))},["".concat(t,"-content")]:{display:"table-cell",width:"100%",verticalAlign:"top",["".concat(r)]:{width:"100%",height:v,background:d,borderRadius:x,["+ ".concat(o)]:{marginBlockStart:u}},["".concat(o)]:{padding:0,"> li":{width:"100%",height:S,listStyle:"none",background:d,borderRadius:x,"+ li":{marginBlockStart:k}}},["".concat(o,"> li:last-child:not(:first-child):not(:nth-child(2))")]:{width:"61%"}},["&-round ".concat(t,"-content")]:{["".concat(r,", ").concat(o," > li")]:{borderRadius:h}}},["".concat(t,"-with-avatar ").concat(t,"-content")]:{["".concat(r)]:{marginBlockStart:p,["+ ".concat(o)]:{marginBlockStart:C}}},["".concat(t).concat(t,"-element")]:Object.assign(Object.assign(Object.assign(Object.assign({display:"inline-block",width:"auto"},E(e)),y(e)),b(e)),w(e)),["".concat(t).concat(t,"-block")]:{width:"100%",["".concat(i)]:{width:"100%"},["".concat(a)]:{width:"100%"}},["".concat(t).concat(t,"-active")]:{["\n ".concat(r,",\n ").concat(o," > li,\n ").concat(n,",\n ").concat(i,",\n ").concat(a,",\n ").concat(l,"\n ")]:Object.assign({},g(e))}}};var O=(0,d.I$)("Skeleton",e=>{let{componentCls:t,calc:n}=e;return[C((0,f.TS)(e,{skeletonAvatarCls:"".concat(t,"-avatar"),skeletonTitleCls:"".concat(t,"-title"),skeletonParagraphCls:"".concat(t,"-paragraph"),skeletonButtonCls:"".concat(t,"-button"),skeletonInputCls:"".concat(t,"-input"),skeletonImageCls:"".concat(t,"-image"),imageSizeBase:n(e.controlHeight).mul(1.5).equal(),borderRadius:100,skeletonLoadingBackground:"linear-gradient(90deg, ".concat(e.gradientFromColor," 25%, ").concat(e.gradientToColor," 37%, ").concat(e.gradientFromColor," 63%)"),skeletonLoadingMotionDuration:"1.4s"}))]},e=>{let{colorFillContent:t,colorFill:n}=e;return{color:t,colorGradientEnd:n,gradientFromColor:t,gradientToColor:n,titleHeight:e.controlHeight/2,blockRadius:e.borderRadiusSM,paragraphMarginTop:e.marginLG+e.marginXXS,paragraphLiHeight:e.controlHeight/2}},{deprecatedTokens:[["color","gradientFromColor"],["colorGradientEnd","gradientToColor"]]}),j=n(1119),P={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M888 792H200V168c0-4.4-3.6-8-8-8h-56c-4.4 0-8 3.6-8 8v688c0 4.4 3.6 8 8 8h752c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8zM288 604a64 64 0 10128 0 64 64 0 10-128 0zm118-224a48 48 0 1096 0 48 48 0 10-96 0zm158 228a96 96 0 10192 0 96 96 0 10-192 0zm148-314a56 56 0 10112 0 56 56 0 10-112 
0z"}}]},name:"dot-chart",theme:"outlined"},N=n(55015),M=r.forwardRef(function(e,t){return r.createElement(N.Z,(0,j.Z)({},e,{ref:t,icon:P}))}),I=n(83145),R=e=>{let t=t=>{let{width:n,rows:r=2}=e;return Array.isArray(n)?n[t]:r-1===t?n:void 0},{prefixCls:n,className:o,style:a,rows:l}=e,c=(0,I.Z)(Array(l)).map((e,n)=>r.createElement("li",{key:n,style:{width:t(n)}}));return r.createElement("ul",{className:i()(n,o),style:a},c)},T=e=>{let{prefixCls:t,className:n,width:o,style:a}=e;return r.createElement("h3",{className:i()(t,n),style:Object.assign({width:o},a)})};function A(e){return e&&"object"==typeof e?e:{}}let _=e=>{let{prefixCls:t,loading:n,className:o,rootClassName:a,style:c,children:u,avatar:d=!1,title:f=!0,paragraph:p=!0,active:h,round:m}=e,{getPrefixCls:g,direction:v,skeleton:y}=r.useContext(l.E_),b=g("skeleton",t),[x,w,S]=O(b);if(n||!("loading"in e)){let e,t;let n=!!d,l=!!f,u=!!p;if(n){let t=Object.assign(Object.assign({prefixCls:"".concat(b,"-avatar")},l&&!u?{size:"large",shape:"square"}:{size:"large",shape:"circle"}),A(d));e=r.createElement("div",{className:"".concat(b,"-header")},r.createElement(s,Object.assign({},t)))}if(l||u){let e,o;if(l){let t=Object.assign(Object.assign({prefixCls:"".concat(b,"-title")},!n&&u?{width:"38%"}:n&&u?{width:"50%"}:{}),A(f));e=r.createElement(T,Object.assign({},t))}if(u){let e=Object.assign(Object.assign({prefixCls:"".concat(b,"-paragraph")},function(e,t){let n={};return e&&t||(n.width="61%"),!e&&t?n.rows=3:n.rows=2,n}(n,l)),A(p));o=r.createElement(R,Object.assign({},e))}t=r.createElement("div",{className:"".concat(b,"-content")},e,o)}let g=i()(b,{["".concat(b,"-with-avatar")]:n,["".concat(b,"-active")]:h,["".concat(b,"-rtl")]:"rtl"===v,["".concat(b,"-round")]:m},null==y?void 0:y.className,o,a,w,S);return x(r.createElement("div",{className:g,style:Object.assign(Object.assign({},null==y?void 0:y.style),c)},e,t))}return void 0!==u?u:null};_.Button=e=>{let{prefixCls:t,className:n,rootClassName:o,active:c,block:u=!1,size:d="default"}=e,{getPrefixCls:f}=r.useContext(l.E_),p=f("skeleton",t),[h,m,g]=O(p),v=(0,a.Z)(e,["prefixCls"]),y=i()(p,"".concat(p,"-element"),{["".concat(p,"-active")]:c,["".concat(p,"-block")]:u},n,o,m,g);return h(r.createElement("div",{className:y},r.createElement(s,Object.assign({prefixCls:"".concat(p,"-button"),size:d},v))))},_.Avatar=e=>{let{prefixCls:t,className:n,rootClassName:o,active:c,shape:u="circle",size:d="default"}=e,{getPrefixCls:f}=r.useContext(l.E_),p=f("skeleton",t),[h,m,g]=O(p),v=(0,a.Z)(e,["prefixCls","className"]),y=i()(p,"".concat(p,"-element"),{["".concat(p,"-active")]:c},n,o,m,g);return h(r.createElement("div",{className:y},r.createElement(s,Object.assign({prefixCls:"".concat(p,"-avatar"),shape:u,size:d},v))))},_.Input=e=>{let{prefixCls:t,className:n,rootClassName:o,active:c,block:u,size:d="default"}=e,{getPrefixCls:f}=r.useContext(l.E_),p=f("skeleton",t),[h,m,g]=O(p),v=(0,a.Z)(e,["prefixCls"]),y=i()(p,"".concat(p,"-element"),{["".concat(p,"-active")]:c,["".concat(p,"-block")]:u},n,o,m,g);return h(r.createElement("div",{className:y},r.createElement(s,Object.assign({prefixCls:"".concat(p,"-input"),size:d},v))))},_.Image=e=>{let{prefixCls:t,className:n,rootClassName:o,style:a,active:c}=e,{getPrefixCls:s}=r.useContext(l.E_),u=s("skeleton",t),[d,f,p]=O(u),h=i()(u,"".concat(u,"-element"),{["".concat(u,"-active")]:c},n,o,f,p);return d(r.createElement("div",{className:h},r.createElement("div",{className:i()("".concat(u,"-image"),n),style:a},r.createElement("svg",{viewBox:"0 0 1098 
1024",xmlns:"http://www.w3.org/2000/svg",className:"".concat(u,"-image-svg")},r.createElement("path",{d:"M365.714286 329.142857q0 45.714286-32.036571 77.677714t-77.677714 32.036571-77.677714-32.036571-32.036571-77.677714 32.036571-77.677714 77.677714-32.036571 77.677714 32.036571 32.036571 77.677714zM950.857143 548.571429l0 256-804.571429 0 0-109.714286 182.857143-182.857143 91.428571 91.428571 292.571429-292.571429zM1005.714286 146.285714l-914.285714 0q-7.460571 0-12.873143 5.412571t-5.412571 12.873143l0 694.857143q0 7.460571 5.412571 12.873143t12.873143 5.412571l914.285714 0q7.460571 0 12.873143-5.412571t5.412571-12.873143l0-694.857143q0-7.460571-5.412571-12.873143t-12.873143-5.412571zM1097.142857 164.571429l0 694.857143q0 37.741714-26.843429 64.585143t-64.585143 26.843429l-914.285714 0q-37.741714 0-64.585143-26.843429t-26.843429-64.585143l0-694.857143q0-37.741714 26.843429-64.585143t64.585143-26.843429l914.285714 0q37.741714 0 64.585143 26.843429t26.843429 64.585143z",className:"".concat(u,"-image-path")})))))},_.Node=e=>{let{prefixCls:t,className:n,rootClassName:o,style:a,active:c,children:s}=e,{getPrefixCls:u}=r.useContext(l.E_),d=u("skeleton",t),[f,p,h]=O(d),m=i()(d,"".concat(d,"-element"),{["".concat(d,"-active")]:c},p,n,o,h),g=null!=s?s:r.createElement(M,null);return f(r.createElement("div",{className:m},r.createElement("div",{className:i()("".concat(d,"-image"),n),style:a},g)))};var D=n(49638),Z=n(39760),L=n(96473),z=n(11993),B=n(31686),F=n(26365),H=n(41154),q=n(6989),W=n(50506),K=n(79267),V=(0,r.createContext)(null),U=n(31474),G=n(58525),X=n(28791),$=n(53346),Y=function(e){var t=e.activeTabOffset,n=e.horizontal,o=e.rtl,i=e.indicator,a=void 0===i?{}:i,l=a.size,c=a.align,s=void 0===c?"center":c,u=(0,r.useState)(),d=(0,F.Z)(u,2),f=d[0],p=d[1],h=(0,r.useRef)(),m=r.useCallback(function(e){return"function"==typeof l?l(e):"number"==typeof l?l:e},[l]);function g(){$.Z.cancel(h.current)}return(0,r.useEffect)(function(){var e={};if(t){if(n){e.width=m(t.width);var r=o?"right":"left";"start"===s&&(e[r]=t[r]),"center"===s&&(e[r]=t[r]+t.width/2,e.transform=o?"translateX(50%)":"translateX(-50%)"),"end"===s&&(e[r]=t[r]+t.width,e.transform="translateX(-100%)")}else e.height=m(t.height),"start"===s&&(e.top=t.top),"center"===s&&(e.top=t.top+t.height/2,e.transform="translateY(-50%)"),"end"===s&&(e.top=t.top+t.height,e.transform="translateY(-100%)")}return g(),h.current=(0,$.Z)(function(){p(e)}),g},[t,n,o,s,m]),{style:f}},Q={width:0,height:0,left:0,top:0};function J(e,t){var n=r.useRef(e),o=r.useState({}),i=(0,F.Z)(o,2)[1];return[n.current,function(e){var r="function"==typeof e?e(n.current):e;r!==n.current&&t(r,n.current),n.current=r,i({})}]}var ee=n(27380);function et(e){var t=(0,r.useState)(0),n=(0,F.Z)(t,2),o=n[0],i=n[1],a=(0,r.useRef)(0),l=(0,r.useRef)();return l.current=e,(0,ee.o)(function(){var e;null===(e=l.current)||void 0===e||e.call(l)},[o]),function(){a.current===o&&(a.current+=1,i(a.current))}}var en={width:0,height:0,left:0,top:0,right:0};function er(e){var t;return e instanceof Map?(t={},e.forEach(function(e,n){t[n]=e})):t=e,JSON.stringify(t)}function eo(e){return String(e).replace(/"/g,"TABS_DQ")}function ei(e,t,n,r){return!!n&&!r&&!1!==e&&(void 0!==e||!1!==t&&null!==t)}var ea=r.forwardRef(function(e,t){var n=e.prefixCls,o=e.editable,i=e.locale,a=e.style;return o&&!1!==o.showAdd?r.createElement("button",{ref:t,type:"button",className:"".concat(n,"-nav-add"),style:a,"aria-label":(null==i?void 0:i.addAriaLabel)||"Add 
tab",onClick:function(e){o.onEdit("add",{event:e})}},o.addIcon||"+"):null}),el=r.forwardRef(function(e,t){var n,o=e.position,i=e.prefixCls,a=e.extra;if(!a)return null;var l={};return"object"!==(0,H.Z)(a)||r.isValidElement(a)?l.right=a:l=a,"right"===o&&(n=l.right),"left"===o&&(n=l.left),n?r.createElement("div",{className:"".concat(i,"-extra-content"),ref:t},n):null}),ec=n(71030),es=n(33082),eu=n(95814),ed=r.forwardRef(function(e,t){var n=e.prefixCls,o=e.id,a=e.tabs,l=e.locale,c=e.mobile,s=e.moreIcon,u=e.moreTransitionName,d=e.style,f=e.className,p=e.editable,h=e.tabBarGutter,m=e.rtl,g=e.removeAriaLabel,v=e.onTabClick,y=e.getPopupContainer,b=e.popupClassName,x=(0,r.useState)(!1),w=(0,F.Z)(x,2),S=w[0],k=w[1],E=(0,r.useState)(null),C=(0,F.Z)(E,2),O=C[0],j=C[1],P="".concat(o,"-more-popup"),N="".concat(n,"-dropdown"),M=null!==O?"".concat(P,"-").concat(O):null,I=null==l?void 0:l.dropdownAriaLabel,R=r.createElement(es.ZP,{onClick:function(e){v(e.key,e.domEvent),k(!1)},prefixCls:"".concat(N,"-menu"),id:P,tabIndex:-1,role:"listbox","aria-activedescendant":M,selectedKeys:[O],"aria-label":void 0!==I?I:"expanded dropdown"},a.map(function(e){var t=e.closable,n=e.disabled,i=e.closeIcon,a=e.key,l=e.label,c=ei(t,i,p,n);return r.createElement(es.sN,{key:a,id:"".concat(P,"-").concat(a),role:"option","aria-controls":o&&"".concat(o,"-panel-").concat(a),disabled:n},r.createElement("span",null,l),c&&r.createElement("button",{type:"button","aria-label":g||"remove",tabIndex:0,className:"".concat(N,"-menu-item-remove"),onClick:function(e){e.stopPropagation(),e.preventDefault(),e.stopPropagation(),p.onEdit("remove",{key:a,event:e})}},i||p.removeIcon||"\xd7"))}));function T(e){for(var t=a.filter(function(e){return!e.disabled}),n=t.findIndex(function(e){return e.key===O})||0,r=t.length,o=0;oMath.abs(l-n)?[l,c,s-t.x,u-t.y]:[n,r,i,o]},em=function(e){var t=e.current||{},n=t.offsetWidth,r=void 0===n?0:n,o=t.offsetHeight;if(e.current){var i=e.current.getBoundingClientRect(),a=i.width,l=i.height;if(1>Math.abs(a-r))return[a,l]}return[r,void 0===o?0:o]},eg=function(e,t){return e[t?0:1]},ev=r.forwardRef(function(e,t){var n,o,a,l,c,s,u,d,f,p,h,m,g,v,y,b,x,w,S,k,E,C,O,P,N,M,R,T,A,_,D,Z,L,H,q,W,K,$,ee,ei=e.className,ec=e.style,es=e.id,eu=e.animated,ed=e.activeKey,ev=e.rtl,ey=e.extra,eb=e.editable,ex=e.locale,ew=e.tabPosition,eS=e.tabBarGutter,ek=e.children,eE=e.onTabClick,eC=e.onTabScroll,eO=e.indicator,ej=r.useContext(V),eP=ej.prefixCls,eN=ej.tabs,eM=(0,r.useRef)(null),eI=(0,r.useRef)(null),eR=(0,r.useRef)(null),eT=(0,r.useRef)(null),eA=(0,r.useRef)(null),e_=(0,r.useRef)(null),eD=(0,r.useRef)(null),eZ="top"===ew||"bottom"===ew,eL=J(0,function(e,t){eZ&&eC&&eC({direction:e>t?"left":"right"})}),ez=(0,F.Z)(eL,2),eB=ez[0],eF=ez[1],eH=J(0,function(e,t){!eZ&&eC&&eC({direction:e>t?"top":"bottom"})}),eq=(0,F.Z)(eH,2),eW=eq[0],eK=eq[1],eV=(0,r.useState)([0,0]),eU=(0,F.Z)(eV,2),eG=eU[0],eX=eU[1],e$=(0,r.useState)([0,0]),eY=(0,F.Z)(e$,2),eQ=eY[0],eJ=eY[1],e0=(0,r.useState)([0,0]),e1=(0,F.Z)(e0,2),e2=e1[0],e6=e1[1],e4=(0,r.useState)([0,0]),e3=(0,F.Z)(e4,2),e5=e3[0],e8=e3[1],e7=(n=new Map,o=(0,r.useRef)([]),a=(0,r.useState)({}),l=(0,F.Z)(a,2)[1],c=(0,r.useRef)("function"==typeof n?n():n),s=et(function(){var e=c.current;o.current.forEach(function(t){e=t(e)}),o.current=[],c.current=e,l({})}),[c.current,function(e){o.current.push(e),s()}]),e9=(0,F.Z)(e7,2),te=e9[0],tt=e9[1],tn=(u=eQ[0],(0,r.useMemo)(function(){for(var e=new Map,t=te.get(null===(o=eN[0])||void 0===o?void 
0:o.key)||Q,n=t.left+t.width,r=0;rtd?td:e}eZ&&ev?(tu=0,td=Math.max(0,to-tc)):(tu=Math.min(0,tc-to),td=0);var tp=(0,r.useRef)(null),th=(0,r.useState)(),tm=(0,F.Z)(th,2),tg=tm[0],tv=tm[1];function ty(){tv(Date.now())}function tb(){tp.current&&clearTimeout(tp.current)}d=function(e,t){function n(e,t){e(function(e){return tf(e+t)})}return!!tl&&(eZ?n(eF,e):n(eK,t),tb(),ty(),!0)},f=(0,r.useState)(),h=(p=(0,F.Z)(f,2))[0],m=p[1],g=(0,r.useState)(0),y=(v=(0,F.Z)(g,2))[0],b=v[1],x=(0,r.useState)(0),S=(w=(0,F.Z)(x,2))[0],k=w[1],E=(0,r.useState)(),O=(C=(0,F.Z)(E,2))[0],P=C[1],N=(0,r.useRef)(),M=(0,r.useRef)(),(R=(0,r.useRef)(null)).current={onTouchStart:function(e){var t=e.touches[0];m({x:t.screenX,y:t.screenY}),window.clearInterval(N.current)},onTouchMove:function(e){if(h){e.preventDefault();var t=e.touches[0],n=t.screenX,r=t.screenY;m({x:n,y:r});var o=n-h.x,i=r-h.y;d(o,i);var a=Date.now();b(a),k(a-y),P({x:o,y:i})}},onTouchEnd:function(){if(h&&(m(null),P(null),O)){var e=O.x/S,t=O.y/S;if(!(.1>Math.max(Math.abs(e),Math.abs(t)))){var n=e,r=t;N.current=window.setInterval(function(){if(.01>Math.abs(n)&&.01>Math.abs(r)){window.clearInterval(N.current);return}n*=.9046104802746175,r*=.9046104802746175,d(20*n,20*r)},20)}}},onWheel:function(e){var t=e.deltaX,n=e.deltaY,r=0,o=Math.abs(t),i=Math.abs(n);o===i?r="x"===M.current?t:n:o>i?(r=t,M.current="x"):(r=n,M.current="y"),d(-r,-r)&&e.preventDefault()}},r.useEffect(function(){function e(e){R.current.onTouchMove(e)}function t(e){R.current.onTouchEnd(e)}return document.addEventListener("touchmove",e,{passive:!1}),document.addEventListener("touchend",t,{passive:!1}),eT.current.addEventListener("touchstart",function(e){R.current.onTouchStart(e)},{passive:!1}),eT.current.addEventListener("wheel",function(e){R.current.onWheel(e)}),function(){document.removeEventListener("touchmove",e),document.removeEventListener("touchend",t)}},[]),(0,r.useEffect)(function(){return tb(),tg&&(tp.current=setTimeout(function(){tv(0)},100)),tb},[tg]);var tx=(T=eZ?eB:eW,L=(A=(0,B.Z)((0,B.Z)({},e),{},{tabs:eN})).tabs,H=A.tabPosition,q=A.rtl,["top","bottom"].includes(H)?(_="width",D=q?"right":"left",Z=Math.abs(T)):(_="height",D="top",Z=-T),(0,r.useMemo)(function(){if(!L.length)return[0,0];for(var e=L.length,t=e,n=0;nZ+tc){t=n-1;break}}for(var o=0,i=e-1;i>=0;i-=1)if((tn.get(L[i].key)||en)[D]=t?[0,0]:[o,t]},[tn,tc,to,ti,ta,Z,H,L.map(function(e){return e.key}).join("_"),q])),tw=(0,F.Z)(tx,2),tS=tw[0],tk=tw[1],tE=(0,G.Z)(function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:ed,t=tn.get(e)||{width:0,height:0,left:0,right:0,top:0};if(eZ){var n=eB;ev?t.righteB+tc&&(n=t.right+t.width-tc):t.left<-eB?n=-t.left:t.left+t.width>-eB+tc&&(n=-(t.left+t.width-tc)),eK(0),eF(tf(n))}else{var r=eW;t.top<-eW?r=-t.top:t.top+t.height>-eW+tc&&(r=-(t.top+t.height-tc)),eF(0),eK(tf(r))}}),tC={};"top"===ew||"bottom"===ew?tC[ev?"marginRight":"marginLeft"]=eS:tC.marginTop=eS;var tO=eN.map(function(e,t){var n=e.key;return r.createElement(ep,{id:es,prefixCls:eP,key:n,tab:e,style:0===t?void 0:tC,closable:e.closable,editable:eb,active:n===ed,renderWrapper:ek,removeAriaLabel:null==ex?void 0:ex.removeAriaLabel,onClick:function(e){eE(n,e)},onFocus:function(){tE(n),ty(),eT.current&&(ev||(eT.current.scrollLeft=0),eT.current.scrollTop=0)}})}),tj=function(){return tt(function(){var e,t=new Map,n=null===(e=eA.current)||void 0===e?void 0:e.getBoundingClientRect();return eN.forEach(function(e){var r,o=e.key,i=null===(r=eA.current)||void 0===r?void 0:r.querySelector('[data-node-key="'.concat(eo(o),'"]'));if(i){var 
a=eh(i,n),l=(0,F.Z)(a,4),c=l[0],s=l[1],u=l[2],d=l[3];t.set(o,{width:c,height:s,left:u,top:d})}}),t})};(0,r.useEffect)(function(){tj()},[eN.map(function(e){return e.key}).join("_")]);var tP=et(function(){var e=em(eM),t=em(eI),n=em(eR);eX([e[0]-t[0]-n[0],e[1]-t[1]-n[1]]);var r=em(eD);e6(r),e8(em(e_));var o=em(eA);eJ([o[0]-r[0],o[1]-r[1]]),tj()}),tN=eN.slice(0,tS),tM=eN.slice(tk+1),tI=[].concat((0,I.Z)(tN),(0,I.Z)(tM)),tR=tn.get(ed),tT=Y({activeTabOffset:tR,horizontal:eZ,indicator:eO,rtl:ev}).style;(0,r.useEffect)(function(){tE()},[ed,tu,td,er(tR),er(tn),eZ]),(0,r.useEffect)(function(){tP()},[ev]);var tA=!!tI.length,t_="".concat(eP,"-nav-wrap");return eZ?ev?(K=eB>0,W=eB!==td):(W=eB<0,K=eB!==tu):($=eW<0,ee=eW!==tu),r.createElement(U.Z,{onResize:tP},r.createElement("div",{ref:(0,X.x1)(t,eM),role:"tablist",className:i()("".concat(eP,"-nav"),ei),style:ec,onKeyDown:function(){ty()}},r.createElement(el,{ref:eI,position:"left",extra:ey,prefixCls:eP}),r.createElement(U.Z,{onResize:tP},r.createElement("div",{className:i()(t_,(0,z.Z)((0,z.Z)((0,z.Z)((0,z.Z)({},"".concat(t_,"-ping-left"),W),"".concat(t_,"-ping-right"),K),"".concat(t_,"-ping-top"),$),"".concat(t_,"-ping-bottom"),ee)),ref:eT},r.createElement(U.Z,{onResize:tP},r.createElement("div",{ref:eA,className:"".concat(eP,"-nav-list"),style:{transform:"translate(".concat(eB,"px, ").concat(eW,"px)"),transition:tg?"none":void 0}},tO,r.createElement(ea,{ref:eD,prefixCls:eP,locale:ex,editable:eb,style:(0,B.Z)((0,B.Z)({},0===tO.length?void 0:tC),{},{visibility:tA?"hidden":null})}),r.createElement("div",{className:i()("".concat(eP,"-ink-bar"),(0,z.Z)({},"".concat(eP,"-ink-bar-animated"),eu.inkBar)),style:tT}))))),r.createElement(ef,(0,j.Z)({},e,{removeAriaLabel:null==ex?void 0:ex.removeAriaLabel,ref:e_,prefixCls:eP,tabs:tI,className:!tA&&ts,tabMoving:!!tg})),r.createElement(el,{ref:eR,position:"right",extra:ey,prefixCls:eP})))}),ey=r.forwardRef(function(e,t){var n=e.prefixCls,o=e.className,a=e.style,l=e.id,c=e.active,s=e.tabKey,u=e.children;return r.createElement("div",{id:l&&"".concat(l,"-panel-").concat(s),role:"tabpanel",tabIndex:c?0:-1,"aria-labelledby":l&&"".concat(l,"-tab-").concat(s),"aria-hidden":!c,style:a,className:i()(n,c&&"".concat(n,"-active"),o),ref:t},u)}),eb=["renderTabBar"],ex=["label","key"],ew=function(e){var t=e.renderTabBar,n=(0,q.Z)(e,eb),o=r.useContext(V).tabs;return t?t((0,B.Z)((0,B.Z)({},n),{},{panes:o.map(function(e){var t=e.label,n=e.key,o=(0,q.Z)(e,ex);return r.createElement(ey,(0,j.Z)({tab:t,key:n,tabKey:n},o))})}),ev):r.createElement(ev,n)},eS=n(47970),ek=["key","forceRender","style","className","destroyInactiveTabPane"],eE=function(e){var t=e.id,n=e.activeKey,o=e.animated,a=e.tabPosition,l=e.destroyInactiveTabPane,c=r.useContext(V),s=c.prefixCls,u=c.tabs,d=o.tabPane,f="".concat(s,"-tabpane");return r.createElement("div",{className:i()("".concat(s,"-content-holder"))},r.createElement("div",{className:i()("".concat(s,"-content"),"".concat(s,"-content-").concat(a),(0,z.Z)({},"".concat(s,"-content-animated"),d))},u.map(function(e){var a=e.key,c=e.forceRender,s=e.style,u=e.className,p=e.destroyInactiveTabPane,h=(0,q.Z)(e,ek),m=a===n;return r.createElement(eS.ZP,(0,j.Z)({key:a,visible:m,forceRender:c,removeOnLeave:!!(l||p),leavedClassName:"".concat(f,"-hidden")},o.tabPaneMotion),function(e,n){var o=e.style,l=e.className;return r.createElement(ey,(0,j.Z)({},h,{prefixCls:f,id:t,tabKey:a,animated:d,active:m,style:(0,B.Z)((0,B.Z)({},s),o),className:i()(u,l),ref:n}))})})))};n(32559);var 
eC=["id","prefixCls","className","items","direction","activeKey","defaultActiveKey","editable","animated","tabPosition","tabBarGutter","tabBarStyle","tabBarExtraContent","locale","moreIcon","moreTransitionName","destroyInactiveTabPane","renderTabBar","onChange","onTabClick","onTabScroll","getPopupContainer","popupClassName","indicator"],eO=0,ej=r.forwardRef(function(e,t){var n=e.id,o=e.prefixCls,a=void 0===o?"rc-tabs":o,l=e.className,c=e.items,s=e.direction,u=e.activeKey,d=e.defaultActiveKey,f=e.editable,p=e.animated,h=e.tabPosition,m=void 0===h?"top":h,g=e.tabBarGutter,v=e.tabBarStyle,y=e.tabBarExtraContent,b=e.locale,x=e.moreIcon,w=e.moreTransitionName,S=e.destroyInactiveTabPane,k=e.renderTabBar,E=e.onChange,C=e.onTabClick,O=e.onTabScroll,P=e.getPopupContainer,N=e.popupClassName,M=e.indicator,I=(0,q.Z)(e,eC),R=r.useMemo(function(){return(c||[]).filter(function(e){return e&&"object"===(0,H.Z)(e)&&"key"in e})},[c]),T="rtl"===s,A=function(){var e,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{inkBar:!0,tabPane:!1};return(e=!1===t?{inkBar:!1,tabPane:!1}:!0===t?{inkBar:!0,tabPane:!1}:(0,B.Z)({inkBar:!0},"object"===(0,H.Z)(t)?t:{})).tabPaneMotion&&void 0===e.tabPane&&(e.tabPane=!0),!e.tabPaneMotion&&e.tabPane&&(e.tabPane=!1),e}(p),_=(0,r.useState)(!1),D=(0,F.Z)(_,2),Z=D[0],L=D[1];(0,r.useEffect)(function(){L((0,K.Z)())},[]);var U=(0,W.Z)(function(){var e;return null===(e=R[0])||void 0===e?void 0:e.key},{value:u,defaultValue:d}),G=(0,F.Z)(U,2),X=G[0],$=G[1],Y=(0,r.useState)(function(){return R.findIndex(function(e){return e.key===X})}),Q=(0,F.Z)(Y,2),J=Q[0],ee=Q[1];(0,r.useEffect)(function(){var e,t=R.findIndex(function(e){return e.key===X});-1===t&&(t=Math.max(0,Math.min(J,R.length-1)),$(null===(e=R[t])||void 0===e?void 0:e.key)),ee(t)},[R.map(function(e){return e.key}).join("_"),X,J]);var et=(0,W.Z)(null,{value:n}),en=(0,F.Z)(et,2),er=en[0],eo=en[1];(0,r.useEffect)(function(){n||(eo("rc-tabs-".concat(eO)),eO+=1)},[]);var ei={id:er,activeKey:X,animated:A,tabPosition:m,rtl:T,mobile:Z},ea=(0,B.Z)((0,B.Z)({},ei),{},{editable:f,locale:b,moreIcon:x,moreTransitionName:w,tabBarGutter:g,onTabClick:function(e,t){null==C||C(e,t);var n=e!==X;$(e),n&&(null==E||E(e))},onTabScroll:O,extra:y,style:v,panes:null,getPopupContainer:P,popupClassName:N,indicator:M});return r.createElement(V.Provider,{value:{tabs:R,prefixCls:a}},r.createElement("div",(0,j.Z)({ref:t,id:n,className:i()(a,"".concat(a,"-").concat(m),(0,z.Z)((0,z.Z)((0,z.Z)({},"".concat(a,"-mobile"),Z),"".concat(a,"-editable"),f),"".concat(a,"-rtl"),T),l)},I),r.createElement(ew,(0,j.Z)({},ea,{renderTabBar:k})),r.createElement(eE,(0,j.Z)({destroyInactiveTabPane:S},ei,{animated:A}))))}),eP=n(64024),eN=n(68710);let eM={motionAppear:!1,motionEnter:!0,motionLeave:!0};var eI=n(45287),eR=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n},eT=n(12918),eA=n(18544),e_=e=>{let{componentCls:t,motionDurationSlow:n}=e;return[{[t]:{["".concat(t,"-switch")]:{"&-appear, &-enter":{transition:"none","&-start":{opacity:0},"&-active":{opacity:1,transition:"opacity ".concat(n)}},"&-leave":{position:"absolute",transition:"none",inset:0,"&-start":{opacity:1},"&-active":{opacity:0,transition:"opacity ".concat(n)}}}}},[(0,eA.oN)(e,"slide-up"),(0,eA.oN)(e,"slide-down")]]};let 
eD=e=>{let{componentCls:t,tabsCardPadding:n,cardBg:r,cardGutter:o,colorBorderSecondary:i,itemSelectedColor:a}=e;return{["".concat(t,"-card")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{["".concat(t,"-tab")]:{margin:0,padding:n,background:r,border:"".concat((0,u.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(i),transition:"all ".concat(e.motionDurationSlow," ").concat(e.motionEaseInOut)},["".concat(t,"-tab-active")]:{color:a,background:e.colorBgContainer},["".concat(t,"-ink-bar")]:{visibility:"hidden"}},["&".concat(t,"-top, &").concat(t,"-bottom")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{["".concat(t,"-tab + ").concat(t,"-tab")]:{marginLeft:{_skip_check_:!0,value:(0,u.bf)(o)}}}},["&".concat(t,"-top")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{["".concat(t,"-tab")]:{borderRadius:"".concat((0,u.bf)(e.borderRadiusLG)," ").concat((0,u.bf)(e.borderRadiusLG)," 0 0")},["".concat(t,"-tab-active")]:{borderBottomColor:e.colorBgContainer}}},["&".concat(t,"-bottom")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{["".concat(t,"-tab")]:{borderRadius:"0 0 ".concat((0,u.bf)(e.borderRadiusLG)," ").concat((0,u.bf)(e.borderRadiusLG))},["".concat(t,"-tab-active")]:{borderTopColor:e.colorBgContainer}}},["&".concat(t,"-left, &").concat(t,"-right")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{["".concat(t,"-tab + ").concat(t,"-tab")]:{marginTop:(0,u.bf)(o)}}},["&".concat(t,"-left")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{["".concat(t,"-tab")]:{borderRadius:{_skip_check_:!0,value:"".concat((0,u.bf)(e.borderRadiusLG)," 0 0 ").concat((0,u.bf)(e.borderRadiusLG))}},["".concat(t,"-tab-active")]:{borderRightColor:{_skip_check_:!0,value:e.colorBgContainer}}}},["&".concat(t,"-right")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{["".concat(t,"-tab")]:{borderRadius:{_skip_check_:!0,value:"0 ".concat((0,u.bf)(e.borderRadiusLG)," ").concat((0,u.bf)(e.borderRadiusLG)," 0")}},["".concat(t,"-tab-active")]:{borderLeftColor:{_skip_check_:!0,value:e.colorBgContainer}}}}}}},eZ=e=>{let{componentCls:t,itemHoverColor:n,dropdownEdgeChildVerticalPadding:r}=e;return{["".concat(t,"-dropdown")]:Object.assign(Object.assign({},(0,eT.Wf)(e)),{position:"absolute",top:-9999,left:{_skip_check_:!0,value:-9999},zIndex:e.zIndexPopup,display:"block","&-hidden":{display:"none"},["".concat(t,"-dropdown-menu")]:{maxHeight:e.tabsDropdownHeight,margin:0,padding:"".concat((0,u.bf)(r)," 0"),overflowX:"hidden",overflowY:"auto",textAlign:{_skip_check_:!0,value:"left"},listStyleType:"none",backgroundColor:e.colorBgContainer,backgroundClip:"padding-box",borderRadius:e.borderRadiusLG,outline:"none",boxShadow:e.boxShadowSecondary,"&-item":Object.assign(Object.assign({},eT.vS),{display:"flex",alignItems:"center",minWidth:e.tabsDropdownWidth,margin:0,padding:"".concat((0,u.bf)(e.paddingXXS)," ").concat((0,u.bf)(e.paddingSM)),color:e.colorText,fontWeight:"normal",fontSize:e.fontSize,lineHeight:e.lineHeight,cursor:"pointer",transition:"all ".concat(e.motionDurationSlow),"> span":{flex:1,whiteSpace:"nowrap"},"&-remove":{flex:"none",marginLeft:{_skip_check_:!0,value:e.marginSM},color:e.colorTextDescription,fontSize:e.fontSizeSM,background:"transparent",border:0,cursor:"pointer","&:hover":{color:n}},"&:hover":{background:e.controlItemBgHover},"&-disabled":{"&, 
&:hover":{color:e.colorTextDisabled,background:"transparent",cursor:"not-allowed"}}})}})}},eL=e=>{let{componentCls:t,margin:n,colorBorderSecondary:r,horizontalMargin:o,verticalItemPadding:i,verticalItemMargin:a,calc:l}=e;return{["".concat(t,"-top, ").concat(t,"-bottom")]:{flexDirection:"column",["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{margin:o,"&::before":{position:"absolute",right:{_skip_check_:!0,value:0},left:{_skip_check_:!0,value:0},borderBottom:"".concat((0,u.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(r),content:"''"},["".concat(t,"-ink-bar")]:{height:e.lineWidthBold,"&-animated":{transition:"width ".concat(e.motionDurationSlow,", left ").concat(e.motionDurationSlow,",\n right ").concat(e.motionDurationSlow)}},["".concat(t,"-nav-wrap")]:{"&::before, &::after":{top:0,bottom:0,width:e.controlHeight},"&::before":{left:{_skip_check_:!0,value:0},boxShadow:e.boxShadowTabsOverflowLeft},"&::after":{right:{_skip_check_:!0,value:0},boxShadow:e.boxShadowTabsOverflowRight},["&".concat(t,"-nav-wrap-ping-left::before")]:{opacity:1},["&".concat(t,"-nav-wrap-ping-right::after")]:{opacity:1}}}},["".concat(t,"-top")]:{["> ".concat(t,"-nav,\n > div > ").concat(t,"-nav")]:{"&::before":{bottom:0},["".concat(t,"-ink-bar")]:{bottom:0}}},["".concat(t,"-bottom")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{order:1,marginTop:n,marginBottom:0,"&::before":{top:0},["".concat(t,"-ink-bar")]:{top:0}},["> ".concat(t,"-content-holder, > div > ").concat(t,"-content-holder")]:{order:0}},["".concat(t,"-left, ").concat(t,"-right")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{flexDirection:"column",minWidth:l(e.controlHeight).mul(1.25).equal(),["".concat(t,"-tab")]:{padding:i,textAlign:"center"},["".concat(t,"-tab + ").concat(t,"-tab")]:{margin:a},["".concat(t,"-nav-wrap")]:{flexDirection:"column","&::before, &::after":{right:{_skip_check_:!0,value:0},left:{_skip_check_:!0,value:0},height:e.controlHeight},"&::before":{top:0,boxShadow:e.boxShadowTabsOverflowTop},"&::after":{bottom:0,boxShadow:e.boxShadowTabsOverflowBottom},["&".concat(t,"-nav-wrap-ping-top::before")]:{opacity:1},["&".concat(t,"-nav-wrap-ping-bottom::after")]:{opacity:1}},["".concat(t,"-ink-bar")]:{width:e.lineWidthBold,"&-animated":{transition:"height ".concat(e.motionDurationSlow,", top ").concat(e.motionDurationSlow)}},["".concat(t,"-nav-list, ").concat(t,"-nav-operations")]:{flex:"1 0 auto",flexDirection:"column"}}},["".concat(t,"-left")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{["".concat(t,"-ink-bar")]:{right:{_skip_check_:!0,value:0}}},["> ".concat(t,"-content-holder, > div > ").concat(t,"-content-holder")]:{marginLeft:{_skip_check_:!0,value:(0,u.bf)(l(e.lineWidth).mul(-1).equal())},borderLeft:{_skip_check_:!0,value:"".concat((0,u.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder)},["> ".concat(t,"-content > ").concat(t,"-tabpane")]:{paddingLeft:{_skip_check_:!0,value:e.paddingLG}}}},["".concat(t,"-right")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{order:1,["".concat(t,"-ink-bar")]:{left:{_skip_check_:!0,value:0}}},["> ".concat(t,"-content-holder, > div > ").concat(t,"-content-holder")]:{order:0,marginRight:{_skip_check_:!0,value:l(e.lineWidth).mul(-1).equal()},borderRight:{_skip_check_:!0,value:"".concat((0,u.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder)},["> ".concat(t,"-content > 
").concat(t,"-tabpane")]:{paddingRight:{_skip_check_:!0,value:e.paddingLG}}}}}},ez=e=>{let{componentCls:t,cardPaddingSM:n,cardPaddingLG:r,horizontalItemPaddingSM:o,horizontalItemPaddingLG:i}=e;return{[t]:{"&-small":{["> ".concat(t,"-nav")]:{["".concat(t,"-tab")]:{padding:o,fontSize:e.titleFontSizeSM}}},"&-large":{["> ".concat(t,"-nav")]:{["".concat(t,"-tab")]:{padding:i,fontSize:e.titleFontSizeLG}}}},["".concat(t,"-card")]:{["&".concat(t,"-small")]:{["> ".concat(t,"-nav")]:{["".concat(t,"-tab")]:{padding:n}},["&".concat(t,"-bottom")]:{["> ".concat(t,"-nav ").concat(t,"-tab")]:{borderRadius:"0 0 ".concat((0,u.bf)(e.borderRadius)," ").concat((0,u.bf)(e.borderRadius))}},["&".concat(t,"-top")]:{["> ".concat(t,"-nav ").concat(t,"-tab")]:{borderRadius:"".concat((0,u.bf)(e.borderRadius)," ").concat((0,u.bf)(e.borderRadius)," 0 0")}},["&".concat(t,"-right")]:{["> ".concat(t,"-nav ").concat(t,"-tab")]:{borderRadius:{_skip_check_:!0,value:"0 ".concat((0,u.bf)(e.borderRadius)," ").concat((0,u.bf)(e.borderRadius)," 0")}}},["&".concat(t,"-left")]:{["> ".concat(t,"-nav ").concat(t,"-tab")]:{borderRadius:{_skip_check_:!0,value:"".concat((0,u.bf)(e.borderRadius)," 0 0 ").concat((0,u.bf)(e.borderRadius))}}}},["&".concat(t,"-large")]:{["> ".concat(t,"-nav")]:{["".concat(t,"-tab")]:{padding:r}}}}}},eB=e=>{let{componentCls:t,itemActiveColor:n,itemHoverColor:r,iconCls:o,tabsHorizontalItemMargin:i,horizontalItemPadding:a,itemSelectedColor:l,itemColor:c}=e,s="".concat(t,"-tab");return{[s]:{position:"relative",WebkitTouchCallout:"none",WebkitTapHighlightColor:"transparent",display:"inline-flex",alignItems:"center",padding:a,fontSize:e.titleFontSize,background:"transparent",border:0,outline:"none",cursor:"pointer",color:c,"&-btn, &-remove":Object.assign({"&:focus:not(:focus-visible), &:active":{color:n}},(0,eT.Qy)(e)),"&-btn":{outline:"none",transition:"all 0.3s",["".concat(s,"-icon:not(:last-child)")]:{marginInlineEnd:e.marginSM}},"&-remove":{flex:"none",marginRight:{_skip_check_:!0,value:e.calc(e.marginXXS).mul(-1).equal()},marginLeft:{_skip_check_:!0,value:e.marginXS},color:e.colorTextDescription,fontSize:e.fontSizeSM,background:"transparent",border:"none",outline:"none",cursor:"pointer",transition:"all ".concat(e.motionDurationSlow),"&:hover":{color:e.colorTextHeading}},"&:hover":{color:r},["&".concat(s,"-active ").concat(s,"-btn")]:{color:l,textShadow:e.tabsActiveTextShadow},["&".concat(s,"-disabled")]:{color:e.colorTextDisabled,cursor:"not-allowed"},["&".concat(s,"-disabled ").concat(s,"-btn, &").concat(s,"-disabled ").concat(t,"-remove")]:{"&:focus, &:active":{color:e.colorTextDisabled}},["& ".concat(s,"-remove ").concat(o)]:{margin:0},["".concat(o,":not(:last-child)")]:{marginRight:{_skip_check_:!0,value:e.marginSM}}},["".concat(s," + ").concat(s)]:{margin:{_skip_check_:!0,value:i}}}},eF=e=>{let{componentCls:t,tabsHorizontalItemMarginRTL:n,iconCls:r,cardGutter:o,calc:i}=e;return{["".concat(t,"-rtl")]:{direction:"rtl",["".concat(t,"-nav")]:{["".concat(t,"-tab")]:{margin:{_skip_check_:!0,value:n},["".concat(t,"-tab:last-of-type")]:{marginLeft:{_skip_check_:!0,value:0}},[r]:{marginRight:{_skip_check_:!0,value:0},marginLeft:{_skip_check_:!0,value:(0,u.bf)(e.marginSM)}},["".concat(t,"-tab-remove")]:{marginRight:{_skip_check_:!0,value:(0,u.bf)(e.marginXS)},marginLeft:{_skip_check_:!0,value:(0,u.bf)(i(e.marginXXS).mul(-1).equal())},[r]:{margin:0}}}},["&".concat(t,"-left")]:{["> ".concat(t,"-nav")]:{order:1},["> ".concat(t,"-content-holder")]:{order:0}},["&".concat(t,"-right")]:{["> 
".concat(t,"-nav")]:{order:0},["> ".concat(t,"-content-holder")]:{order:1}},["&".concat(t,"-card").concat(t,"-top, &").concat(t,"-card").concat(t,"-bottom")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{["".concat(t,"-tab + ").concat(t,"-tab")]:{marginRight:{_skip_check_:!0,value:o},marginLeft:{_skip_check_:!0,value:0}}}}},["".concat(t,"-dropdown-rtl")]:{direction:"rtl"},["".concat(t,"-menu-item")]:{["".concat(t,"-dropdown-rtl")]:{textAlign:{_skip_check_:!0,value:"right"}}}}},eH=e=>{let{componentCls:t,tabsCardPadding:n,cardHeight:r,cardGutter:o,itemHoverColor:i,itemActiveColor:a,colorBorderSecondary:l}=e;return{[t]:Object.assign(Object.assign(Object.assign(Object.assign({},(0,eT.Wf)(e)),{display:"flex",["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{position:"relative",display:"flex",flex:"none",alignItems:"center",["".concat(t,"-nav-wrap")]:{position:"relative",display:"flex",flex:"auto",alignSelf:"stretch",overflow:"hidden",whiteSpace:"nowrap",transform:"translate(0)","&::before, &::after":{position:"absolute",zIndex:1,opacity:0,transition:"opacity ".concat(e.motionDurationSlow),content:"''",pointerEvents:"none"}},["".concat(t,"-nav-list")]:{position:"relative",display:"flex",transition:"opacity ".concat(e.motionDurationSlow)},["".concat(t,"-nav-operations")]:{display:"flex",alignSelf:"stretch"},["".concat(t,"-nav-operations-hidden")]:{position:"absolute",visibility:"hidden",pointerEvents:"none"},["".concat(t,"-nav-more")]:{position:"relative",padding:n,background:"transparent",border:0,color:e.colorText,"&::after":{position:"absolute",right:{_skip_check_:!0,value:0},bottom:0,left:{_skip_check_:!0,value:0},height:e.calc(e.controlHeightLG).div(8).equal(),transform:"translateY(100%)",content:"''"}},["".concat(t,"-nav-add")]:Object.assign({minWidth:r,minHeight:r,marginLeft:{_skip_check_:!0,value:o},padding:"0 ".concat((0,u.bf)(e.paddingXS)),background:"transparent",border:"".concat((0,u.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(l),borderRadius:"".concat((0,u.bf)(e.borderRadiusLG)," ").concat((0,u.bf)(e.borderRadiusLG)," 0 0"),outline:"none",cursor:"pointer",color:e.colorText,transition:"all ".concat(e.motionDurationSlow," ").concat(e.motionEaseInOut),"&:hover":{color:i},"&:active, &:focus:not(:focus-visible)":{color:a}},(0,eT.Qy)(e))},["".concat(t,"-extra-content")]:{flex:"none"},["".concat(t,"-ink-bar")]:{position:"absolute",background:e.inkBarColor,pointerEvents:"none"}}),eB(e)),{["".concat(t,"-content")]:{position:"relative",width:"100%"},["".concat(t,"-content-holder")]:{flex:"auto",minWidth:0,minHeight:0},["".concat(t,"-tabpane")]:{outline:"none","&-hidden":{display:"none"}}}),["".concat(t,"-centered")]:{["> ".concat(t,"-nav, > div > ").concat(t,"-nav")]:{["".concat(t,"-nav-wrap")]:{["&:not([class*='".concat(t,"-nav-wrap-ping'])")]:{justifyContent:"center"}}}}}};var eq=(0,d.I$)("Tabs",e=>{let t=(0,f.TS)(e,{tabsCardPadding:e.cardPadding,dropdownEdgeChildVerticalPadding:e.paddingXXS,tabsActiveTextShadow:"0 0 0.25px currentcolor",tabsDropdownHeight:200,tabsDropdownWidth:120,tabsHorizontalItemMargin:"0 0 0 ".concat((0,u.bf)(e.horizontalItemGutter)),tabsHorizontalItemMarginRTL:"0 0 0 ".concat((0,u.bf)(e.horizontalItemGutter))});return[ez(t),eF(t),eL(t),eZ(t),eD(t),eH(t),e_(t)]},e=>{let t=e.controlHeightLG;return{zIndexPopup:e.zIndexPopupBase+50,cardBg:e.colorFillAlter,cardHeight:t,cardPadding:"".concat((t-Math.round(e.fontSize*e.lineHeight))/2-e.lineWidth,"px ").concat(e.padding,"px"),cardPaddingSM:"".concat(1.5*e.paddingXXS,"px 
").concat(e.padding,"px"),cardPaddingLG:"".concat(e.paddingXS,"px ").concat(e.padding,"px ").concat(1.5*e.paddingXXS,"px"),titleFontSize:e.fontSize,titleFontSizeLG:e.fontSizeLG,titleFontSizeSM:e.fontSize,inkBarColor:e.colorPrimary,horizontalMargin:"0 0 ".concat(e.margin,"px 0"),horizontalItemGutter:32,horizontalItemMargin:"",horizontalItemMarginRTL:"",horizontalItemPadding:"".concat(e.paddingSM,"px 0"),horizontalItemPaddingSM:"".concat(e.paddingXS,"px 0"),horizontalItemPaddingLG:"".concat(e.padding,"px 0"),verticalItemPadding:"".concat(e.paddingXS,"px ").concat(e.paddingLG,"px"),verticalItemMargin:"".concat(e.margin,"px 0 0 0"),itemColor:e.colorText,itemSelectedColor:e.colorPrimary,itemHoverColor:e.colorPrimaryHover,itemActiveColor:e.colorPrimaryActive,cardGutter:e.marginXXS/2}}),eW=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let eK=e=>{var t,n,o,a,s,u;let d;let{type:f,className:p,rootClassName:h,size:m,onEdit:g,hideAdd:v,centered:y,addIcon:b,popupClassName:x,children:w,items:S,animated:k,style:E,indicatorSize:C,indicator:O}=e,j=eW(e,["type","className","rootClassName","size","onEdit","hideAdd","centered","addIcon","popupClassName","children","items","animated","style","indicatorSize","indicator"]),{prefixCls:P,moreIcon:N=r.createElement(Z.Z,null)}=j,{direction:M,tabs:I,getPrefixCls:R,getPopupContainer:T}=r.useContext(l.E_),A=R("tabs",P),_=(0,eP.Z)(A),[z,B,F]=eq(A,_);"editable-card"===f&&(d={onEdit:(e,t)=>{let{key:n,event:r}=t;null==g||g("add"===e?r:n,e)},removeIcon:r.createElement(D.Z,null),addIcon:b||r.createElement(L.Z,null),showAdd:!0!==v});let H=R(),q=(0,c.Z)(m),W=S||(0,eI.Z)(w).map(e=>{if(r.isValidElement(e)){let{key:t,props:n}=e,r=n||{},{tab:o}=r,i=eR(r,["tab"]);return Object.assign(Object.assign({key:String(t)},i),{label:o})}return null}).filter(e=>e),K=function(e){let t,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{inkBar:!0,tabPane:!1};return(t=!1===n?{inkBar:!1,tabPane:!1}:!0===n?{inkBar:!0,tabPane:!0}:Object.assign({inkBar:!0},"object"==typeof n?n:{})).tabPane&&(t.tabPaneMotion=Object.assign(Object.assign({},eM),{motionName:(0,eN.m)(e,"switch")})),t}(A,k),V=Object.assign(Object.assign({},null==I?void 0:I.style),E),U={align:null!==(t=null==O?void 0:O.align)&&void 0!==t?t:null===(n=null==I?void 0:I.indicator)||void 0===n?void 0:n.align,size:null!==(u=null!==(a=null!==(o=null==O?void 0:O.size)&&void 0!==o?o:C)&&void 0!==a?a:null===(s=null==I?void 0:I.indicator)||void 0===s?void 0:s.size)&&void 0!==u?u:null==I?void 0:I.indicatorSize};return z(r.createElement(ej,Object.assign({direction:M,getPopupContainer:T,moreTransitionName:"".concat(H,"-slide-up")},j,{items:W,className:i()({["".concat(A,"-").concat(q)]:q,["".concat(A,"-card")]:["card","editable-card"].includes(f),["".concat(A,"-editable-card")]:"editable-card"===f,["".concat(A,"-centered")]:y},null==I?void 0:I.className,p,h,B,F,_),popupClassName:i()(x,B,F,_),style:V,editable:d,moreIcon:N,prefixCls:A,animated:K,indicator:U})))};eK.TabPane=()=>null;var eV=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var 
o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n},eU=e=>{var{prefixCls:t,className:n,hoverable:o=!0}=e,a=eV(e,["prefixCls","className","hoverable"]);let{getPrefixCls:c}=r.useContext(l.E_),s=c("card",t),u=i()("".concat(s,"-grid"),n,{["".concat(s,"-grid-hoverable")]:o});return r.createElement("div",Object.assign({},a,{className:u}))};let eG=e=>{let{antCls:t,componentCls:n,headerHeight:r,cardPaddingBase:o,tabsMarginBottom:i}=e;return Object.assign(Object.assign({display:"flex",justifyContent:"center",flexDirection:"column",minHeight:r,marginBottom:-1,padding:"0 ".concat((0,u.bf)(o)),color:e.colorTextHeading,fontWeight:e.fontWeightStrong,fontSize:e.headerFontSize,background:e.headerBg,borderBottom:"".concat((0,u.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorderSecondary),borderRadius:"".concat((0,u.bf)(e.borderRadiusLG)," ").concat((0,u.bf)(e.borderRadiusLG)," 0 0")},(0,eT.dF)()),{"&-wrapper":{width:"100%",display:"flex",alignItems:"center"},"&-title":Object.assign(Object.assign({display:"inline-block",flex:1},eT.vS),{["\n > ".concat(n,"-typography,\n > ").concat(n,"-typography-edit-content\n ")]:{insetInlineStart:0,marginTop:0,marginBottom:0}}),["".concat(t,"-tabs-top")]:{clear:"both",marginBottom:i,color:e.colorText,fontWeight:"normal",fontSize:e.fontSize,"&-bar":{borderBottom:"".concat((0,u.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorderSecondary)}}})},eX=e=>{let{cardPaddingBase:t,colorBorderSecondary:n,cardShadow:r,lineWidth:o}=e;return{width:"33.33%",padding:t,border:0,borderRadius:0,boxShadow:"\n ".concat((0,u.bf)(o)," 0 0 0 ").concat(n,",\n 0 ").concat((0,u.bf)(o)," 0 0 ").concat(n,",\n ").concat((0,u.bf)(o)," ").concat((0,u.bf)(o)," 0 0 ").concat(n,",\n ").concat((0,u.bf)(o)," 0 0 0 ").concat(n," inset,\n 0 ").concat((0,u.bf)(o)," 0 0 ").concat(n," inset;\n "),transition:"all ".concat(e.motionDurationMid),"&-hoverable:hover":{position:"relative",zIndex:1,boxShadow:r}}},e$=e=>{let{componentCls:t,iconCls:n,actionsLiMargin:r,cardActionsIconSize:o,colorBorderSecondary:i,actionsBg:a}=e;return Object.assign(Object.assign({margin:0,padding:0,listStyle:"none",background:a,borderTop:"".concat((0,u.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(i),display:"flex",borderRadius:"0 0 ".concat((0,u.bf)(e.borderRadiusLG)," ").concat((0,u.bf)(e.borderRadiusLG))},(0,eT.dF)()),{"& > li":{margin:r,color:e.colorTextDescription,textAlign:"center","> span":{position:"relative",display:"block",minWidth:e.calc(e.cardActionsIconSize).mul(2).equal(),fontSize:e.fontSize,lineHeight:e.lineHeight,cursor:"pointer","&:hover":{color:e.colorPrimary,transition:"color ".concat(e.motionDurationMid)},["a:not(".concat(t,"-btn), > ").concat(n)]:{display:"inline-block",width:"100%",color:e.colorTextDescription,lineHeight:(0,u.bf)(e.fontHeight),transition:"color ".concat(e.motionDurationMid),"&:hover":{color:e.colorPrimary}},["> ".concat(n)]:{fontSize:o,lineHeight:(0,u.bf)(e.calc(o).mul(e.lineHeight).equal())}},"&:not(:last-child)":{borderInlineEnd:"".concat((0,u.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(i)}}})},eY=e=>Object.assign(Object.assign({margin:"".concat((0,u.bf)(e.calc(e.marginXXS).mul(-1).equal())," 0"),display:"flex"},(0,eT.dF)()),{"&-avatar":{paddingInlineEnd:e.padding},"&-detail":{overflow:"hidden",flex:1,"> 
div:not(:last-child)":{marginBottom:e.marginXS}},"&-title":Object.assign({color:e.colorTextHeading,fontWeight:e.fontWeightStrong,fontSize:e.fontSizeLG},eT.vS),"&-description":{color:e.colorTextDescription}}),eQ=e=>{let{componentCls:t,cardPaddingBase:n,colorFillAlter:r}=e;return{["".concat(t,"-head")]:{padding:"0 ".concat((0,u.bf)(n)),background:r,"&-title":{fontSize:e.fontSize}},["".concat(t,"-body")]:{padding:"".concat((0,u.bf)(e.padding)," ").concat((0,u.bf)(n))}}},eJ=e=>{let{componentCls:t}=e;return{overflow:"hidden",["".concat(t,"-body")]:{userSelect:"none"}}},e0=e=>{let{antCls:t,componentCls:n,cardShadow:r,cardHeadPadding:o,colorBorderSecondary:i,boxShadowTertiary:a,cardPaddingBase:l,extraColor:c}=e;return{[n]:Object.assign(Object.assign({},(0,eT.Wf)(e)),{position:"relative",background:e.colorBgContainer,borderRadius:e.borderRadiusLG,["&:not(".concat(n,"-bordered)")]:{boxShadow:a},["".concat(n,"-head")]:eG(e),["".concat(n,"-extra")]:{marginInlineStart:"auto",color:c,fontWeight:"normal",fontSize:e.fontSize},["".concat(n,"-body")]:Object.assign({padding:l,borderRadius:" 0 0 ".concat((0,u.bf)(e.borderRadiusLG)," ").concat((0,u.bf)(e.borderRadiusLG))},(0,eT.dF)()),["".concat(n,"-grid")]:eX(e),["".concat(n,"-cover")]:{"> *":{display:"block",width:"100%"},["img, img + ".concat(t,"-image-mask")]:{borderRadius:"".concat((0,u.bf)(e.borderRadiusLG)," ").concat((0,u.bf)(e.borderRadiusLG)," 0 0")}},["".concat(n,"-actions")]:e$(e),["".concat(n,"-meta")]:eY(e)}),["".concat(n,"-bordered")]:{border:"".concat((0,u.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(i),["".concat(n,"-cover")]:{marginTop:-1,marginInlineStart:-1,marginInlineEnd:-1}},["".concat(n,"-hoverable")]:{cursor:"pointer",transition:"box-shadow ".concat(e.motionDurationMid,", border-color ").concat(e.motionDurationMid),"&:hover":{borderColor:"transparent",boxShadow:r}},["".concat(n,"-contain-grid")]:{borderRadius:"".concat((0,u.bf)(e.borderRadiusLG)," ").concat((0,u.bf)(e.borderRadiusLG)," 0 0 "),["".concat(n,"-body")]:{display:"flex",flexWrap:"wrap"},["&:not(".concat(n,"-loading) ").concat(n,"-body")]:{marginBlockStart:e.calc(e.lineWidth).mul(-1).equal(),marginInlineStart:e.calc(e.lineWidth).mul(-1).equal(),padding:0}},["".concat(n,"-contain-tabs")]:{["> ".concat(n,"-head")]:{minHeight:0,["".concat(n,"-head-title, ").concat(n,"-extra")]:{paddingTop:o}}},["".concat(n,"-type-inner")]:eQ(e),["".concat(n,"-loading")]:eJ(e),["".concat(n,"-rtl")]:{direction:"rtl"}}},e1=e=>{let{componentCls:t,cardPaddingSM:n,headerHeightSM:r,headerFontSizeSM:o}=e;return{["".concat(t,"-small")]:{["> ".concat(t,"-head")]:{minHeight:r,padding:"0 ".concat((0,u.bf)(n)),fontSize:o,["> ".concat(t,"-head-wrapper")]:{["> ".concat(t,"-extra")]:{fontSize:e.fontSize}}},["> ".concat(t,"-body")]:{padding:n}},["".concat(t,"-small").concat(t,"-contain-tabs")]:{["> ".concat(t,"-head")]:{["".concat(t,"-head-title, ").concat(t,"-extra")]:{paddingTop:0,display:"flex",alignItems:"center"}}}}};var e2=(0,d.I$)("Card",e=>{let t=(0,f.TS)(e,{cardShadow:e.boxShadowCard,cardHeadPadding:e.padding,cardPaddingBase:e.paddingLG,cardActionsIconSize:e.fontSize,cardPaddingSM:12});return[e0(t),e1(t)]},e=>({headerBg:"transparent",headerFontSize:e.fontSizeLG,headerFontSizeSM:e.fontSize,headerHeight:e.fontSizeLG*e.lineHeightLG+2*e.padding,headerHeightSM:e.fontSize*e.lineHeight+2*e.paddingXS,actionsBg:e.colorBgContainer,actionsLiMargin:"".concat(e.paddingSM,"px 0"),tabsMarginBottom:-e.padding-e.lineWidth,extraColor:e.colorText})),e6=function(e,t){var n={};for(var r in 
e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let e4=e=>{let{prefixCls:t,actions:n=[]}=e;return r.createElement("ul",{className:"".concat(t,"-actions")},n.map((e,t)=>r.createElement("li",{style:{width:"".concat(100/n.length,"%")},key:"action-".concat(t)},r.createElement("span",null,e))))},e3=r.forwardRef((e,t)=>{let n;let{prefixCls:o,className:s,rootClassName:u,style:d,extra:f,headStyle:p={},bodyStyle:h={},title:m,loading:g,bordered:v=!0,size:y,type:b,cover:x,actions:w,tabList:S,children:k,activeTabKey:E,defaultActiveTabKey:C,tabBarExtraContent:O,hoverable:j,tabProps:P={}}=e,N=e6(e,["prefixCls","className","rootClassName","style","extra","headStyle","bodyStyle","title","loading","bordered","size","type","cover","actions","tabList","children","activeTabKey","defaultActiveTabKey","tabBarExtraContent","hoverable","tabProps"]),{getPrefixCls:M,direction:I,card:R}=r.useContext(l.E_),T=r.useMemo(()=>{let e=!1;return r.Children.forEach(k,t=>{t&&t.type&&t.type===eU&&(e=!0)}),e},[k]),A=M("card",o),[D,Z,L]=e2(A),z=r.createElement(_,{loading:!0,active:!0,paragraph:{rows:4},title:!1},k),B=void 0!==E,F=Object.assign(Object.assign({},P),{[B?"activeKey":"defaultActiveKey"]:B?E:C,tabBarExtraContent:O}),H=(0,c.Z)(y),q=H&&"default"!==H?H:"large",W=S?r.createElement(eK,Object.assign({size:q},F,{className:"".concat(A,"-head-tabs"),onChange:t=>{var n;null===(n=e.onTabChange)||void 0===n||n.call(e,t)},items:S.map(e=>{var{tab:t}=e;return Object.assign({label:t},e6(e,["tab"]))})})):null;(m||f||W)&&(n=r.createElement("div",{className:"".concat(A,"-head"),style:p},r.createElement("div",{className:"".concat(A,"-head-wrapper")},m&&r.createElement("div",{className:"".concat(A,"-head-title")},m),f&&r.createElement("div",{className:"".concat(A,"-extra")},f)),W));let K=x?r.createElement("div",{className:"".concat(A,"-cover")},x):null,V=r.createElement("div",{className:"".concat(A,"-body"),style:h},g?z:k),U=w&&w.length?r.createElement(e4,{prefixCls:A,actions:w}):null,G=(0,a.Z)(N,["onTabChange"]),X=i()(A,null==R?void 0:R.className,{["".concat(A,"-loading")]:g,["".concat(A,"-bordered")]:v,["".concat(A,"-hoverable")]:j,["".concat(A,"-contain-grid")]:T,["".concat(A,"-contain-tabs")]:S&&S.length,["".concat(A,"-").concat(H)]:H,["".concat(A,"-type-").concat(b)]:!!b,["".concat(A,"-rtl")]:"rtl"===I},s,u,Z,L),$=Object.assign(Object.assign({},null==R?void 0:R.style),d);return D(r.createElement("div",Object.assign({ref:t},G,{className:X,style:$}),n,K,V,U))});var e5=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};e3.Grid=eU,e3.Meta=e=>{let{prefixCls:t,className:n,avatar:o,title:a,description:c}=e,s=e5(e,["prefixCls","className","avatar","title","description"]),{getPrefixCls:u}=r.useContext(l.E_),d=u("card",t),f=i()("".concat(d,"-meta"),n),p=o?r.createElement("div",{className:"".concat(d,"-meta-avatar")},o):null,h=a?r.createElement("div",{className:"".concat(d,"-meta-title")},a):null,m=c?r.createElement("div",{className:"".concat(d,"-meta-description")},c):null,g=h||m?r.createElement("div",{className:"".concat(d,"-meta-detail")},h,m):null;return 
r.createElement("div",Object.assign({},s,{className:f}),p,g)};var e8=e3},69410:function(e,t,n){"use strict";var r=n(54998);t.Z=r.Z},91086:function(e,t,n){"use strict";var r=n(2265),o=n(71744),i=n(85180);t.Z=e=>{let{componentName:t}=e,{getPrefixCls:n}=(0,r.useContext)(o.E_),a=n("empty");switch(t){case"Table":case"List":return r.createElement(i.Z,{image:i.Z.PRESENTED_IMAGE_SIMPLE});case"Select":case"TreeSelect":case"Cascader":case"Transfer":case"Mentions":return r.createElement(i.Z,{image:i.Z.PRESENTED_IMAGE_SIMPLE,className:"".concat(a,"-small")});default:return r.createElement(i.Z,null)}}},23496:function(e,t,n){"use strict";n.d(t,{Z:function(){return h}});var r=n(2265),o=n(36760),i=n.n(o),a=n(71744),l=n(352),c=n(12918),s=n(80669),u=n(3104);let d=e=>{let{componentCls:t,sizePaddingEdgeHorizontal:n,colorSplit:r,lineWidth:o,textPaddingInline:i,orientationMargin:a,verticalMarginInline:s}=e;return{[t]:Object.assign(Object.assign({},(0,c.Wf)(e)),{borderBlockStart:"".concat((0,l.bf)(o)," solid ").concat(r),"&-vertical":{position:"relative",top:"-0.06em",display:"inline-block",height:"0.9em",marginInline:s,marginBlock:0,verticalAlign:"middle",borderTop:0,borderInlineStart:"".concat((0,l.bf)(o)," solid ").concat(r)},"&-horizontal":{display:"flex",clear:"both",width:"100%",minWidth:"100%",margin:"".concat((0,l.bf)(e.dividerHorizontalGutterMargin)," 0")},["&-horizontal".concat(t,"-with-text")]:{display:"flex",alignItems:"center",margin:"".concat((0,l.bf)(e.dividerHorizontalWithTextGutterMargin)," 0"),color:e.colorTextHeading,fontWeight:500,fontSize:e.fontSizeLG,whiteSpace:"nowrap",textAlign:"center",borderBlockStart:"0 ".concat(r),"&::before, &::after":{position:"relative",width:"50%",borderBlockStart:"".concat((0,l.bf)(o)," solid transparent"),borderBlockStartColor:"inherit",borderBlockEnd:0,transform:"translateY(50%)",content:"''"}},["&-horizontal".concat(t,"-with-text-left")]:{"&::before":{width:"calc(".concat(a," * 100%)")},"&::after":{width:"calc(100% - ".concat(a," * 100%)")}},["&-horizontal".concat(t,"-with-text-right")]:{"&::before":{width:"calc(100% - ".concat(a," * 100%)")},"&::after":{width:"calc(".concat(a," * 100%)")}},["".concat(t,"-inner-text")]:{display:"inline-block",paddingBlock:0,paddingInline:i},"&-dashed":{background:"none",borderColor:r,borderStyle:"dashed",borderWidth:"".concat((0,l.bf)(o)," 0 0")},["&-horizontal".concat(t,"-with-text").concat(t,"-dashed")]:{"&::before, &::after":{borderStyle:"dashed none none"}},["&-vertical".concat(t,"-dashed")]:{borderInlineStartWidth:o,borderInlineEnd:0,borderBlockStart:0,borderBlockEnd:0},["&-plain".concat(t,"-with-text")]:{color:e.colorText,fontWeight:"normal",fontSize:e.fontSize},["&-horizontal".concat(t,"-with-text-left").concat(t,"-no-default-orientation-margin-left")]:{"&::before":{width:0},"&::after":{width:"100%"},["".concat(t,"-inner-text")]:{paddingInlineStart:n}},["&-horizontal".concat(t,"-with-text-right").concat(t,"-no-default-orientation-margin-right")]:{"&::before":{width:"100%"},"&::after":{width:0},["".concat(t,"-inner-text")]:{paddingInlineEnd:n}}})}};var f=(0,s.I$)("Divider",e=>[d((0,u.TS)(e,{dividerHorizontalWithTextGutterMargin:e.margin,dividerHorizontalGutterMargin:e.marginLG,sizePaddingEdgeHorizontal:0}))],e=>({textPaddingInline:"1em",orientationMargin:.05,verticalMarginInline:e.marginXS}),{unitless:{orientationMargin:!0}}),p=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var 
o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n},h=e=>{let{getPrefixCls:t,direction:n,divider:o}=r.useContext(a.E_),{prefixCls:l,type:c="horizontal",orientation:s="center",orientationMargin:u,className:d,rootClassName:h,children:m,dashed:g,plain:v,style:y}=e,b=p(e,["prefixCls","type","orientation","orientationMargin","className","rootClassName","children","dashed","plain","style"]),x=t("divider",l),[w,S,k]=f(x),E=s.length>0?"-".concat(s):s,C=!!m,O="left"===s&&null!=u,j="right"===s&&null!=u,P=i()(x,null==o?void 0:o.className,S,k,"".concat(x,"-").concat(c),{["".concat(x,"-with-text")]:C,["".concat(x,"-with-text").concat(E)]:C,["".concat(x,"-dashed")]:!!g,["".concat(x,"-plain")]:!!v,["".concat(x,"-rtl")]:"rtl"===n,["".concat(x,"-no-default-orientation-margin-left")]:O,["".concat(x,"-no-default-orientation-margin-right")]:j},d,h),N=r.useMemo(()=>"number"==typeof u?u:/^\d+$/.test(u)?Number(u):u,[u]),M=Object.assign(Object.assign({},O&&{marginLeft:N}),j&&{marginRight:N});return w(r.createElement("div",Object.assign({className:P,style:Object.assign(Object.assign({},null==o?void 0:o.style),y)},b,{role:"separator"}),m&&"vertical"!==c&&r.createElement("span",{className:"".concat(x,"-inner-text"),style:M},m)))}},80795:function(e,t,n){"use strict";n.d(t,{Z:function(){return H}});var r=n(2265),o=n(77565),i=n(36760),a=n.n(i),l=n(71030),c=n(74126),s=n(50506),u=n(18694),d=n(62236),f=n(92736),p=n(93942),h=n(19722),m=n(13613),g=n(95140),v=n(71744),y=n(45937),b=n(88208),x=n(29961),w=n(12918),S=n(18544),k=n(29382),E=n(691),C=n(88260),O=n(80669),j=n(3104),P=e=>{let{componentCls:t,menuCls:n,colorError:r,colorTextLightSolid:o}=e,i="".concat(n,"-item");return{["".concat(t,", ").concat(t,"-menu-submenu")]:{["".concat(n," ").concat(i)]:{["&".concat(i,"-danger:not(").concat(i,"-disabled)")]:{color:r,"&:hover":{color:o,backgroundColor:r}}}}}},N=n(34442),M=n(352);let I=e=>{let{componentCls:t,menuCls:n,zIndexPopup:r,dropdownArrowDistance:o,sizePopupArrow:i,antCls:a,iconCls:l,motionDurationMid:c,paddingBlock:s,fontSize:u,dropdownEdgeChildPadding:d,colorTextDisabled:f,fontSizeIcon:p,controlPaddingHorizontal:h,colorBgElevated:m}=e;return[{[t]:Object.assign(Object.assign({},(0,w.Wf)(e)),{position:"absolute",top:-9999,left:{_skip_check_:!0,value:-9999},zIndex:r,display:"block","&::before":{position:"absolute",insetBlock:e.calc(i).div(2).sub(o).equal(),zIndex:-9999,opacity:1e-4,content:'""'},["&-trigger".concat(a,"-btn")]:{["& > ".concat(l,"-down, & > ").concat(a,"-btn-icon > ").concat(l,"-down")]:{fontSize:p}},["".concat(t,"-wrap")]:{position:"relative",["".concat(a,"-btn > ").concat(l,"-down")]:{fontSize:p},["".concat(l,"-down::before")]:{transition:"transform ".concat(c)}},["".concat(t,"-wrap-open")]:{["".concat(l,"-down::before")]:{transform:"rotate(180deg)"}},"\n &-hidden,\n &-menu-hidden,\n &-menu-submenu-hidden\n ":{display:"none"},["&".concat(a,"-slide-down-enter").concat(a,"-slide-down-enter-active").concat(t,"-placement-bottomLeft,\n &").concat(a,"-slide-down-appear").concat(a,"-slide-down-appear-active").concat(t,"-placement-bottomLeft,\n &").concat(a,"-slide-down-enter").concat(a,"-slide-down-enter-active").concat(t,"-placement-bottom,\n &").concat(a,"-slide-down-appear").concat(a,"-slide-down-appear-active").concat(t,"-placement-bottom,\n &").concat(a,"-slide-down-enter").concat(a,"-slide-down-enter-active").concat(t,"-placement-bottomRight,\n 
&").concat(a,"-slide-down-appear").concat(a,"-slide-down-appear-active").concat(t,"-placement-bottomRight")]:{animationName:S.fJ},["&".concat(a,"-slide-up-enter").concat(a,"-slide-up-enter-active").concat(t,"-placement-topLeft,\n &").concat(a,"-slide-up-appear").concat(a,"-slide-up-appear-active").concat(t,"-placement-topLeft,\n &").concat(a,"-slide-up-enter").concat(a,"-slide-up-enter-active").concat(t,"-placement-top,\n &").concat(a,"-slide-up-appear").concat(a,"-slide-up-appear-active").concat(t,"-placement-top,\n &").concat(a,"-slide-up-enter").concat(a,"-slide-up-enter-active").concat(t,"-placement-topRight,\n &").concat(a,"-slide-up-appear").concat(a,"-slide-up-appear-active").concat(t,"-placement-topRight")]:{animationName:S.Qt},["&".concat(a,"-slide-down-leave").concat(a,"-slide-down-leave-active").concat(t,"-placement-bottomLeft,\n &").concat(a,"-slide-down-leave").concat(a,"-slide-down-leave-active").concat(t,"-placement-bottom,\n &").concat(a,"-slide-down-leave").concat(a,"-slide-down-leave-active").concat(t,"-placement-bottomRight")]:{animationName:S.Uw},["&".concat(a,"-slide-up-leave").concat(a,"-slide-up-leave-active").concat(t,"-placement-topLeft,\n &").concat(a,"-slide-up-leave").concat(a,"-slide-up-leave-active").concat(t,"-placement-top,\n &").concat(a,"-slide-up-leave").concat(a,"-slide-up-leave-active").concat(t,"-placement-topRight")]:{animationName:S.ly}})},(0,C.ZP)(e,m,{arrowPlacement:{top:!0,bottom:!0}}),{["".concat(t," ").concat(n)]:{position:"relative",margin:0},["".concat(n,"-submenu-popup")]:{position:"absolute",zIndex:r,background:"transparent",boxShadow:"none",transformOrigin:"0 0","ul, li":{listStyle:"none",margin:0}},["".concat(t,", ").concat(t,"-menu-submenu")]:{[n]:Object.assign(Object.assign({padding:d,listStyleType:"none",backgroundColor:m,backgroundClip:"padding-box",borderRadius:e.borderRadiusLG,outline:"none",boxShadow:e.boxShadowSecondary},(0,w.Qy)(e)),{["".concat(n,"-item-group-title")]:{padding:"".concat((0,M.bf)(s)," ").concat((0,M.bf)(h)),color:e.colorTextDescription,transition:"all ".concat(c)},["".concat(n,"-item")]:{position:"relative",display:"flex",alignItems:"center"},["".concat(n,"-item-icon")]:{minWidth:u,marginInlineEnd:e.marginXS,fontSize:e.fontSizeSM},["".concat(n,"-title-content")]:{flex:"auto","> a":{color:"inherit",transition:"all ".concat(c),"&:hover":{color:"inherit"},"&::after":{position:"absolute",inset:0,content:'""'}}},["".concat(n,"-item, ").concat(n,"-submenu-title")]:Object.assign(Object.assign({clear:"both",margin:0,padding:"".concat((0,M.bf)(s)," ").concat((0,M.bf)(h)),color:e.colorText,fontWeight:"normal",fontSize:u,lineHeight:e.lineHeight,cursor:"pointer",transition:"all ".concat(c),borderRadius:e.borderRadiusSM,"&:hover, &-active":{backgroundColor:e.controlItemBgHover}},(0,w.Qy)(e)),{"&-selected":{color:e.colorPrimary,backgroundColor:e.controlItemBgActive,"&:hover, &-active":{backgroundColor:e.controlItemBgActiveHover}},"&-disabled":{color:f,cursor:"not-allowed","&:hover":{color:f,backgroundColor:m,cursor:"not-allowed"},a:{pointerEvents:"none"}},"&-divider":{height:1,margin:"".concat((0,M.bf)(e.marginXXS)," 0"),overflow:"hidden",lineHeight:0,backgroundColor:e.colorSplit},["".concat(t,"-menu-submenu-expand-icon")]:{position:"absolute",insetInlineEnd:e.paddingXS,["".concat(t,"-menu-submenu-arrow-icon")]:{marginInlineEnd:"0 !important",color:e.colorTextDescription,fontSize:p,fontStyle:"normal"}}}),["".concat(n,"-item-group-list")]:{margin:"0 
".concat((0,M.bf)(e.marginXS)),padding:0,listStyle:"none"},["".concat(n,"-submenu-title")]:{paddingInlineEnd:e.calc(h).add(e.fontSizeSM).equal()},["".concat(n,"-submenu-vertical")]:{position:"relative"},["".concat(n,"-submenu").concat(n,"-submenu-disabled ").concat(t,"-menu-submenu-title")]:{["&, ".concat(t,"-menu-submenu-arrow-icon")]:{color:f,backgroundColor:m,cursor:"not-allowed"}},["".concat(n,"-submenu-selected ").concat(t,"-menu-submenu-title")]:{color:e.colorPrimary}})}},[(0,S.oN)(e,"slide-up"),(0,S.oN)(e,"slide-down"),(0,k.Fm)(e,"move-up"),(0,k.Fm)(e,"move-down"),(0,E._y)(e,"zoom-big")]]};var R=(0,O.I$)("Dropdown",e=>{let{marginXXS:t,sizePopupArrow:n,paddingXXS:r,componentCls:o}=e,i=(0,j.TS)(e,{menuCls:"".concat(o,"-menu"),dropdownArrowDistance:e.calc(n).div(2).add(t).equal(),dropdownEdgeChildPadding:r});return[I(i),P(i)]},e=>Object.assign(Object.assign({zIndexPopup:e.zIndexPopupBase+50,paddingBlock:(e.controlHeight-e.fontSize*e.lineHeight)/2},(0,C.wZ)({contentRadius:e.borderRadiusLG,limitVerticalRadius:!0})),(0,N.w)(e))),T=n(64024);let A=e=>{let t;let{menu:n,arrow:i,prefixCls:p,children:w,trigger:S,disabled:k,dropdownRender:E,getPopupContainer:C,overlayClassName:O,rootClassName:j,overlayStyle:P,open:N,onOpenChange:M,visible:I,onVisibleChange:A,mouseEnterDelay:_=.15,mouseLeaveDelay:D=.1,autoAdjustOverflow:Z=!0,placement:L="",overlay:z,transitionName:B}=e,{getPopupContainer:F,getPrefixCls:H,direction:q,dropdown:W}=r.useContext(v.E_);(0,m.ln)("Dropdown");let K=r.useMemo(()=>{let e=H();return void 0!==B?B:L.includes("top")?"".concat(e,"-slide-down"):"".concat(e,"-slide-up")},[H,L,B]),V=r.useMemo(()=>L?L.includes("Center")?L.slice(0,L.indexOf("Center")):L:"rtl"===q?"bottomRight":"bottomLeft",[L,q]),U=H("dropdown",p),G=(0,T.Z)(U),[X,$,Y]=R(U,G),[,Q]=(0,x.ZP)(),J=r.Children.only(w),ee=(0,h.Tm)(J,{className:a()("".concat(U,"-trigger"),{["".concat(U,"-rtl")]:"rtl"===q},J.props.className),disabled:k}),et=k?[]:S;et&&et.includes("contextMenu")&&(t=!0);let[en,er]=(0,s.Z)(!1,{value:null!=N?N:I}),eo=(0,c.zX)(e=>{null==M||M(e,{source:"trigger"}),null==A||A(e),er(e)}),ei=a()(O,j,$,Y,G,null==W?void 0:W.className,{["".concat(U,"-rtl")]:"rtl"===q}),ea=(0,f.Z)({arrowPointAtCenter:"object"==typeof i&&i.pointAtCenter,autoAdjustOverflow:Z,offset:Q.marginXXS,arrowWidth:i?Q.sizePopupArrow:0,borderRadius:Q.borderRadius}),el=r.useCallback(()=>{null!=n&&n.selectable&&null!=n&&n.multiple||(null==M||M(!1,{source:"menu"}),er(!1))},[null==n?void 0:n.selectable,null==n?void 0:n.multiple]),[ec,es]=(0,d.Cn)("Dropdown",null==P?void 0:P.zIndex),eu=r.createElement(l.Z,Object.assign({alignPoint:t},(0,u.Z)(e,["rootClassName"]),{mouseEnterDelay:_,mouseLeaveDelay:D,visible:en,builtinPlacements:ea,arrow:!!i,overlayClassName:ei,prefixCls:U,getPopupContainer:C||F,transitionName:K,trigger:et,overlay:()=>{let e;return e=(null==n?void 0:n.items)?r.createElement(y.Z,Object.assign({},n)):"function"==typeof z?z():z,E&&(e=E(e)),e=r.Children.only("string"==typeof e?r.createElement("span",null,e):e),r.createElement(b.J,{prefixCls:"".concat(U,"-menu"),rootClassName:a()(Y,G),expandIcon:r.createElement("span",{className:"".concat(U,"-menu-submenu-arrow")},r.createElement(o.Z,{className:"".concat(U,"-menu-submenu-arrow-icon")})),mode:"vertical",selectable:!1,onClick:el,validator:e=>{let{mode:t}=e}},e)},placement:V,onVisibleChange:eo,overlayStyle:Object.assign(Object.assign(Object.assign({},null==W?void 0:W.style),P),{zIndex:ec})}),ee);return 
ec&&(eu=r.createElement(g.Z.Provider,{value:es},eu)),X(eu)},_=(0,p.Z)(A,"dropdown",e=>e,function(e){return Object.assign(Object.assign({},e),{align:{overflow:{adjustX:!1,adjustY:!1}}})});A._InternalPanelDoNotUseOrYouWillBeFired=e=>r.createElement(_,Object.assign({},e),r.createElement("span",null));var D=n(39760),Z=n(73002),L=n(93142),z=n(65658),B=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let F=e=>{let{getPopupContainer:t,getPrefixCls:n,direction:o}=r.useContext(v.E_),{prefixCls:i,type:l="default",danger:c,disabled:s,loading:u,onClick:d,htmlType:f,children:p,className:h,menu:m,arrow:g,autoFocus:y,overlay:b,trigger:x,align:w,open:S,onOpenChange:k,placement:E,getPopupContainer:C,href:O,icon:j=r.createElement(D.Z,null),title:P,buttonsRender:N=e=>e,mouseEnterDelay:M,mouseLeaveDelay:I,overlayClassName:R,overlayStyle:T,destroyPopupOnHide:_,dropdownRender:F}=e,H=B(e,["prefixCls","type","danger","disabled","loading","onClick","htmlType","children","className","menu","arrow","autoFocus","overlay","trigger","align","open","onOpenChange","placement","getPopupContainer","href","icon","title","buttonsRender","mouseEnterDelay","mouseLeaveDelay","overlayClassName","overlayStyle","destroyPopupOnHide","dropdownRender"]),q=n("dropdown",i),W={menu:m,arrow:g,autoFocus:y,align:w,disabled:s,trigger:s?[]:x,onOpenChange:k,getPopupContainer:C||t,mouseEnterDelay:M,mouseLeaveDelay:I,overlayClassName:R,overlayStyle:T,destroyPopupOnHide:_,dropdownRender:F},{compactSize:K,compactItemClassnames:V}=(0,z.ri)(q,o),U=a()("".concat(q,"-button"),V,h);"overlay"in e&&(W.overlay=b),"open"in e&&(W.open=S),"placement"in e?W.placement=E:W.placement="rtl"===o?"bottomLeft":"bottomRight";let[G,X]=N([r.createElement(Z.ZP,{type:l,danger:c,disabled:s,loading:u,onClick:d,htmlType:f,href:O,title:P},p),r.createElement(Z.ZP,{type:l,danger:c,icon:j})]);return r.createElement(L.Z.Compact,Object.assign({className:U,size:K,block:!0},H),G,r.createElement(A,Object.assign({},W),X))};F.__ANT_BUTTON=!0,A.Button=F;var H=A},85180:function(e,t,n){"use strict";n.d(t,{Z:function(){return y}});var r=n(36760),o=n.n(r),i=n(2265),a=n(71744),l=n(55274),c=n(36360),s=n(29961),u=n(80669),d=n(3104);let f=e=>{let{componentCls:t,margin:n,marginXS:r,marginXL:o,fontSize:i,lineHeight:a}=e;return{[t]:{marginInline:r,fontSize:i,lineHeight:a,textAlign:"center",["".concat(t,"-image")]:{height:e.emptyImgHeight,marginBottom:r,opacity:e.opacityImage,img:{height:"100%"},svg:{maxWidth:"100%",height:"100%",margin:"auto"}},["".concat(t,"-description")]:{color:e.colorText},["".concat(t,"-footer")]:{marginTop:n},"&-normal":{marginBlock:o,color:e.colorTextDisabled,["".concat(t,"-description")]:{color:e.colorTextDisabled},["".concat(t,"-image")]:{height:e.emptyImgHeightMD}},"&-small":{marginBlock:r,color:e.colorTextDisabled,["".concat(t,"-image")]:{height:e.emptyImgHeightSM}}}}};var p=(0,u.I$)("Empty",e=>{let{componentCls:t,controlHeightLG:n,calc:r}=e;return[f((0,d.TS)(e,{emptyImgCls:"".concat(t,"-img"),emptyImgHeight:r(n).mul(2.5).equal(),emptyImgHeightMD:n,emptyImgHeightSM:r(n).mul(.875).equal()}))]}),h=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var 
o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let m=i.createElement(()=>{let[,e]=(0,s.ZP)(),t=new c.C(e.colorBgBase).toHsl().l<.5?{opacity:.65}:{};return i.createElement("svg",{style:t,width:"184",height:"152",viewBox:"0 0 184 152",xmlns:"http://www.w3.org/2000/svg"},i.createElement("g",{fill:"none",fillRule:"evenodd"},i.createElement("g",{transform:"translate(24 31.67)"},i.createElement("ellipse",{fillOpacity:".8",fill:"#F5F5F7",cx:"67.797",cy:"106.89",rx:"67.797",ry:"12.668"}),i.createElement("path",{d:"M122.034 69.674L98.109 40.229c-1.148-1.386-2.826-2.225-4.593-2.225h-51.44c-1.766 0-3.444.839-4.592 2.225L13.56 69.674v15.383h108.475V69.674z",fill:"#AEB8C2"}),i.createElement("path",{d:"M101.537 86.214L80.63 61.102c-1.001-1.207-2.507-1.867-4.048-1.867H31.724c-1.54 0-3.047.66-4.048 1.867L6.769 86.214v13.792h94.768V86.214z",fill:"url(#linearGradient-1)",transform:"translate(13.56)"}),i.createElement("path",{d:"M33.83 0h67.933a4 4 0 0 1 4 4v93.344a4 4 0 0 1-4 4H33.83a4 4 0 0 1-4-4V4a4 4 0 0 1 4-4z",fill:"#F5F5F7"}),i.createElement("path",{d:"M42.678 9.953h50.237a2 2 0 0 1 2 2V36.91a2 2 0 0 1-2 2H42.678a2 2 0 0 1-2-2V11.953a2 2 0 0 1 2-2zM42.94 49.767h49.713a2.262 2.262 0 1 1 0 4.524H42.94a2.262 2.262 0 0 1 0-4.524zM42.94 61.53h49.713a2.262 2.262 0 1 1 0 4.525H42.94a2.262 2.262 0 0 1 0-4.525zM121.813 105.032c-.775 3.071-3.497 5.36-6.735 5.36H20.515c-3.238 0-5.96-2.29-6.734-5.36a7.309 7.309 0 0 1-.222-1.79V69.675h26.318c2.907 0 5.25 2.448 5.25 5.42v.04c0 2.971 2.37 5.37 5.277 5.37h34.785c2.907 0 5.277-2.421 5.277-5.393V75.1c0-2.972 2.343-5.426 5.25-5.426h26.318v33.569c0 .617-.077 1.216-.221 1.789z",fill:"#DCE0E6"})),i.createElement("path",{d:"M149.121 33.292l-6.83 2.65a1 1 0 0 1-1.317-1.23l1.937-6.207c-2.589-2.944-4.109-6.534-4.109-10.408C138.802 8.102 148.92 0 161.402 0 173.881 0 184 8.102 184 18.097c0 9.995-10.118 18.097-22.599 18.097-4.528 0-8.744-1.066-12.28-2.902z",fill:"#DCE0E6"}),i.createElement("g",{transform:"translate(149.65 15.383)",fill:"#FFF"},i.createElement("ellipse",{cx:"20.654",cy:"3.167",rx:"2.849",ry:"2.815"}),i.createElement("path",{d:"M5.698 5.63H0L2.898.704zM9.259.704h4.985V5.63H9.259z"}))))},null),g=i.createElement(()=>{let[,e]=(0,s.ZP)(),{colorFill:t,colorFillTertiary:n,colorFillQuaternary:r,colorBgContainer:o}=e,{borderColor:a,shadowColor:l,contentColor:u}=(0,i.useMemo)(()=>({borderColor:new c.C(t).onBackground(o).toHexShortString(),shadowColor:new c.C(n).onBackground(o).toHexShortString(),contentColor:new c.C(r).onBackground(o).toHexShortString()}),[t,n,r,o]);return i.createElement("svg",{width:"64",height:"41",viewBox:"0 0 64 41",xmlns:"http://www.w3.org/2000/svg"},i.createElement("g",{transform:"translate(0 1)",fill:"none",fillRule:"evenodd"},i.createElement("ellipse",{fill:l,cx:"32",cy:"33",rx:"32",ry:"7"}),i.createElement("g",{fillRule:"nonzero",stroke:a},i.createElement("path",{d:"M55 12.76L44.854 1.258C44.367.474 43.656 0 42.907 0H21.093c-.749 0-1.46.474-1.947 1.257L9 12.761V22h46v-9.24z"}),i.createElement("path",{d:"M41.613 15.931c0-1.605.994-2.93 2.227-2.931H55v18.137C55 33.26 53.68 35 52.05 35h-40.1C10.32 35 9 33.259 9 31.137V13h11.16c1.233 0 2.227 1.323 2.227 2.928v.022c0 1.605 1.005 2.901 2.237 2.901h14.752c1.232 0 2.237-1.308 
2.237-2.913v-.007z",fill:u}))))},null),v=e=>{var{className:t,rootClassName:n,prefixCls:r,image:c=m,description:s,children:u,imageStyle:d,style:f}=e,v=h(e,["className","rootClassName","prefixCls","image","description","children","imageStyle","style"]);let{getPrefixCls:y,direction:b,empty:x}=i.useContext(a.E_),w=y("empty",r),[S,k,E]=p(w),[C]=(0,l.Z)("Empty"),O=void 0!==s?s:null==C?void 0:C.description,j=null;return j="string"==typeof c?i.createElement("img",{alt:"string"==typeof O?O:"empty",src:c}):c,S(i.createElement("div",Object.assign({className:o()(k,E,w,null==x?void 0:x.className,{["".concat(w,"-normal")]:c===g,["".concat(w,"-rtl")]:"rtl"===b},t,n),style:Object.assign(Object.assign({},null==x?void 0:x.style),f)},v),i.createElement("div",{className:"".concat(w,"-image"),style:d},j),O&&i.createElement("div",{className:"".concat(w,"-description")},O),u&&i.createElement("div",{className:"".concat(w,"-footer")},u)))};v.PRESENTED_IMAGE_DEFAULT=m,v.PRESENTED_IMAGE_SIMPLE=g;var y=v},56250:function(e,t,n){"use strict";var r=n(2265),o=n(39109);let i=["outlined","borderless","filled"];t.Z=function(e){let t,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:void 0,a=(0,r.useContext)(o.pg);t=void 0!==e?e:!1===n?"borderless":null!=a?a:"outlined";let l=i.includes(t);return[t,l]}},20577:function(e,t,n){"use strict";n.d(t,{Z:function(){return em}});var r=n(2265),o=n(70464),i=n(1119),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M890.5 755.3L537.9 269.2c-12.8-17.6-39-17.6-51.7 0L133.5 755.3A8 8 0 00140 768h75c5.1 0 9.9-2.5 12.9-6.6L512 369.8l284.1 391.6c3 4.1 7.8 6.6 12.9 6.6h75c6.5 0 10.3-7.4 6.5-12.7z"}}]},name:"up",theme:"outlined"},l=n(55015),c=r.forwardRef(function(e,t){return r.createElement(l.Z,(0,i.Z)({},e,{ref:t,icon:a}))}),s=n(36760),u=n.n(s),d=n(11993),f=n(41154),p=n(26365),h=n(6989),m=n(76405),g=n(25049);function v(){return"function"==typeof BigInt}function y(e){return!e&&0!==e&&!Number.isNaN(e)||!String(e).trim()}function b(e){var t=e.trim(),n=t.startsWith("-");n&&(t=t.slice(1)),(t=t.replace(/(\.\d*[^0])0*$/,"$1").replace(/\.0*$/,"").replace(/^0+/,"")).startsWith(".")&&(t="0".concat(t));var r=t||"0",o=r.split("."),i=o[0]||"0",a=o[1]||"0";"0"===i&&"0"===a&&(n=!1);var l=n?"-":"";return{negative:n,negativeStr:l,trimStr:r,integerStr:i,decimalStr:a,fullStr:"".concat(l).concat(r)}}function x(e){var t=String(e);return!Number.isNaN(Number(t))&&t.includes("e")}function w(e){var t=String(e);if(x(e)){var n=Number(t.slice(t.indexOf("e-")+2)),r=t.match(/\.(\d+)/);return null!=r&&r[1]&&(n+=r[1].length),n}return t.includes(".")&&k(t)?t.length-t.indexOf(".")-1:0}function S(e){var t=String(e);if(x(e)){if(e>Number.MAX_SAFE_INTEGER)return String(v()?BigInt(e).toString():Number.MAX_SAFE_INTEGER);if(e=this.add(e.negate().toString()).toNumber()}},{key:"toNumber",value:function(){return this.isNaN()?NaN:Number(this.toString())}},{key:"toString",value:function(){var e=!(arguments.length>0)||void 0===arguments[0]||arguments[0];return e?this.isInvalidate()?"":b("".concat(this.getMark()).concat(this.getIntegerStr(),".").concat(this.getDecimalStr())).fullStr:this.origin}}]),e}(),C=function(){function e(t){if((0,m.Z)(this,e),(0,d.Z)(this,"origin",""),(0,d.Z)(this,"number",void 0),(0,d.Z)(this,"empty",void 0),y(t)){this.empty=!0;return}this.origin=String(t),this.number=Number(t)}return(0,g.Z)(e,[{key:"negate",value:function(){return new e(-this.toNumber())}},{key:"add",value:function(t){if(this.isInvalidate())return new e(t);var 
n=Number(t);if(Number.isNaN(n))return this;var r=this.number+n;if(r>Number.MAX_SAFE_INTEGER)return new e(Number.MAX_SAFE_INTEGER);if(rNumber.MAX_SAFE_INTEGER)return new e(Number.MAX_SAFE_INTEGER);if(r=this.add(e.negate().toString()).toNumber()}},{key:"toNumber",value:function(){return this.number}},{key:"toString",value:function(){var e=!(arguments.length>0)||void 0===arguments[0]||arguments[0];return e?this.isInvalidate()?"":S(this.number):this.origin}}]),e}();function O(e){return v()?new E(e):new C(e)}function j(e,t,n){var r=arguments.length>3&&void 0!==arguments[3]&&arguments[3];if(""===e)return"";var o=b(e),i=o.negativeStr,a=o.integerStr,l=o.decimalStr,c="".concat(t).concat(l),s="".concat(i).concat(a);if(n>=0){var u=Number(l[n]);return u>=5&&!r?j(O(e).add("".concat(i,"0.").concat("0".repeat(n)).concat(10-u)).toString(),t,n,r):0===n?s:"".concat(s).concat(t).concat(l.padEnd(n,"0").slice(0,n))}return".0"===c?s:"".concat(s).concat(c)}var P=n(2027),N=n(27380),M=n(28791),I=n(32559),R=n(79267),T=function(){var e=(0,r.useState)(!1),t=(0,p.Z)(e,2),n=t[0],o=t[1];return(0,N.Z)(function(){o((0,R.Z)())},[]),n},A=n(53346);function _(e){var t=e.prefixCls,n=e.upNode,o=e.downNode,a=e.upDisabled,l=e.downDisabled,c=e.onStep,s=r.useRef(),f=r.useRef([]),p=r.useRef();p.current=c;var h=function(){clearTimeout(s.current)},m=function(e,t){e.preventDefault(),h(),p.current(t),s.current=setTimeout(function e(){p.current(t),s.current=setTimeout(e,200)},600)};if(r.useEffect(function(){return function(){h(),f.current.forEach(function(e){return A.Z.cancel(e)})}},[]),T())return null;var g="".concat(t,"-handler"),v=u()(g,"".concat(g,"-up"),(0,d.Z)({},"".concat(g,"-up-disabled"),a)),y=u()(g,"".concat(g,"-down"),(0,d.Z)({},"".concat(g,"-down-disabled"),l)),b=function(){return f.current.push((0,A.Z)(h))},x={unselectable:"on",role:"button",onMouseUp:b,onMouseLeave:b};return r.createElement("div",{className:"".concat(g,"-wrap")},r.createElement("span",(0,i.Z)({},x,{onMouseDown:function(e){m(e,!0)},"aria-label":"Increase Value","aria-disabled":a,className:v}),n||r.createElement("span",{unselectable:"on",className:"".concat(t,"-handler-up-inner")})),r.createElement("span",(0,i.Z)({},x,{onMouseDown:function(e){m(e,!1)},"aria-label":"Decrease Value","aria-disabled":l,className:y}),o||r.createElement("span",{unselectable:"on",className:"".concat(t,"-handler-down-inner")})))}function D(e){var t="number"==typeof e?S(e):b(e).fullStr;return t.includes(".")?b(t.replace(/(\d)\.(\d)/g,"$1$2.")).fullStr:e+"0"}var Z=n(55041),L=function(){var e=(0,r.useRef)(0),t=function(){A.Z.cancel(e.current)};return(0,r.useEffect)(function(){return t},[]),function(n){t(),e.current=(0,A.Z)(function(){n()})}},z=["prefixCls","className","style","min","max","step","defaultValue","value","disabled","readOnly","upHandler","downHandler","keyboard","wheel","controls","classNames","stringMode","parser","formatter","precision","decimalSeparator","onChange","onInput","onPressEnter","onStep","changeOnBlur"],B=["disabled","style","prefixCls","value","prefix","suffix","addonBefore","addonAfter","className","classNames"],F=function(e,t){return e||t.isEmpty()?t.toString():t.toNumber()},H=function(e){var t=O(e);return t.isInvalidate()?null:t},q=r.forwardRef(function(e,t){var n,o,a,l=e.prefixCls,c=void 0===l?"rc-input-number":l,s=e.className,m=e.style,g=e.min,v=e.max,y=e.step,b=void 
0===y?1:y,x=e.defaultValue,E=e.value,C=e.disabled,P=e.readOnly,R=e.upHandler,T=e.downHandler,A=e.keyboard,Z=e.wheel,B=e.controls,q=(e.classNames,e.stringMode),W=e.parser,K=e.formatter,V=e.precision,U=e.decimalSeparator,G=e.onChange,X=e.onInput,$=e.onPressEnter,Y=e.onStep,Q=e.changeOnBlur,J=void 0===Q||Q,ee=(0,h.Z)(e,z),et="".concat(c,"-input"),en=r.useRef(null),er=r.useState(!1),eo=(0,p.Z)(er,2),ei=eo[0],ea=eo[1],el=r.useRef(!1),ec=r.useRef(!1),es=r.useRef(!1),eu=r.useState(function(){return O(null!=E?E:x)}),ed=(0,p.Z)(eu,2),ef=ed[0],ep=ed[1],eh=r.useCallback(function(e,t){return t?void 0:V>=0?V:Math.max(w(e),w(b))},[V,b]),em=r.useCallback(function(e){var t=String(e);if(W)return W(t);var n=t;return U&&(n=n.replace(U,".")),n.replace(/[^\w.-]+/g,"")},[W,U]),eg=r.useRef(""),ev=r.useCallback(function(e,t){if(K)return K(e,{userTyping:t,input:String(eg.current)});var n="number"==typeof e?S(e):e;if(!t){var r=eh(n,t);k(n)&&(U||r>=0)&&(n=j(n,U||".",r))}return n},[K,eh,U]),ey=r.useState(function(){var e=null!=x?x:E;return ef.isInvalidate()&&["string","number"].includes((0,f.Z)(e))?Number.isNaN(e)?"":e:ev(ef.toString(),!1)}),eb=(0,p.Z)(ey,2),ex=eb[0],ew=eb[1];function eS(e,t){ew(ev(e.isInvalidate()?e.toString(!1):e.toString(!t),t))}eg.current=ex;var ek=r.useMemo(function(){return H(v)},[v,V]),eE=r.useMemo(function(){return H(g)},[g,V]),eC=r.useMemo(function(){return!(!ek||!ef||ef.isInvalidate())&&ek.lessEquals(ef)},[ek,ef]),eO=r.useMemo(function(){return!(!eE||!ef||ef.isInvalidate())&&ef.lessEquals(eE)},[eE,ef]),ej=(n=en.current,o=(0,r.useRef)(null),[function(){try{var e=n.selectionStart,t=n.selectionEnd,r=n.value,i=r.substring(0,e),a=r.substring(t);o.current={start:e,end:t,value:r,beforeTxt:i,afterTxt:a}}catch(e){}},function(){if(n&&o.current&&ei)try{var e=n.value,t=o.current,r=t.beforeTxt,i=t.afterTxt,a=t.start,l=e.length;if(e.endsWith(i))l=e.length-o.current.afterTxt.length;else if(e.startsWith(r))l=r.length;else{var c=r[a-1],s=e.indexOf(c,a-1);-1!==s&&(l=s+1)}n.setSelectionRange(l,l)}catch(e){(0,I.ZP)(!1,"Something warning of cursor restore. 
Please fire issue about this: ".concat(e.message))}}]),eP=(0,p.Z)(ej,2),eN=eP[0],eM=eP[1],eI=function(e){return ek&&!e.lessEquals(ek)?ek:eE&&!eE.lessEquals(e)?eE:null},eR=function(e){return!eI(e)},eT=function(e,t){var n=e,r=eR(n)||n.isEmpty();if(n.isEmpty()||t||(n=eI(n)||n,r=!0),!P&&!C&&r){var o,i=n.toString(),a=eh(i,t);return a>=0&&!eR(n=O(j(i,".",a)))&&(n=O(j(i,".",a,!0))),n.equals(ef)||(o=n,void 0===E&&ep(o),null==G||G(n.isEmpty()?null:F(q,n)),void 0===E&&eS(n,t)),n}return ef},eA=L(),e_=function e(t){if(eN(),eg.current=t,ew(t),!ec.current){var n=O(em(t));n.isNaN()||eT(n,!0)}null==X||X(t),eA(function(){var n=t;W||(n=t.replace(/。/g,".")),n!==t&&e(n)})},eD=function(e){if((!e||!eC)&&(e||!eO)){el.current=!1;var t,n=O(es.current?D(b):b);e||(n=n.negate());var r=eT((ef||O(0)).add(n.toString()),!1);null==Y||Y(F(q,r),{offset:es.current?D(b):b,type:e?"up":"down"}),null===(t=en.current)||void 0===t||t.focus()}},eZ=function(e){var t=O(em(ex)),n=t;n=t.isNaN()?eT(ef,e):eT(t,e),void 0!==E?eS(ef,!1):n.isNaN()||eS(n,!1)};return r.useEffect(function(){var e=function(e){!1!==Z&&(eD(e.deltaY<0),e.preventDefault())},t=en.current;if(t)return t.addEventListener("wheel",e),function(){return t.removeEventListener("wheel",e)}},[eD]),(0,N.o)(function(){ef.isInvalidate()||eS(ef,!1)},[V,K]),(0,N.o)(function(){var e=O(E);ep(e);var t=O(em(ex));e.equals(t)&&el.current&&!K||eS(e,el.current)},[E]),(0,N.o)(function(){K&&eM()},[ex]),r.createElement("div",{className:u()(c,s,(a={},(0,d.Z)(a,"".concat(c,"-focused"),ei),(0,d.Z)(a,"".concat(c,"-disabled"),C),(0,d.Z)(a,"".concat(c,"-readonly"),P),(0,d.Z)(a,"".concat(c,"-not-a-number"),ef.isNaN()),(0,d.Z)(a,"".concat(c,"-out-of-range"),!ef.isInvalidate()&&!eR(ef)),a)),style:m,onFocus:function(){ea(!0)},onBlur:function(){J&&eZ(!1),ea(!1),el.current=!1},onKeyDown:function(e){var t=e.key,n=e.shiftKey;el.current=!0,es.current=n,"Enter"===t&&(ec.current||(el.current=!1),eZ(!1),null==$||$(e)),!1!==A&&!ec.current&&["Up","ArrowUp","Down","ArrowDown"].includes(t)&&(eD("Up"===t||"ArrowUp"===t),e.preventDefault())},onKeyUp:function(){el.current=!1,es.current=!1},onCompositionStart:function(){ec.current=!0},onCompositionEnd:function(){ec.current=!1,e_(en.current.value)},onBeforeInput:function(){el.current=!0}},(void 0===B||B)&&r.createElement(_,{prefixCls:c,upNode:R,downNode:T,upDisabled:eC,downDisabled:eO,onStep:eD}),r.createElement("div",{className:"".concat(et,"-wrap")},r.createElement("input",(0,i.Z)({autoComplete:"off",role:"spinbutton","aria-valuemin":g,"aria-valuemax":v,"aria-valuenow":ef.isInvalidate()?null:ef.toString(),step:b},ee,{ref:(0,M.sQ)(en,t),className:et,value:ex,onChange:function(e){e_(e.target.value)},disabled:C,readOnly:P}))))}),W=r.forwardRef(function(e,t){var n=e.disabled,o=e.style,a=e.prefixCls,l=e.value,c=e.prefix,s=e.suffix,u=e.addonBefore,d=e.addonAfter,f=e.className,p=e.classNames,m=(0,h.Z)(e,B),g=r.useRef(null);return r.createElement(P.Q,{className:f,triggerFocus:function(e){g.current&&(0,Z.nH)(g.current,e)},prefixCls:a,value:l,disabled:n,style:o,prefix:c,suffix:s,addonAfter:d,addonBefore:u,classNames:p,components:{affixWrapper:"div",groupWrapper:"div",wrapper:"div",groupAddon:"div"}},r.createElement(q,(0,i.Z)({prefixCls:a,disabled:n,ref:(0,M.sQ)(g,t),className:null==p?void 0:p.input},m)))});W.displayName="InputNumber";var K=n(12757),V=n(71744),U=n(13959),G=n(86586),X=n(64024),$=n(33759),Y=n(39109),Q=n(56250),J=n(65658),ee=n(352),et=n(31282),en=n(37433),er=n(65265),eo=n(12918),ei=n(17691),ea=n(80669),el=n(3104),ec=n(36360);let 
es=(e,t)=>{let{componentCls:n,borderRadiusSM:r,borderRadiusLG:o}=e,i="lg"===t?o:r;return{["&-".concat(t)]:{["".concat(n,"-handler-wrap")]:{borderStartEndRadius:i,borderEndEndRadius:i},["".concat(n,"-handler-up")]:{borderStartEndRadius:i},["".concat(n,"-handler-down")]:{borderEndEndRadius:i}}}},eu=e=>{let{componentCls:t,lineWidth:n,lineType:r,borderRadius:o,fontSizeLG:i,controlHeightLG:a,controlHeightSM:l,colorError:c,paddingInlineSM:s,paddingBlockSM:u,paddingBlockLG:d,paddingInlineLG:f,colorTextDescription:p,motionDurationMid:h,handleHoverColor:m,paddingInline:g,paddingBlock:v,handleBg:y,handleActiveBg:b,colorTextDisabled:x,borderRadiusSM:w,borderRadiusLG:S,controlWidth:k,handleOpacity:E,handleBorderColor:C,filledHandleBg:O,lineHeightLG:j,calc:P}=e;return[{[t]:Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({},(0,eo.Wf)(e)),(0,et.ik)(e)),{display:"inline-block",width:k,margin:0,padding:0,borderRadius:o}),(0,er.qG)(e,{["".concat(t,"-handler-wrap")]:{background:y,["".concat(t,"-handler-down")]:{borderBlockStart:"".concat((0,ee.bf)(n)," ").concat(r," ").concat(C)}}})),(0,er.H8)(e,{["".concat(t,"-handler-wrap")]:{background:O,["".concat(t,"-handler-down")]:{borderBlockStart:"".concat((0,ee.bf)(n)," ").concat(r," ").concat(C)}},"&:focus-within":{["".concat(t,"-handler-wrap")]:{background:y}}})),(0,er.Mu)(e)),{"&-rtl":{direction:"rtl",["".concat(t,"-input")]:{direction:"rtl"}},"&-lg":{padding:0,fontSize:i,lineHeight:j,borderRadius:S,["input".concat(t,"-input")]:{height:P(a).sub(P(n).mul(2)).equal(),padding:"".concat((0,ee.bf)(d)," ").concat((0,ee.bf)(f))}},"&-sm":{padding:0,borderRadius:w,["input".concat(t,"-input")]:{height:P(l).sub(P(n).mul(2)).equal(),padding:"".concat((0,ee.bf)(u)," ").concat((0,ee.bf)(s))}},"&-out-of-range":{["".concat(t,"-input-wrap")]:{input:{color:c}}},"&-group":Object.assign(Object.assign(Object.assign({},(0,eo.Wf)(e)),(0,et.s7)(e)),{"&-wrapper":Object.assign(Object.assign(Object.assign({display:"inline-block",textAlign:"start",verticalAlign:"top",["".concat(t,"-affix-wrapper")]:{width:"100%"},"&-lg":{["".concat(t,"-group-addon")]:{borderRadius:S,fontSize:e.fontSizeLG}},"&-sm":{["".concat(t,"-group-addon")]:{borderRadius:w}}},(0,er.ir)(e)),(0,er.S5)(e)),{["&:not(".concat(t,"-compact-first-item):not(").concat(t,"-compact-last-item)").concat(t,"-compact-item")]:{["".concat(t,", ").concat(t,"-group-addon")]:{borderRadius:0}},["&:not(".concat(t,"-compact-last-item)").concat(t,"-compact-first-item")]:{["".concat(t,", ").concat(t,"-group-addon")]:{borderStartEndRadius:0,borderEndEndRadius:0}},["&:not(".concat(t,"-compact-first-item)").concat(t,"-compact-last-item")]:{["".concat(t,", ").concat(t,"-group-addon")]:{borderStartStartRadius:0,borderEndStartRadius:0}}})}),["&-disabled ".concat(t,"-input")]:{cursor:"not-allowed"},[t]:{"&-input":Object.assign(Object.assign(Object.assign(Object.assign({},(0,eo.Wf)(e)),{width:"100%",padding:"".concat((0,ee.bf)(v)," ").concat((0,ee.bf)(g)),textAlign:"start",backgroundColor:"transparent",border:0,borderRadius:o,outline:0,transition:"all ".concat(h," linear"),appearance:"textfield",fontSize:"inherit"}),(0,et.nz)(e.colorTextPlaceholder)),{'&[type="number"]::-webkit-inner-spin-button, &[type="number"]::-webkit-outer-spin-button':{margin:0,webkitAppearance:"none",appearance:"none"}})}})},{[t]:Object.assign(Object.assign(Object.assign({["&:hover ".concat(t,"-handler-wrap, &-focused 
").concat(t,"-handler-wrap")]:{opacity:1},["".concat(t,"-handler-wrap")]:{position:"absolute",insetBlockStart:0,insetInlineEnd:0,width:e.handleWidth,height:"100%",borderStartStartRadius:0,borderStartEndRadius:o,borderEndEndRadius:o,borderEndStartRadius:0,opacity:E,display:"flex",flexDirection:"column",alignItems:"stretch",transition:"opacity ".concat(h," linear ").concat(h),["".concat(t,"-handler")]:{display:"flex",alignItems:"center",justifyContent:"center",flex:"auto",height:"40%",["\n ".concat(t,"-handler-up-inner,\n ").concat(t,"-handler-down-inner\n ")]:{marginInlineEnd:0,fontSize:e.handleFontSize}}},["".concat(t,"-handler")]:{height:"50%",overflow:"hidden",color:p,fontWeight:"bold",lineHeight:0,textAlign:"center",cursor:"pointer",borderInlineStart:"".concat((0,ee.bf)(n)," ").concat(r," ").concat(C),transition:"all ".concat(h," linear"),"&:active":{background:b},"&:hover":{height:"60%",["\n ".concat(t,"-handler-up-inner,\n ").concat(t,"-handler-down-inner\n ")]:{color:m}},"&-up-inner, &-down-inner":Object.assign(Object.assign({},(0,eo.Ro)()),{color:p,transition:"all ".concat(h," linear"),userSelect:"none"})},["".concat(t,"-handler-up")]:{borderStartEndRadius:o},["".concat(t,"-handler-down")]:{borderEndEndRadius:o}},es(e,"lg")),es(e,"sm")),{"&-disabled, &-readonly":{["".concat(t,"-handler-wrap")]:{display:"none"},["".concat(t,"-input")]:{color:"inherit"}},["\n ".concat(t,"-handler-up-disabled,\n ").concat(t,"-handler-down-disabled\n ")]:{cursor:"not-allowed"},["\n ".concat(t,"-handler-up-disabled:hover &-handler-up-inner,\n ").concat(t,"-handler-down-disabled:hover &-handler-down-inner\n ")]:{color:x}})}]},ed=e=>{let{componentCls:t,paddingBlock:n,paddingInline:r,inputAffixPadding:o,controlWidth:i,borderRadiusLG:a,borderRadiusSM:l,paddingInlineLG:c,paddingInlineSM:s,paddingBlockLG:u,paddingBlockSM:d}=e;return{["".concat(t,"-affix-wrapper")]:Object.assign(Object.assign({["input".concat(t,"-input")]:{padding:"".concat((0,ee.bf)(n)," 0")}},(0,et.ik)(e)),{position:"relative",display:"inline-flex",width:i,padding:0,paddingInlineStart:r,"&-lg":{borderRadius:a,paddingInlineStart:c,["input".concat(t,"-input")]:{padding:"".concat((0,ee.bf)(u)," 0")}},"&-sm":{borderRadius:l,paddingInlineStart:s,["input".concat(t,"-input")]:{padding:"".concat((0,ee.bf)(d)," 0")}},["&:not(".concat(t,"-disabled):hover")]:{zIndex:1},"&-focused, &:focus":{zIndex:1},["&-disabled > ".concat(t,"-disabled")]:{background:"transparent"},["> div".concat(t)]:{width:"100%",border:"none",outline:"none",["&".concat(t,"-focused")]:{boxShadow:"none !important"}},"&::before":{display:"inline-block",width:0,visibility:"hidden",content:'"\\a0"'},["".concat(t,"-handler-wrap")]:{zIndex:2},[t]:{color:"inherit","&-prefix, &-suffix":{display:"flex",flex:"none",alignItems:"center",pointerEvents:"none"},"&-prefix":{marginInlineEnd:o},"&-suffix":{position:"absolute",insetBlockStart:0,insetInlineEnd:0,zIndex:1,height:"100%",marginInlineEnd:r,marginInlineStart:o}}})}};var ef=(0,ea.I$)("InputNumber",e=>{let t=(0,el.TS)(e,(0,en.e)(e));return[eu(t),ed(t),(0,ei.c)(t)]},e=>{var t;let n=null!==(t=e.handleVisible)&&void 0!==t?t:"auto";return Object.assign(Object.assign({},(0,en.T)(e)),{controlWidth:90,handleWidth:e.controlHeightSM-2*e.lineWidth,handleFontSize:e.fontSize/2,handleVisible:n,handleActiveBg:e.colorFillAlter,handleBg:e.colorBgContainer,filledHandleBg:new 
ec.C(e.colorFillSecondary).onBackground(e.colorBgContainer).toHexString(),handleHoverColor:e.colorPrimary,handleBorderColor:e.colorBorder,handleOpacity:!0===n?1:0})},{unitless:{handleOpacity:!0}}),ep=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let eh=r.forwardRef((e,t)=>{let{getPrefixCls:n,direction:i}=r.useContext(V.E_),a=r.useRef(null);r.useImperativeHandle(t,()=>a.current);let{className:l,rootClassName:s,size:d,disabled:f,prefixCls:p,addonBefore:h,addonAfter:m,prefix:g,bordered:v,readOnly:y,status:b,controls:x,variant:w}=e,S=ep(e,["className","rootClassName","size","disabled","prefixCls","addonBefore","addonAfter","prefix","bordered","readOnly","status","controls","variant"]),k=n("input-number",p),E=(0,X.Z)(k),[C,O,j]=ef(k,E),{compactSize:P,compactItemClassnames:N}=(0,J.ri)(k,i),M=r.createElement(c,{className:"".concat(k,"-handler-up-inner")}),I=r.createElement(o.Z,{className:"".concat(k,"-handler-down-inner")});"object"==typeof x&&(M=void 0===x.upIcon?M:r.createElement("span",{className:"".concat(k,"-handler-up-inner")},x.upIcon),I=void 0===x.downIcon?I:r.createElement("span",{className:"".concat(k,"-handler-down-inner")},x.downIcon));let{hasFeedback:R,status:T,isFormItemInput:A,feedbackIcon:_}=r.useContext(Y.aM),D=(0,K.F)(T,b),Z=(0,$.Z)(e=>{var t;return null!==(t=null!=d?d:P)&&void 0!==t?t:e}),L=r.useContext(G.Z),[z,B]=(0,Q.Z)(w,v),F=R&&r.createElement(r.Fragment,null,_),H=u()({["".concat(k,"-lg")]:"large"===Z,["".concat(k,"-sm")]:"small"===Z,["".concat(k,"-rtl")]:"rtl"===i,["".concat(k,"-in-form-item")]:A},O),q="".concat(k,"-group");return C(r.createElement(W,Object.assign({ref:a,disabled:null!=f?f:L,className:u()(j,E,l,s,N),upHandler:M,downHandler:I,prefixCls:k,readOnly:y,controls:"boolean"==typeof x?x:void 0,prefix:g,suffix:F,addonAfter:m&&r.createElement(J.BR,null,r.createElement(Y.Ux,{override:!0,status:!0},m)),addonBefore:h&&r.createElement(J.BR,null,r.createElement(Y.Ux,{override:!0,status:!0},h)),classNames:{input:H,variant:u()({["".concat(k,"-").concat(z)]:B},(0,K.Z)(k,D,R)),affixWrapper:u()({["".concat(k,"-affix-wrapper-sm")]:"small"===Z,["".concat(k,"-affix-wrapper-lg")]:"large"===Z,["".concat(k,"-affix-wrapper-rtl")]:"rtl"===i},O),wrapper:u()({["".concat(q,"-rtl")]:"rtl"===i},O),groupWrapper:u()({["".concat(k,"-group-wrapper-sm")]:"small"===Z,["".concat(k,"-group-wrapper-lg")]:"large"===Z,["".concat(k,"-group-wrapper-rtl")]:"rtl"===i,["".concat(k,"-group-wrapper-").concat(z)]:B},(0,K.Z)("".concat(k,"-group-wrapper"),D,R),O)}},S)))});eh._InternalPanelDoNotUseOrYouWillBeFired=e=>r.createElement(U.ZP,{theme:{components:{InputNumber:{handleVisible:!0}}}},r.createElement(eh,Object.assign({},e)));var em=eh},65863:function(e,t,n){"use strict";n.d(t,{Z:function(){return S},n:function(){return w}});var r=n(2265),o=n(36760),i=n.n(o),a=n(2027),l=n(28791),c=n(12757),s=n(71744),u=n(86586),d=n(33759),f=n(39109),p=n(65658),h=n(39164),m=n(31282),g=n(64024),v=n(56250),y=n(39725),b=e=>{let t;return"object"==typeof e&&(null==e?void 0:e.clearIcon)?t=e:e&&(t={clearIcon:r.createElement(y.Z,null)}),t},x=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var 
o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};function w(e,t){if(!e)return;e.focus(t);let{cursor:n}=t||{};if(n){let t=e.value.length;switch(n){case"start":e.setSelectionRange(0,0);break;case"end":e.setSelectionRange(t,t);break;default:e.setSelectionRange(0,t)}}}var S=(0,r.forwardRef)((e,t)=>{var n;let{prefixCls:o,bordered:y=!0,status:w,size:S,disabled:k,onBlur:E,onFocus:C,suffix:O,allowClear:j,addonAfter:P,addonBefore:N,className:M,style:I,styles:R,rootClassName:T,onChange:A,classNames:_,variant:D}=e,Z=x(e,["prefixCls","bordered","status","size","disabled","onBlur","onFocus","suffix","allowClear","addonAfter","addonBefore","className","style","styles","rootClassName","onChange","classNames","variant"]),{getPrefixCls:L,direction:z,input:B}=r.useContext(s.E_),F=L("input",o),H=(0,r.useRef)(null),q=(0,g.Z)(F),[W,K,V]=(0,m.ZP)(F,q),{compactSize:U,compactItemClassnames:G}=(0,p.ri)(F,z),X=(0,d.Z)(e=>{var t;return null!==(t=null!=S?S:U)&&void 0!==t?t:e}),$=r.useContext(u.Z),{status:Y,hasFeedback:Q,feedbackIcon:J}=(0,r.useContext)(f.aM),ee=(0,c.F)(Y,w),et=!!(e.prefix||e.suffix||e.allowClear||e.showCount)||!!Q;(0,r.useRef)(et);let en=(0,h.Z)(H,!0),er=(Q||O)&&r.createElement(r.Fragment,null,O,Q&&J),eo=b(j),[ei,ea]=(0,v.Z)(D,y);return W(r.createElement(a.Z,Object.assign({ref:(0,l.sQ)(t,H),prefixCls:F,autoComplete:null==B?void 0:B.autoComplete},Z,{disabled:null!=k?k:$,onBlur:e=>{en(),null==E||E(e)},onFocus:e=>{en(),null==C||C(e)},style:Object.assign(Object.assign({},null==B?void 0:B.style),I),styles:Object.assign(Object.assign({},null==B?void 0:B.styles),R),suffix:er,allowClear:eo,className:i()(M,T,V,q,G,null==B?void 0:B.className),onChange:e=>{en(),null==A||A(e)},addonAfter:P&&r.createElement(p.BR,null,r.createElement(f.Ux,{override:!0,status:!0},P)),addonBefore:N&&r.createElement(p.BR,null,r.createElement(f.Ux,{override:!0,status:!0},N)),classNames:Object.assign(Object.assign(Object.assign({},_),null==B?void 0:B.classNames),{input:i()({["".concat(F,"-sm")]:"small"===X,["".concat(F,"-lg")]:"large"===X,["".concat(F,"-rtl")]:"rtl"===z},null==_?void 0:_.input,null===(n=null==B?void 0:B.classNames)||void 0===n?void 0:n.input,K),variant:i()({["".concat(F,"-").concat(ei)]:ea},(0,c.Z)(F,ee)),affixWrapper:i()({["".concat(F,"-affix-wrapper-sm")]:"small"===X,["".concat(F,"-affix-wrapper-lg")]:"large"===X,["".concat(F,"-affix-wrapper-rtl")]:"rtl"===z},K),wrapper:i()({["".concat(F,"-group-rtl")]:"rtl"===z},K),groupWrapper:i()({["".concat(F,"-group-wrapper-sm")]:"small"===X,["".concat(F,"-group-wrapper-lg")]:"large"===X,["".concat(F,"-group-wrapper-rtl")]:"rtl"===z,["".concat(F,"-group-wrapper-").concat(ei)]:ea},(0,c.Z)("".concat(F,"-group-wrapper"),ee,Q),K)})})))})},90464:function(e,t,n){"use strict";n.d(t,{Z:function(){return L}});var r,o=n(2265),i=n(39725),a=n(36760),l=n.n(a),c=n(1119),s=n(11993),u=n(31686),d=n(83145),f=n(26365),p=n(6989),h=n(2027),m=n(96032),g=n(55041),v=n(50506),y=n(41154),b=n(31474),x=n(27380),w=n(53346),S=["letter-spacing","line-height","padding-top","padding-bottom","font-family","font-weight","font-size","font-variant","text-rendering","text-transform","width","text-indent","padding-left","padding-right","border-width","box-sizing","word-break","white-space"],k={},E=["prefixCls","onPressEnter","defaultValue","value","autoSize","onResize","className","style","disabled","onChange","onInternalAutoSize"],C=o.forwardRef(function(e,t){var 
n=e.prefixCls,i=(e.onPressEnter,e.defaultValue),a=e.value,d=e.autoSize,h=e.onResize,m=e.className,g=e.style,C=e.disabled,O=e.onChange,j=(e.onInternalAutoSize,(0,p.Z)(e,E)),P=(0,v.Z)(i,{value:a,postState:function(e){return null!=e?e:""}}),N=(0,f.Z)(P,2),M=N[0],I=N[1],R=o.useRef();o.useImperativeHandle(t,function(){return{textArea:R.current}});var T=o.useMemo(function(){return d&&"object"===(0,y.Z)(d)?[d.minRows,d.maxRows]:[]},[d]),A=(0,f.Z)(T,2),_=A[0],D=A[1],Z=!!d,L=function(){try{if(document.activeElement===R.current){var e=R.current,t=e.selectionStart,n=e.selectionEnd,r=e.scrollTop;R.current.setSelectionRange(t,n),R.current.scrollTop=r}}catch(e){}},z=o.useState(2),B=(0,f.Z)(z,2),F=B[0],H=B[1],q=o.useState(),W=(0,f.Z)(q,2),K=W[0],V=W[1],U=function(){H(0)};(0,x.Z)(function(){Z&&U()},[a,_,D,Z]),(0,x.Z)(function(){if(0===F)H(1);else if(1===F){var e=function(e){var t,n=arguments.length>1&&void 0!==arguments[1]&&arguments[1],o=arguments.length>2&&void 0!==arguments[2]?arguments[2]:null,i=arguments.length>3&&void 0!==arguments[3]?arguments[3]:null;r||((r=document.createElement("textarea")).setAttribute("tab-index","-1"),r.setAttribute("aria-hidden","true"),document.body.appendChild(r)),e.getAttribute("wrap")?r.setAttribute("wrap",e.getAttribute("wrap")):r.removeAttribute("wrap");var a=function(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=e.getAttribute("id")||e.getAttribute("data-reactid")||e.getAttribute("name");if(t&&k[n])return k[n];var r=window.getComputedStyle(e),o=r.getPropertyValue("box-sizing")||r.getPropertyValue("-moz-box-sizing")||r.getPropertyValue("-webkit-box-sizing"),i=parseFloat(r.getPropertyValue("padding-bottom"))+parseFloat(r.getPropertyValue("padding-top")),a=parseFloat(r.getPropertyValue("border-bottom-width"))+parseFloat(r.getPropertyValue("border-top-width")),l={sizingStyle:S.map(function(e){return"".concat(e,":").concat(r.getPropertyValue(e))}).join(";"),paddingSize:i,borderSize:a,boxSizing:o};return t&&n&&(k[n]=l),l}(e,n),l=a.paddingSize,c=a.borderSize,s=a.boxSizing,u=a.sizingStyle;r.setAttribute("style","".concat(u,";").concat("\n min-height:0 !important;\n max-height:none !important;\n height:0 !important;\n visibility:hidden !important;\n overflow:hidden !important;\n position:absolute !important;\n z-index:-1000 !important;\n top:0 !important;\n right:0 !important;\n pointer-events: none !important;\n")),r.value=e.value||e.placeholder||"";var d=void 0,f=void 0,p=r.scrollHeight;if("border-box"===s?p+=c:"content-box"===s&&(p-=l),null!==o||null!==i){r.value=" ";var h=r.scrollHeight-l;null!==o&&(d=h*o,"border-box"===s&&(d=d+l+c),p=Math.max(d,p)),null!==i&&(f=h*i,"border-box"===s&&(f=f+l+c),t=p>f?"":"hidden",p=Math.min(f,p))}var m={height:p,overflowY:t,resize:"none"};return d&&(m.minHeight=d),f&&(m.maxHeight=f),m}(R.current,!1,_,D);H(2),V(e)}else L()},[F]);var G=o.useRef(),X=function(){w.Z.cancel(G.current)};o.useEffect(function(){return X},[]);var 
$=(0,u.Z)((0,u.Z)({},g),Z?K:null);return(0===F||1===F)&&($.overflowY="hidden",$.overflowX="hidden"),o.createElement(b.Z,{onResize:function(e){2===F&&(null==h||h(e),d&&(X(),G.current=(0,w.Z)(function(){U()})))},disabled:!(d||h)},o.createElement("textarea",(0,c.Z)({},j,{ref:R,style:$,className:l()(n,m,(0,s.Z)({},"".concat(n,"-disabled"),C)),disabled:C,value:M,onChange:function(e){I(e.target.value),null==O||O(e)}})))}),O=["defaultValue","value","onFocus","onBlur","onChange","allowClear","maxLength","onCompositionStart","onCompositionEnd","suffix","prefixCls","showCount","count","className","style","disabled","hidden","classNames","styles","onResize"],j=o.forwardRef(function(e,t){var n,r,i,a=e.defaultValue,y=e.value,b=e.onFocus,x=e.onBlur,w=e.onChange,S=e.allowClear,k=e.maxLength,E=e.onCompositionStart,j=e.onCompositionEnd,P=e.suffix,N=e.prefixCls,M=void 0===N?"rc-textarea":N,I=e.showCount,R=e.count,T=e.className,A=e.style,_=e.disabled,D=e.hidden,Z=e.classNames,L=e.styles,z=e.onResize,B=(0,p.Z)(e,O),F=(0,v.Z)(a,{value:y,defaultValue:a}),H=(0,f.Z)(F,2),q=H[0],W=H[1],K=null==q?"":String(q),V=o.useState(!1),U=(0,f.Z)(V,2),G=U[0],X=U[1],$=o.useRef(!1),Y=o.useState(null),Q=(0,f.Z)(Y,2),J=Q[0],ee=Q[1],et=(0,o.useRef)(null),en=function(){var e;return null===(e=et.current)||void 0===e?void 0:e.textArea},er=function(){en().focus()};(0,o.useImperativeHandle)(t,function(){return{resizableTextArea:et.current,focus:er,blur:function(){en().blur()}}}),(0,o.useEffect)(function(){X(function(e){return!_&&e})},[_]);var eo=o.useState(null),ei=(0,f.Z)(eo,2),ea=ei[0],el=ei[1];o.useEffect(function(){if(ea){var e;(e=en()).setSelectionRange.apply(e,(0,d.Z)(ea))}},[ea]);var ec=(0,m.Z)(R,I),es=null!==(n=ec.max)&&void 0!==n?n:k,eu=Number(es)>0,ed=ec.strategy(K),ef=!!es&&ed>es,ep=function(e,t){var n=t;!$.current&&ec.exceedFormatter&&ec.max&&ec.strategy(t)>ec.max&&(n=ec.exceedFormatter(t,{max:ec.max}),t!==n&&el([en().selectionStart||0,en().selectionEnd||0])),W(n),(0,g.rJ)(e.currentTarget,e,w,n)},eh=P;ec.show&&(i=ec.showFormatter?ec.showFormatter({value:K,count:ed,maxLength:es}):"".concat(ed).concat(eu?" 
/ ".concat(es):""),eh=o.createElement(o.Fragment,null,eh,o.createElement("span",{className:l()("".concat(M,"-data-count"),null==Z?void 0:Z.count),style:null==L?void 0:L.count},i)));var em=!B.autoSize&&!I&&!S;return o.createElement(h.Q,{value:K,allowClear:S,handleReset:function(e){W(""),er(),(0,g.rJ)(en(),e,w)},suffix:eh,prefixCls:M,classNames:(0,u.Z)((0,u.Z)({},Z),{},{affixWrapper:l()(null==Z?void 0:Z.affixWrapper,(r={},(0,s.Z)(r,"".concat(M,"-show-count"),I),(0,s.Z)(r,"".concat(M,"-textarea-allow-clear"),S),r))}),disabled:_,focused:G,className:l()(T,ef&&"".concat(M,"-out-of-range")),style:(0,u.Z)((0,u.Z)({},A),J&&!em?{height:"auto"}:{}),dataAttrs:{affixWrapper:{"data-count":"string"==typeof i?i:void 0}},hidden:D},o.createElement(C,(0,c.Z)({},B,{maxLength:k,onKeyDown:function(e){var t=B.onPressEnter,n=B.onKeyDown;"Enter"===e.key&&t&&t(e),null==n||n(e)},onChange:function(e){ep(e,e.target.value)},onFocus:function(e){X(!0),null==b||b(e)},onBlur:function(e){X(!1),null==x||x(e)},onCompositionStart:function(e){$.current=!0,null==E||E(e)},onCompositionEnd:function(e){$.current=!1,ep(e,e.currentTarget.value),null==j||j(e)},className:l()(null==Z?void 0:Z.textarea),style:(0,u.Z)((0,u.Z)({},null==L?void 0:L.textarea),{},{resize:null==A?void 0:A.resize}),disabled:_,prefixCls:M,onResize:function(e){var t;null==z||z(e),null!==(t=en())&&void 0!==t&&t.style.height&&ee(!0)},ref:et})))}),P=n(12757),N=n(71744),M=n(86586),I=n(33759),R=n(39109),T=n(65863),A=n(31282),_=n(64024),D=n(56250),Z=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n},L=(0,o.forwardRef)((e,t)=>{var n;let r;let{prefixCls:a,bordered:c=!0,size:s,disabled:u,status:d,allowClear:f,classNames:p,rootClassName:h,className:m,variant:g}=e,v=Z(e,["prefixCls","bordered","size","disabled","status","allowClear","classNames","rootClassName","className","variant"]),{getPrefixCls:y,direction:b}=o.useContext(N.E_),x=(0,I.Z)(s),w=o.useContext(M.Z),{status:S,hasFeedback:k,feedbackIcon:E}=o.useContext(R.aM),C=(0,P.F)(S,d),O=o.useRef(null);o.useImperativeHandle(t,()=>{var e;return{resizableTextArea:null===(e=O.current)||void 0===e?void 0:e.resizableTextArea,focus:e=>{var t,n;(0,T.n)(null===(n=null===(t=O.current)||void 0===t?void 0:t.resizableTextArea)||void 0===n?void 0:n.textArea,e)},blur:()=>{var e;return null===(e=O.current)||void 0===e?void 0:e.blur()}}});let L=y("input",a);"object"==typeof f&&(null==f?void 0:f.clearIcon)?r=f:f&&(r={clearIcon:o.createElement(i.Z,null)});let z=(0,_.Z)(L),[B,F,H]=(0,A.ZP)(L,z),[q,W]=(0,D.Z)(g,c);return B(o.createElement(j,Object.assign({},v,{disabled:null!=u?u:w,allowClear:r,className:l()(H,z,m,h),classNames:Object.assign(Object.assign({},p),{textarea:l()({["".concat(L,"-sm")]:"small"===x,["".concat(L,"-lg")]:"large"===x},F,null==p?void 0:p.textarea),variant:l()({["".concat(L,"-").concat(q)]:W},(0,P.Z)(L,C)),affixWrapper:l()("".concat(L,"-textarea-affix-wrapper"),{["".concat(L,"-affix-wrapper-rtl")]:"rtl"===b,["".concat(L,"-affix-wrapper-sm")]:"small"===x,["".concat(L,"-affix-wrapper-lg")]:"large"===x,["".concat(L,"-textarea-show-count")]:e.showCount||(null===(n=e.count)||void 0===n?void 0:n.show)},F)}),prefixCls:L,suffix:k&&o.createElement("span",{className:"".concat(L,"-textarea-suffix")},E),ref:O})))})},39164:function(e,t,n){"use strict";n.d(t,{Z:function(){return 
o}});var r=n(2265);function o(e,t){let n=(0,r.useRef)([]),o=()=>{n.current.push(setTimeout(()=>{var t,n,r,o;(null===(t=e.current)||void 0===t?void 0:t.input)&&(null===(n=e.current)||void 0===n?void 0:n.input.getAttribute("type"))==="password"&&(null===(r=e.current)||void 0===r?void 0:r.input.hasAttribute("value"))&&(null===(o=e.current)||void 0===o||o.input.removeAttribute("value"))}))};return(0,r.useEffect)(()=>(t&&o(),()=>n.current.forEach(e=>{e&&clearTimeout(e)})),[]),o}},56632:function(e,t,n){"use strict";n.d(t,{Z:function(){return I}});var r=n(2265),o=n(36760),i=n.n(o),a=n(71744),l=n(39109),c=n(31282),s=n(65863),u=n(1119),d={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M942.2 486.2Q889.47 375.11 816.7 305l-50.88 50.88C807.31 395.53 843.45 447.4 874.7 512 791.5 684.2 673.4 766 512 766q-72.67 0-133.87-22.38L323 798.75Q408 838 512 838q288.3 0 430.2-300.3a60.29 60.29 0 000-51.5zm-63.57-320.64L836 122.88a8 8 0 00-11.32 0L715.31 232.2Q624.86 186 512 186q-288.3 0-430.2 300.3a60.3 60.3 0 000 51.5q56.69 119.4 136.5 191.41L112.48 835a8 8 0 000 11.31L155.17 889a8 8 0 0011.31 0l712.15-712.12a8 8 0 000-11.32zM149.3 512C232.6 339.8 350.7 258 512 258c54.54 0 104.13 9.36 149.12 28.39l-70.3 70.3a176 176 0 00-238.13 238.13l-83.42 83.42C223.1 637.49 183.3 582.28 149.3 512zm246.7 0a112.11 112.11 0 01146.2-106.69L401.31 546.2A112 112 0 01396 512z"}},{tag:"path",attrs:{d:"M508 624c-3.46 0-6.87-.16-10.25-.47l-52.82 52.82a176.09 176.09 0 00227.42-227.42l-52.82 52.82c.31 3.38.47 6.79.47 10.25a111.94 111.94 0 01-112 112z"}}]},name:"eye-invisible",theme:"outlined"},f=n(55015),p=r.forwardRef(function(e,t){return r.createElement(f.Z,(0,u.Z)({},e,{ref:t,icon:d}))}),h=n(6520),m=n(18694),g=n(28791),v=n(39164),y=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let b=e=>e?r.createElement(h.Z,null):r.createElement(p,null),x={click:"onClick",hover:"onMouseOver"},w=r.forwardRef((e,t)=>{let{visibilityToggle:n=!0}=e,o="object"==typeof n&&void 0!==n.visible,[l,c]=(0,r.useState)(()=>!!o&&n.visible),u=(0,r.useRef)(null);r.useEffect(()=>{o&&c(n.visible)},[o,n]);let d=(0,v.Z)(u),f=()=>{let{disabled:t}=e;t||(l&&d(),c(e=>{var t;let r=!e;return"object"==typeof n&&(null===(t=n.onVisibleChange)||void 0===t||t.call(n,r)),r}))},{className:p,prefixCls:h,inputPrefixCls:w,size:S}=e,k=y(e,["className","prefixCls","inputPrefixCls","size"]),{getPrefixCls:E}=r.useContext(a.E_),C=E("input",w),O=E("input-password",h),j=n&&(t=>{let{action:n="click",iconRender:o=b}=e,i=x[n]||"",a=o(l);return r.cloneElement(r.isValidElement(a)?a:r.createElement("span",null,a),{[i]:f,className:"".concat(t,"-icon"),key:"passwordIcon",onMouseDown:e=>{e.preventDefault()},onMouseUp:e=>{e.preventDefault()}})})(O),P=i()(O,p,{["".concat(O,"-").concat(S)]:!!S}),N=Object.assign(Object.assign({},(0,m.Z)(k,["suffix","iconRender","visibilityToggle"])),{type:l?"text":"password",className:P,prefixCls:C,suffix:j});return S&&(N.size=S),r.createElement(s.Z,Object.assign({ref:(0,g.sQ)(t,u)},N))});var S=n(29436),k=n(19722),E=n(73002),C=n(33759),O=n(65658),j=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var 
o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let P=r.forwardRef((e,t)=>{let n;let{prefixCls:o,inputPrefixCls:l,className:c,size:u,suffix:d,enterButton:f=!1,addonAfter:p,loading:h,disabled:m,onSearch:v,onChange:y,onCompositionStart:b,onCompositionEnd:x}=e,w=j(e,["prefixCls","inputPrefixCls","className","size","suffix","enterButton","addonAfter","loading","disabled","onSearch","onChange","onCompositionStart","onCompositionEnd"]),{getPrefixCls:P,direction:N}=r.useContext(a.E_),M=r.useRef(!1),I=P("input-search",o),R=P("input",l),{compactSize:T}=(0,O.ri)(I,N),A=(0,C.Z)(e=>{var t;return null!==(t=null!=u?u:T)&&void 0!==t?t:e}),_=r.useRef(null),D=e=>{var t;document.activeElement===(null===(t=_.current)||void 0===t?void 0:t.input)&&e.preventDefault()},Z=e=>{var t,n;v&&v(null===(n=null===(t=_.current)||void 0===t?void 0:t.input)||void 0===n?void 0:n.value,e,{source:"input"})},L="boolean"==typeof f?r.createElement(S.Z,null):null,z="".concat(I,"-button"),B=f||{},F=B.type&&!0===B.type.__ANT_BUTTON;n=F||"button"===B.type?(0,k.Tm)(B,Object.assign({onMouseDown:D,onClick:e=>{var t,n;null===(n=null===(t=null==B?void 0:B.props)||void 0===t?void 0:t.onClick)||void 0===n||n.call(t,e),Z(e)},key:"enterButton"},F?{className:z,size:A}:{})):r.createElement(E.ZP,{className:z,type:f?"primary":void 0,size:A,disabled:m,key:"enterButton",onMouseDown:D,onClick:Z,loading:h,icon:L},f),p&&(n=[n,(0,k.Tm)(p,{key:"addonAfter"})]);let H=i()(I,{["".concat(I,"-rtl")]:"rtl"===N,["".concat(I,"-").concat(A)]:!!A,["".concat(I,"-with-button")]:!!f},c);return r.createElement(s.Z,Object.assign({ref:(0,g.sQ)(_,t),onPressEnter:e=>{M.current||h||Z(e)}},w,{size:A,onCompositionStart:e=>{M.current=!0,null==b||b(e)},onCompositionEnd:e=>{M.current=!1,null==x||x(e)},prefixCls:R,addonAfter:n,suffix:d,onChange:e=>{e&&e.target&&"click"===e.type&&v&&v(e.target.value,e,{source:"clear"}),y&&y(e)},className:H,disabled:m}))});var N=n(90464);let M=s.Z;M.Group=e=>{let{getPrefixCls:t,direction:n}=(0,r.useContext)(a.E_),{prefixCls:o,className:s}=e,u=t("input-group",o),d=t("input"),[f,p]=(0,c.ZP)(d),h=i()(u,{["".concat(u,"-lg")]:"large"===e.size,["".concat(u,"-sm")]:"small"===e.size,["".concat(u,"-compact")]:e.compact,["".concat(u,"-rtl")]:"rtl"===n},p,s),m=(0,r.useContext)(l.aM),g=(0,r.useMemo)(()=>Object.assign(Object.assign({},m),{isFormItemInput:!1}),[m]);return f(r.createElement("span",{className:h,style:e.style,onMouseEnter:e.onMouseEnter,onMouseLeave:e.onMouseLeave,onFocus:e.onFocus,onBlur:e.onBlur},r.createElement(l.aM.Provider,{value:g},e.children)))},M.Search=P,M.TextArea=N.Z,M.Password=w;var I=M},31282:function(e,t,n){"use strict";n.d(t,{ik:function(){return p},nz:function(){return u},s7:function(){return h},x0:function(){return f}});var r=n(352),o=n(12918),i=n(17691),a=n(80669),l=n(3104),c=n(37433),s=n(65265);let u=e=>({"&::-moz-placeholder":{opacity:1},"&::placeholder":{color:e,userSelect:"none"},"&:placeholder-shown":{textOverflow:"ellipsis"}}),d=e=>{let{paddingBlockLG:t,lineHeightLG:n,borderRadiusLG:o,paddingInlineLG:i}=e;return{padding:"".concat((0,r.bf)(t)," ").concat((0,r.bf)(i)),fontSize:e.inputFontSizeLG,lineHeight:n,borderRadius:o}},f=e=>({padding:"".concat((0,r.bf)(e.paddingBlockSM)," ").concat((0,r.bf)(e.paddingInlineSM)),fontSize:e.inputFontSizeSM,borderRadius:e.borderRadiusSM}),p=e=>Object.assign(Object.assign({position:"relative",display:"inline-block",width:"100%",minWidth:0,padding:"".concat((0,r.bf)(e.paddingBlock)," 
").concat((0,r.bf)(e.paddingInline)),color:e.colorText,fontSize:e.inputFontSize,lineHeight:e.lineHeight,borderRadius:e.borderRadius,transition:"all ".concat(e.motionDurationMid)},u(e.colorTextPlaceholder)),{"textarea&":{maxWidth:"100%",height:"auto",minHeight:e.controlHeight,lineHeight:e.lineHeight,verticalAlign:"bottom",transition:"all ".concat(e.motionDurationSlow,", height 0s"),resize:"vertical"},"&-lg":Object.assign({},d(e)),"&-sm":Object.assign({},f(e)),"&-rtl":{direction:"rtl"},"&-textarea-rtl":{direction:"rtl"}}),h=e=>{let{componentCls:t,antCls:n}=e;return{position:"relative",display:"table",width:"100%",borderCollapse:"separate",borderSpacing:0,"&[class*='col-']":{paddingInlineEnd:e.paddingXS,"&:last-child":{paddingInlineEnd:0}},["&-lg ".concat(t,", &-lg > ").concat(t,"-group-addon")]:Object.assign({},d(e)),["&-sm ".concat(t,", &-sm > ").concat(t,"-group-addon")]:Object.assign({},f(e)),["&-lg ".concat(n,"-select-single ").concat(n,"-select-selector")]:{height:e.controlHeightLG},["&-sm ".concat(n,"-select-single ").concat(n,"-select-selector")]:{height:e.controlHeightSM},["> ".concat(t)]:{display:"table-cell","&:not(:first-child):not(:last-child)":{borderRadius:0}},["".concat(t,"-group")]:{"&-addon, &-wrap":{display:"table-cell",width:1,whiteSpace:"nowrap",verticalAlign:"middle","&:not(:first-child):not(:last-child)":{borderRadius:0}},"&-wrap > *":{display:"block !important"},"&-addon":{position:"relative",padding:"0 ".concat((0,r.bf)(e.paddingInline)),color:e.colorText,fontWeight:"normal",fontSize:e.inputFontSize,textAlign:"center",borderRadius:e.borderRadius,transition:"all ".concat(e.motionDurationSlow),lineHeight:1,["".concat(n,"-select")]:{margin:"".concat((0,r.bf)(e.calc(e.paddingBlock).add(1).mul(-1).equal())," ").concat((0,r.bf)(e.calc(e.paddingInline).mul(-1).equal())),["&".concat(n,"-select-single:not(").concat(n,"-select-customize-input):not(").concat(n,"-pagination-size-changer)")]:{["".concat(n,"-select-selector")]:{backgroundColor:"inherit",border:"".concat((0,r.bf)(e.lineWidth)," ").concat(e.lineType," transparent"),boxShadow:"none"}},"&-open, &-focused":{["".concat(n,"-select-selector")]:{color:e.colorPrimary}}},["".concat(n,"-cascader-picker")]:{margin:"-9px ".concat((0,r.bf)(e.calc(e.paddingInline).mul(-1).equal())),backgroundColor:"transparent",["".concat(n,"-cascader-input")]:{textAlign:"start",border:0,boxShadow:"none"}}}},["".concat(t)]:{width:"100%",marginBottom:0,textAlign:"inherit","&:focus":{zIndex:1,borderInlineEndWidth:1},"&:hover":{zIndex:1,borderInlineEndWidth:1,["".concat(t,"-search-with-button &")]:{zIndex:0}}},["> ".concat(t,":first-child, ").concat(t,"-group-addon:first-child")]:{borderStartEndRadius:0,borderEndEndRadius:0,["".concat(n,"-select ").concat(n,"-select-selector")]:{borderStartEndRadius:0,borderEndEndRadius:0}},["> ".concat(t,"-affix-wrapper")]:{["&:not(:first-child) ".concat(t)]:{borderStartStartRadius:0,borderEndStartRadius:0},["&:not(:last-child) ".concat(t)]:{borderStartEndRadius:0,borderEndEndRadius:0}},["> ".concat(t,":last-child, ").concat(t,"-group-addon:last-child")]:{borderStartStartRadius:0,borderEndStartRadius:0,["".concat(n,"-select ").concat(n,"-select-selector")]:{borderStartStartRadius:0,borderEndStartRadius:0}},["".concat(t,"-affix-wrapper")]:{"&:not(:last-child)":{borderStartEndRadius:0,borderEndEndRadius:0,["".concat(t,"-search &")]:{borderStartStartRadius:e.borderRadius,borderEndStartRadius:e.borderRadius}},["&:not(:first-child), ".concat(t,"-search 
&:not(:first-child)")]:{borderStartStartRadius:0,borderEndStartRadius:0}},["&".concat(t,"-group-compact")]:Object.assign(Object.assign({display:"block"},(0,o.dF)()),{["".concat(t,"-group-addon, ").concat(t,"-group-wrap, > ").concat(t)]:{"&:not(:first-child):not(:last-child)":{borderInlineEndWidth:e.lineWidth,"&:hover":{zIndex:1},"&:focus":{zIndex:1}}},"& > *":{display:"inline-block",float:"none",verticalAlign:"top",borderRadius:0},["\n & > ".concat(t,"-affix-wrapper,\n & > ").concat(t,"-number-affix-wrapper,\n & > ").concat(n,"-picker-range\n ")]:{display:"inline-flex"},"& > *:not(:last-child)":{marginInlineEnd:e.calc(e.lineWidth).mul(-1).equal(),borderInlineEndWidth:e.lineWidth},["".concat(t)]:{float:"none"},["& > ".concat(n,"-select > ").concat(n,"-select-selector,\n & > ").concat(n,"-select-auto-complete ").concat(t,",\n & > ").concat(n,"-cascader-picker ").concat(t,",\n & > ").concat(t,"-group-wrapper ").concat(t)]:{borderInlineEndWidth:e.lineWidth,borderRadius:0,"&:hover":{zIndex:1},"&:focus":{zIndex:1}},["& > ".concat(n,"-select-focused")]:{zIndex:1},["& > ".concat(n,"-select > ").concat(n,"-select-arrow")]:{zIndex:1},["& > *:first-child,\n & > ".concat(n,"-select:first-child > ").concat(n,"-select-selector,\n & > ").concat(n,"-select-auto-complete:first-child ").concat(t,",\n & > ").concat(n,"-cascader-picker:first-child ").concat(t)]:{borderStartStartRadius:e.borderRadius,borderEndStartRadius:e.borderRadius},["& > *:last-child,\n & > ".concat(n,"-select:last-child > ").concat(n,"-select-selector,\n & > ").concat(n,"-cascader-picker:last-child ").concat(t,",\n & > ").concat(n,"-cascader-picker-focused:last-child ").concat(t)]:{borderInlineEndWidth:e.lineWidth,borderStartEndRadius:e.borderRadius,borderEndEndRadius:e.borderRadius},["& > ".concat(n,"-select-auto-complete ").concat(t)]:{verticalAlign:"top"},["".concat(t,"-group-wrapper + ").concat(t,"-group-wrapper")]:{marginInlineStart:e.calc(e.lineWidth).mul(-1).equal(),["".concat(t,"-affix-wrapper")]:{borderRadius:0}},["".concat(t,"-group-wrapper:not(:last-child)")]:{["&".concat(t,"-search > ").concat(t,"-group")]:{["& > ".concat(t,"-group-addon > ").concat(t,"-search-button")]:{borderRadius:0},["& > ".concat(t)]:{borderStartStartRadius:e.borderRadius,borderStartEndRadius:0,borderEndEndRadius:0,borderEndStartRadius:e.borderRadius}}}})}},m=e=>{let{componentCls:t,controlHeightSM:n,lineWidth:r,calc:i}=e,a=i(n).sub(i(r).mul(2)).sub(16).div(2).equal();return{[t]:Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({},(0,o.Wf)(e)),p(e)),(0,s.qG)(e)),(0,s.H8)(e)),(0,s.Mu)(e)),{'&[type="color"]':{height:e.controlHeight,["&".concat(t,"-lg")]:{height:e.controlHeightLG},["&".concat(t,"-sm")]:{height:n,paddingTop:a,paddingBottom:a}},'&[type="search"]::-webkit-search-cancel-button, &[type="search"]::-webkit-search-decoration':{"-webkit-appearance":"none"}})}},g=e=>{let{componentCls:t}=e;return{["".concat(t,"-clear-icon")]:{margin:0,color:e.colorTextQuaternary,fontSize:e.fontSizeIcon,verticalAlign:-1,cursor:"pointer",transition:"color ".concat(e.motionDurationSlow),"&:hover":{color:e.colorTextTertiary},"&:active":{color:e.colorText},"&-hidden":{visibility:"hidden"},"&-has-suffix":{margin:"0 
".concat((0,r.bf)(e.inputAffixPadding))}}}},v=e=>{let{componentCls:t,inputAffixPadding:n,colorTextDescription:r,motionDurationSlow:o,colorIcon:i,colorIconHover:a,iconCls:l}=e;return{["".concat(t,"-affix-wrapper")]:Object.assign(Object.assign(Object.assign(Object.assign({},p(e)),{display:"inline-flex",["&:not(".concat(t,"-disabled):hover")]:{zIndex:1,["".concat(t,"-search-with-button &")]:{zIndex:0}},"&-focused, &:focus":{zIndex:1},["> input".concat(t)]:{padding:0,fontSize:"inherit",border:"none",borderRadius:0,outline:"none",background:"transparent",color:"inherit","&::-ms-reveal":{display:"none"},"&:focus":{boxShadow:"none !important"}},"&::before":{display:"inline-block",width:0,visibility:"hidden",content:'"\\a0"'},["".concat(t)]:{"&-prefix, &-suffix":{display:"flex",flex:"none",alignItems:"center","> *:not(:last-child)":{marginInlineEnd:e.paddingXS}},"&-show-count-suffix":{color:r},"&-show-count-has-suffix":{marginInlineEnd:e.paddingXXS},"&-prefix":{marginInlineEnd:n},"&-suffix":{marginInlineStart:n}}}),g(e)),{["".concat(l).concat(t,"-password-icon")]:{color:i,cursor:"pointer",transition:"all ".concat(o),"&:hover":{color:a}}})}},y=e=>{let{componentCls:t,borderRadiusLG:n,borderRadiusSM:r}=e;return{["".concat(t,"-group")]:Object.assign(Object.assign(Object.assign({},(0,o.Wf)(e)),h(e)),{"&-rtl":{direction:"rtl"},"&-wrapper":Object.assign(Object.assign(Object.assign({display:"inline-block",width:"100%",textAlign:"start",verticalAlign:"top","&-rtl":{direction:"rtl"},"&-lg":{["".concat(t,"-group-addon")]:{borderRadius:n,fontSize:e.inputFontSizeLG}},"&-sm":{["".concat(t,"-group-addon")]:{borderRadius:r}}},(0,s.ir)(e)),(0,s.S5)(e)),{["&:not(".concat(t,"-compact-first-item):not(").concat(t,"-compact-last-item)").concat(t,"-compact-item")]:{["".concat(t,", ").concat(t,"-group-addon")]:{borderRadius:0}},["&:not(".concat(t,"-compact-last-item)").concat(t,"-compact-first-item")]:{["".concat(t,", ").concat(t,"-group-addon")]:{borderStartEndRadius:0,borderEndEndRadius:0}},["&:not(".concat(t,"-compact-first-item)").concat(t,"-compact-last-item")]:{["".concat(t,", ").concat(t,"-group-addon")]:{borderStartStartRadius:0,borderEndStartRadius:0}}})})}},b=e=>{let{componentCls:t,antCls:n}=e,r="".concat(t,"-search");return{[r]:{["".concat(t)]:{"&:hover, &:focus":{borderColor:e.colorPrimaryHover,["+ ".concat(t,"-group-addon ").concat(r,"-button:not(").concat(n,"-btn-primary)")]:{borderInlineStartColor:e.colorPrimaryHover}}},["".concat(t,"-affix-wrapper")]:{borderRadius:0},["".concat(t,"-lg")]:{lineHeight:e.calc(e.lineHeightLG).sub(2e-4).equal({unit:!1})},["> ".concat(t,"-group")]:{["> ".concat(t,"-group-addon:last-child")]:{insetInlineStart:-1,padding:0,border:0,["".concat(r,"-button")]:{paddingTop:0,paddingBottom:0,borderStartStartRadius:0,borderStartEndRadius:e.borderRadius,borderEndEndRadius:e.borderRadius,borderEndStartRadius:0,boxShadow:"none"},["".concat(r,"-button:not(").concat(n,"-btn-primary)")]:{color:e.colorTextDescription,"&:hover":{color:e.colorPrimaryHover},"&:active":{color:e.colorPrimaryActive},["&".concat(n,"-btn-loading::before")]:{insetInlineStart:0,insetInlineEnd:0,insetBlockStart:0,insetBlockEnd:0}}}},["".concat(r,"-button")]:{height:e.controlHeight,"&:hover, &:focus":{zIndex:1}},["&-large ".concat(r,"-button")]:{height:e.controlHeightLG},["&-small 
".concat(r,"-button")]:{height:e.controlHeightSM},"&-rtl":{direction:"rtl"},["&".concat(t,"-compact-item")]:{["&:not(".concat(t,"-compact-last-item)")]:{["".concat(t,"-group-addon")]:{["".concat(t,"-search-button")]:{marginInlineEnd:e.calc(e.lineWidth).mul(-1).equal(),borderRadius:0}}},["&:not(".concat(t,"-compact-first-item)")]:{["".concat(t,",").concat(t,"-affix-wrapper")]:{borderRadius:0}},["> ".concat(t,"-group-addon ").concat(t,"-search-button,\n > ").concat(t,",\n ").concat(t,"-affix-wrapper")]:{"&:hover,&:focus,&:active":{zIndex:2}},["> ".concat(t,"-affix-wrapper-focused")]:{zIndex:2}}}}},x=e=>{let{componentCls:t,paddingLG:n}=e,r="".concat(t,"-textarea");return{[r]:{position:"relative","&-show-count":{["> ".concat(t)]:{height:"100%"},["".concat(t,"-data-count")]:{position:"absolute",bottom:e.calc(e.fontSize).mul(e.lineHeight).mul(-1).equal(),insetInlineEnd:0,color:e.colorTextDescription,whiteSpace:"nowrap",pointerEvents:"none"}},"&-allow-clear":{["> ".concat(t)]:{paddingInlineEnd:n}},["&-affix-wrapper".concat(r,"-has-feedback")]:{["".concat(t)]:{paddingInlineEnd:n}},["&-affix-wrapper".concat(t,"-affix-wrapper")]:{padding:0,["> textarea".concat(t)]:{fontSize:"inherit",border:"none",outline:"none",background:"transparent","&:focus":{boxShadow:"none !important"}},["".concat(t,"-suffix")]:{margin:0,"> *:not(:last-child)":{marginInline:0},["".concat(t,"-clear-icon")]:{position:"absolute",insetInlineEnd:e.paddingXS,insetBlockStart:e.paddingXS},["".concat(r,"-suffix")]:{position:"absolute",top:0,insetInlineEnd:e.paddingInline,bottom:0,zIndex:1,display:"inline-flex",alignItems:"center",margin:"auto",pointerEvents:"none"}}}}}},w=e=>{let{componentCls:t}=e;return{["".concat(t,"-out-of-range")]:{["&, & input, & textarea, ".concat(t,"-show-count-suffix, ").concat(t,"-data-count")]:{color:e.colorError}}}};t.ZP=(0,a.I$)("Input",e=>{let t=(0,l.TS)(e,(0,c.e)(e));return[m(t),x(t),v(t),y(t),b(t),w(t),(0,i.c)(t)]},c.T)},37433:function(e,t,n){"use strict";n.d(t,{T:function(){return i},e:function(){return o}});var r=n(3104);function o(e){return(0,r.TS)(e,{inputAffixPadding:e.paddingXXS})}let i=e=>{let{controlHeight:t,fontSize:n,lineHeight:r,lineWidth:o,controlHeightSM:i,controlHeightLG:a,fontSizeLG:l,lineHeightLG:c,paddingSM:s,controlPaddingHorizontalSM:u,controlPaddingHorizontal:d,colorFillAlter:f,colorPrimaryHover:p,colorPrimary:h,controlOutlineWidth:m,controlOutline:g,colorErrorOutline:v,colorWarningOutline:y,colorBgContainer:b}=e;return{paddingBlock:Math.max(Math.round((t-n*r)/2*10)/10-o,0),paddingBlockSM:Math.max(Math.round((i-n*r)/2*10)/10-o,0),paddingBlockLG:Math.ceil((a-l*c)/2*10)/10-o,paddingInline:s-o,paddingInlineSM:u-o,paddingInlineLG:d-o,addonBg:f,activeBorderColor:h,hoverBorderColor:p,activeShadow:"0 0 0 ".concat(m,"px ").concat(g),errorActiveShadow:"0 0 0 ".concat(m,"px ").concat(v),warningActiveShadow:"0 0 0 ".concat(m,"px ").concat(y),hoverBg:b,activeBg:b,inputFontSize:n,inputFontSizeLG:l,inputFontSizeSM:n}}},65265:function(e,t,n){"use strict";n.d(t,{$U:function(){return l},H8:function(){return m},Mu:function(){return f},S5:function(){return v},Xy:function(){return a},ir:function(){return d},qG:function(){return s}});var r=n(352),o=n(3104);let 
i=e=>({borderColor:e.hoverBorderColor,backgroundColor:e.hoverBg}),a=e=>({color:e.colorTextDisabled,backgroundColor:e.colorBgContainerDisabled,borderColor:e.colorBorder,boxShadow:"none",cursor:"not-allowed",opacity:1,"&:hover:not([disabled])":Object.assign({},i((0,o.TS)(e,{hoverBorderColor:e.colorBorder,hoverBg:e.colorBgContainerDisabled})))}),l=(e,t)=>({background:e.colorBgContainer,borderWidth:e.lineWidth,borderStyle:e.lineType,borderColor:t.borderColor,"&:hover":{borderColor:t.hoverBorderColor,backgroundColor:e.hoverBg},"&:focus, &:focus-within":{borderColor:t.activeBorderColor,boxShadow:t.activeShadow,outline:0,backgroundColor:e.activeBg}}),c=(e,t)=>({["&".concat(e.componentCls,"-status-").concat(t.status,":not(").concat(e.componentCls,"-disabled)")]:Object.assign(Object.assign({},l(e,t)),{["".concat(e.componentCls,"-prefix, ").concat(e.componentCls,"-suffix")]:{color:t.affixColor}})}),s=(e,t)=>({"&-outlined":Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({},l(e,{borderColor:e.colorBorder,hoverBorderColor:e.colorPrimaryHover,activeBorderColor:e.colorPrimary,activeShadow:e.activeShadow})),{["&".concat(e.componentCls,"-disabled, &[disabled]")]:Object.assign({},a(e))}),c(e,{status:"error",borderColor:e.colorError,hoverBorderColor:e.colorErrorBorderHover,activeBorderColor:e.colorError,activeShadow:e.errorActiveShadow,affixColor:e.colorError})),c(e,{status:"warning",borderColor:e.colorWarning,hoverBorderColor:e.colorWarningBorderHover,activeBorderColor:e.colorWarning,activeShadow:e.warningActiveShadow,affixColor:e.colorWarning})),t)}),u=(e,t)=>({["&".concat(e.componentCls,"-group-wrapper-status-").concat(t.status)]:{["".concat(e.componentCls,"-group-addon")]:{borderColor:t.addonBorderColor,color:t.addonColor}}}),d=e=>({"&-outlined":Object.assign(Object.assign(Object.assign({["".concat(e.componentCls,"-group")]:{"&-addon":{background:e.addonBg,border:"".concat((0,r.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder)},"&-addon:first-child":{borderInlineEnd:0},"&-addon:last-child":{borderInlineStart:0}}},u(e,{status:"error",addonBorderColor:e.colorError,addonColor:e.colorErrorText})),u(e,{status:"warning",addonBorderColor:e.colorWarning,addonColor:e.colorWarningText})),{["&".concat(e.componentCls,"-group-wrapper-disabled")]:{["".concat(e.componentCls,"-group-addon")]:Object.assign({},a(e))}})}),f=(e,t)=>({"&-borderless":Object.assign({background:"transparent",border:"none","&:focus, &:focus-within":{outline:"none"},["&".concat(e.componentCls,"-disabled, &[disabled]")]:{color:e.colorTextDisabled}},t)}),p=(e,t)=>({background:t.bg,borderWidth:e.lineWidth,borderStyle:e.lineType,borderColor:"transparent","input&, & input, textarea&, & textarea":{color:null==t?void 0:t.inputColor},"&:hover":{background:t.hoverBg},"&:focus, &:focus-within":{outline:0,borderColor:t.activeBorderColor,backgroundColor:e.activeBg}}),h=(e,t)=>({["&".concat(e.componentCls,"-status-").concat(t.status,":not(").concat(e.componentCls,"-disabled)")]:Object.assign(Object.assign({},p(e,t)),{["".concat(e.componentCls,"-prefix, ").concat(e.componentCls,"-suffix")]:{color:t.affixColor}})}),m=(e,t)=>({"&-filled":Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({},p(e,{bg:e.colorFillTertiary,hoverBg:e.colorFillSecondary,activeBorderColor:e.colorPrimary})),{["&".concat(e.componentCls,"-disabled, 
&[disabled]")]:Object.assign({},a(e))}),h(e,{status:"error",bg:e.colorErrorBg,hoverBg:e.colorErrorBgHover,activeBorderColor:e.colorError,inputColor:e.colorErrorText,affixColor:e.colorError})),h(e,{status:"warning",bg:e.colorWarningBg,hoverBg:e.colorWarningBgHover,activeBorderColor:e.colorWarning,inputColor:e.colorWarningText,affixColor:e.colorWarning})),t)}),g=(e,t)=>({["&".concat(e.componentCls,"-group-wrapper-status-").concat(t.status)]:{["".concat(e.componentCls,"-group-addon")]:{background:t.addonBg,color:t.addonColor}}}),v=e=>({"&-filled":Object.assign(Object.assign(Object.assign({["".concat(e.componentCls,"-group")]:{"&-addon":{background:e.colorFillTertiary},["".concat(e.componentCls,"-filled:not(:focus):not(:focus-within)")]:{"&:not(:first-child)":{borderInlineStart:"".concat((0,r.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorSplit)},"&:not(:last-child)":{borderInlineEnd:"".concat((0,r.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorSplit)}}}},g(e,{status:"error",addonBg:e.colorErrorBg,addonColor:e.colorErrorText})),g(e,{status:"warning",addonBg:e.colorWarningBg,addonColor:e.colorWarningText})),{["&".concat(e.componentCls,"-group-wrapper-disabled")]:{["".concat(e.componentCls,"-group")]:{"&-addon":{background:e.colorFillTertiary,color:e.colorTextDisabled},"&-addon:first-child":{borderInlineStart:"".concat((0,r.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder),borderTop:"".concat((0,r.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder),borderBottom:"".concat((0,r.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder)},"&-addon:last-child":{borderInlineEnd:"".concat((0,r.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder),borderTop:"".concat((0,r.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder),borderBottom:"".concat((0,r.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder)}}}})})},92239:function(e,t,n){"use strict";let r;n.d(t,{D:function(){return b},Z:function(){return w}});var o=n(2265),i=n(1119),a={icon:{tag:"svg",attrs:{viewBox:"0 0 1024 1024",focusable:"false"},children:[{tag:"path",attrs:{d:"M912 192H328c-4.4 0-8 3.6-8 8v56c0 4.4 3.6 8 8 8h584c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8zm0 284H328c-4.4 0-8 3.6-8 8v56c0 4.4 3.6 8 8 8h584c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8zm0 284H328c-4.4 0-8 3.6-8 8v56c0 4.4 3.6 8 8 8h584c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8zM104 228a56 56 0 10112 0 56 56 0 10-112 0zm0 284a56 56 0 10112 0 56 56 0 10-112 0zm0 284a56 56 0 10112 0 56 56 0 10-112 0z"}}]},name:"bars",theme:"outlined"},l=n(55015),c=o.forwardRef(function(e,t){return o.createElement(l.Z,(0,i.Z)({},e,{ref:t,icon:a}))}),s=n(15327),u=n(77565),d=n(36760),f=n.n(d),p=n(18694),h=e=>!isNaN(parseFloat(e))&&isFinite(e),m=n(71744),g=n(80856),v=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let y={xs:"479.98px",sm:"575.98px",md:"767.98px",lg:"991.98px",xl:"1199.98px",xxl:"1599.98px"},b=o.createContext({}),x=(r=0,function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return r+=1,"".concat(e).concat(r)});var 
w=o.forwardRef((e,t)=>{let{prefixCls:n,className:r,trigger:i,children:a,defaultCollapsed:l=!1,theme:d="dark",style:w={},collapsible:S=!1,reverseArrow:k=!1,width:E=200,collapsedWidth:C=80,zeroWidthTriggerStyle:O,breakpoint:j,onCollapse:P,onBreakpoint:N}=e,M=v(e,["prefixCls","className","trigger","children","defaultCollapsed","theme","style","collapsible","reverseArrow","width","collapsedWidth","zeroWidthTriggerStyle","breakpoint","onCollapse","onBreakpoint"]),{siderHook:I}=(0,o.useContext)(g.V),[R,T]=(0,o.useState)("collapsed"in e?e.collapsed:l),[A,_]=(0,o.useState)(!1);(0,o.useEffect)(()=>{"collapsed"in e&&T(e.collapsed)},[e.collapsed]);let D=(t,n)=>{"collapsed"in e||T(t),null==P||P(t,n)},Z=(0,o.useRef)();Z.current=e=>{_(e.matches),null==N||N(e.matches),R!==e.matches&&D(e.matches,"responsive")},(0,o.useEffect)(()=>{let e;function t(e){return Z.current(e)}if("undefined"!=typeof window){let{matchMedia:n}=window;if(n&&j&&j in y){e=n("screen and (max-width: ".concat(y[j],")"));try{e.addEventListener("change",t)}catch(n){e.addListener(t)}t(e)}}return()=>{try{null==e||e.removeEventListener("change",t)}catch(n){null==e||e.removeListener(t)}}},[j]),(0,o.useEffect)(()=>{let e=x("ant-sider-");return I.addSider(e),()=>I.removeSider(e)},[]);let L=()=>{D(!R,"clickTrigger")},{getPrefixCls:z}=(0,o.useContext)(m.E_),B=o.useMemo(()=>({siderCollapsed:R}),[R]);return o.createElement(b.Provider,{value:B},(()=>{let e=z("layout-sider",n),l=(0,p.Z)(M,["collapsed"]),m=R?C:E,g=h(m)?"".concat(m,"px"):String(m),v=0===parseFloat(String(C||0))?o.createElement("span",{onClick:L,className:f()("".concat(e,"-zero-width-trigger"),"".concat(e,"-zero-width-trigger-").concat(k?"right":"left")),style:O},i||o.createElement(c,null)):null,y={expanded:k?o.createElement(u.Z,null):o.createElement(s.Z,null),collapsed:k?o.createElement(s.Z,null):o.createElement(u.Z,null)}[R?"collapsed":"expanded"],b=null!==i?v||o.createElement("div",{className:"".concat(e,"-trigger"),onClick:L,style:{width:g}},i||y):null,x=Object.assign(Object.assign({},w),{flex:"0 0 ".concat(g),maxWidth:g,minWidth:g,width:g}),j=f()(e,"".concat(e,"-").concat(d),{["".concat(e,"-collapsed")]:!!R,["".concat(e,"-has-trigger")]:S&&null!==i&&!v,["".concat(e,"-below")]:!!A,["".concat(e,"-zero-width")]:0===parseFloat(g)},r);return o.createElement("aside",Object.assign({className:j},l,{style:x,ref:t}),o.createElement("div",{className:"".concat(e,"-children")},a),S||A&&v?b:null)})())})},80856:function(e,t,n){"use strict";n.d(t,{V:function(){return r}});let r=n(2265).createContext({siderHook:{addSider:()=>null,removeSider:()=>null}})},19226:function(e,t,n){"use strict";n.d(t,{default:function(){return C}});var r=n(83145),o=n(2265),i=n(36760),a=n.n(i),l=n(18694),c=n(71744),s=n(80856),u=n(45287),d=n(92239),f=n(352),p=n(80669),h=e=>{let{componentCls:t,bodyBg:n,lightSiderBg:r,lightTriggerBg:o,lightTriggerColor:i}=e;return{["".concat(t,"-sider-light")]:{background:r,["".concat(t,"-sider-trigger")]:{color:i,background:o},["".concat(t,"-sider-zero-width-trigger")]:{color:i,background:o,border:"1px solid ".concat(n),borderInlineStart:0}}}};let m=e=>{let{antCls:t,componentCls:n,colorText:r,triggerColor:o,footerBg:i,triggerBg:a,headerHeight:l,headerPadding:c,headerColor:s,footerPadding:u,triggerHeight:d,zeroTriggerHeight:p,zeroTriggerWidth:m,motionDurationMid:g,motionDurationSlow:v,fontSize:y,borderRadius:b,bodyBg:x,headerBg:w,siderBg:S}=e;return{[n]:Object.assign(Object.assign({display:"flex",flex:"auto",flexDirection:"column",minHeight:0,background:x,"&, 
*":{boxSizing:"border-box"},["&".concat(n,"-has-sider")]:{flexDirection:"row",["> ".concat(n,", > ").concat(n,"-content")]:{width:0}},["".concat(n,"-header, &").concat(n,"-footer")]:{flex:"0 0 auto"},["".concat(n,"-sider")]:{position:"relative",minWidth:0,background:S,transition:"all ".concat(g,", background 0s"),"&-children":{height:"100%",marginTop:-.1,paddingTop:.1,["".concat(t,"-menu").concat(t,"-menu-inline-collapsed")]:{width:"auto"}},"&-has-trigger":{paddingBottom:d},"&-right":{order:1},"&-trigger":{position:"fixed",bottom:0,zIndex:1,height:d,color:o,lineHeight:(0,f.bf)(d),textAlign:"center",background:a,cursor:"pointer",transition:"all ".concat(g)},"&-zero-width":{"> *":{overflow:"hidden"},"&-trigger":{position:"absolute",top:l,insetInlineEnd:e.calc(m).mul(-1).equal(),zIndex:1,width:m,height:p,color:o,fontSize:e.fontSizeXL,display:"flex",alignItems:"center",justifyContent:"center",background:S,borderStartStartRadius:0,borderStartEndRadius:b,borderEndEndRadius:b,borderEndStartRadius:0,cursor:"pointer",transition:"background ".concat(v," ease"),"&::after":{position:"absolute",inset:0,background:"transparent",transition:"all ".concat(v),content:'""'},"&:hover::after":{background:"rgba(255, 255, 255, 0.2)"},"&-right":{insetInlineStart:e.calc(m).mul(-1).equal(),borderStartStartRadius:b,borderStartEndRadius:0,borderEndEndRadius:0,borderEndStartRadius:b}}}}},h(e)),{"&-rtl":{direction:"rtl"}}),["".concat(n,"-header")]:{height:l,padding:c,color:s,lineHeight:(0,f.bf)(l),background:w,["".concat(t,"-menu")]:{lineHeight:"inherit"}},["".concat(n,"-footer")]:{padding:u,color:r,fontSize:y,background:i},["".concat(n,"-content")]:{flex:"auto",minHeight:0}}};var g=(0,p.I$)("Layout",e=>[m(e)],e=>{let{colorBgLayout:t,controlHeight:n,controlHeightLG:r,colorText:o,controlHeightSM:i,marginXXS:a,colorTextLightSolid:l,colorBgContainer:c}=e,s=1.25*r;return{colorBgHeader:"#001529",colorBgBody:t,colorBgTrigger:"#002140",bodyBg:t,headerBg:"#001529",headerHeight:2*n,headerPadding:"0 ".concat(s,"px"),headerColor:o,footerPadding:"".concat(i,"px ").concat(s,"px"),footerBg:t,siderBg:"#001529",triggerHeight:r+2*a,triggerBg:"#002140",triggerColor:l,zeroTriggerWidth:r,zeroTriggerHeight:r,lightSiderBg:c,lightTriggerBg:c,lightTriggerColor:o}},{deprecatedTokens:[["colorBgBody","bodyBg"],["colorBgHeader","headerBg"],["colorBgTrigger","triggerBg"]]}),v=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};function y(e){let{suffixCls:t,tagName:n,displayName:r}=e;return e=>o.forwardRef((r,i)=>o.createElement(e,Object.assign({ref:i,suffixCls:t,tagName:n},r)))}let b=o.forwardRef((e,t)=>{let{prefixCls:n,suffixCls:r,className:i,tagName:l}=e,s=v(e,["prefixCls","suffixCls","className","tagName"]),{getPrefixCls:u}=o.useContext(c.E_),d=u("layout",n),[f,p,h]=g(d),m=r?"".concat(d,"-").concat(r):d;return f(o.createElement(l,Object.assign({className:a()(n||m,i,p,h),ref:t},s)))}),x=o.forwardRef((e,t)=>{let{direction:n}=o.useContext(c.E_),[i,f]=o.useState([]),{prefixCls:p,className:h,rootClassName:m,children:y,hasSider:b,tagName:x,style:w}=e,S=v(e,["prefixCls","className","rootClassName","children","hasSider","tagName","style"]),k=(0,l.Z)(S,["suffixCls"]),{getPrefixCls:E,layout:C}=o.useContext(c.E_),O=E("layout",p),j="boolean"==typeof 
b?b:!!i.length||(0,u.Z)(y).some(e=>e.type===d.Z),[P,N,M]=g(O),I=a()(O,{["".concat(O,"-has-sider")]:j,["".concat(O,"-rtl")]:"rtl"===n},null==C?void 0:C.className,h,m,N,M),R=o.useMemo(()=>({siderHook:{addSider:e=>{f(t=>[].concat((0,r.Z)(t),[e]))},removeSider:e=>{f(t=>t.filter(t=>t!==e))}}}),[]);return P(o.createElement(s.V.Provider,{value:R},o.createElement(x,Object.assign({ref:t,className:I,style:Object.assign(Object.assign({},null==C?void 0:C.style),w)},k),y)))}),w=y({tagName:"div",displayName:"Layout"})(x),S=y({suffixCls:"header",tagName:"header",displayName:"Header"})(b),k=y({suffixCls:"footer",tagName:"footer",displayName:"Footer"})(b),E=y({suffixCls:"content",tagName:"main",displayName:"Content"})(b);w.Header=S,w.Footer=k,w.Content=E,w.Sider=d.Z,w._InternalSiderContext=d.D;var C=w},88208:function(e,t,n){"use strict";n.d(t,{J:function(){return c}});var r=n(2265),o=n(74126),i=n(65658),a=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let l=r.createContext(null),c=r.forwardRef((e,t)=>{let{children:n}=e,c=a(e,["children"]),s=r.useContext(l),u=r.useMemo(()=>Object.assign(Object.assign({},s),c),[s,c.prefixCls,c.mode,c.selectable,c.rootClassName]),d=(0,o.t4)(n),f=(0,o.x1)(t,d?n.ref:null);return r.createElement(l.Provider,{value:u},r.createElement(i.BR,null,d?r.cloneElement(n,{ref:f}):n))});t.Z=l},45937:function(e,t,n){"use strict";n.d(t,{Z:function(){return V}});var r=n(2265),o=n(33082),i=n(92239),a=n(39760),l=n(36760),c=n.n(l),s=n(74126),u=n(18694),d=n(68710),f=n(19722),p=n(71744),h=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n},m=e=>{let{prefixCls:t,className:n,dashed:i}=e,a=h(e,["prefixCls","className","dashed"]),{getPrefixCls:l}=r.useContext(p.E_),s=l("menu",t),u=c()({["".concat(s,"-item-divider-dashed")]:!!i},n);return r.createElement(o.iz,Object.assign({className:u},a))},g=n(45287),v=n(75957);let y=(0,r.createContext)({prefixCls:"",firstLevel:!0,inlineCollapsed:!1});var b=e=>{var t;let{className:n,children:a,icon:l,title:s,danger:d}=e,{prefixCls:p,firstLevel:h,direction:m,disableMenuItemTitleTooltip:b,inlineCollapsed:x}=r.useContext(y),{siderCollapsed:w}=r.useContext(i.D),S=s;void 0===s?S=h?a:"":!1===s&&(S="");let k={title:S};w||x||(k.title=null,k.open=!1);let E=(0,g.Z)(a).length,C=r.createElement(o.ck,Object.assign({},(0,u.Z)(e,["title","icon","danger"]),{className:c()({["".concat(p,"-item-danger")]:d,["".concat(p,"-item-only-child")]:(l?E+1:E)===1},n),title:"string"==typeof s?s:void 0}),(0,f.Tm)(l,{className:c()((0,f.l$)(l)?null===(t=l.props)||void 0===t?void 0:t.className:"","".concat(p,"-item-icon"))}),(e=>{let t=r.createElement("span",{className:"".concat(p,"-title-content")},a);return(!l||(0,f.l$)(a)&&"span"===a.type)&&a&&e&&h&&"string"==typeof a?r.createElement("div",{className:"".concat(p,"-inline-collapsed-noicon")},a.charAt(0)):t})(x));return b||(C=r.createElement(v.Z,Object.assign({},k,{placement:"rtl"===m?"left":"right",overlayClassName:"".concat(p,"-inline-collapsed-tooltip")}),C)),C},x=n(62236),w=e=>{var t;let 
n;let{popupClassName:i,icon:a,title:l,theme:s}=e,d=r.useContext(y),{prefixCls:p,inlineCollapsed:h,theme:m}=d,g=(0,o.Xl)();if(a){let e=(0,f.l$)(l)&&"span"===l.type;n=r.createElement(r.Fragment,null,(0,f.Tm)(a,{className:c()((0,f.l$)(a)?null===(t=a.props)||void 0===t?void 0:t.className:"","".concat(p,"-item-icon"))}),e?l:r.createElement("span",{className:"".concat(p,"-title-content")},l))}else n=h&&!g.length&&l&&"string"==typeof l?r.createElement("div",{className:"".concat(p,"-inline-collapsed-noicon")},l.charAt(0)):r.createElement("span",{className:"".concat(p,"-title-content")},l);let v=r.useMemo(()=>Object.assign(Object.assign({},d),{firstLevel:!1}),[d]),[b]=(0,x.Cn)("Menu");return r.createElement(y.Provider,{value:v},r.createElement(o.Wd,Object.assign({},(0,u.Z)(e,["icon"]),{title:n,popupClassName:c()(p,i,"".concat(p,"-").concat(s||m)),popupStyle:{zIndex:b}})))},S=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n},k=n(88208),E=n(352),C=n(36360),O=n(12918),j=n(63074),P=n(18544),N=n(691),M=n(80669),I=n(3104),R=e=>{let{componentCls:t,motionDurationSlow:n,horizontalLineHeight:r,colorSplit:o,lineWidth:i,lineType:a,itemPaddingInline:l}=e;return{["".concat(t,"-horizontal")]:{lineHeight:r,border:0,borderBottom:"".concat((0,E.bf)(i)," ").concat(a," ").concat(o),boxShadow:"none","&::after":{display:"block",clear:"both",height:0,content:'"\\20"'},["".concat(t,"-item, ").concat(t,"-submenu")]:{position:"relative",display:"inline-block",verticalAlign:"bottom",paddingInline:l},["> ".concat(t,"-item:hover,\n > ").concat(t,"-item-active,\n > ").concat(t,"-submenu ").concat(t,"-submenu-title:hover")]:{backgroundColor:"transparent"},["".concat(t,"-item, ").concat(t,"-submenu-title")]:{transition:["border-color ".concat(n),"background ".concat(n)].join(",")},["".concat(t,"-submenu-arrow")]:{display:"none"}}}},T=e=>{let{componentCls:t,menuArrowOffset:n,calc:r}=e;return{["".concat(t,"-rtl")]:{direction:"rtl"},["".concat(t,"-submenu-rtl")]:{transformOrigin:"100% 0"},["".concat(t,"-rtl").concat(t,"-vertical,\n ").concat(t,"-submenu-rtl ").concat(t,"-vertical")]:{["".concat(t,"-submenu-arrow")]:{"&::before":{transform:"rotate(-45deg) translateY(".concat((0,E.bf)(r(n).mul(-1).equal()),")")},"&::after":{transform:"rotate(45deg) translateY(".concat((0,E.bf)(n),")")}}}}};let A=e=>Object.assign({},(0,O.oN)(e));var _=(e,t)=>{let{componentCls:n,itemColor:r,itemSelectedColor:o,groupTitleColor:i,itemBg:a,subMenuItemBg:l,itemSelectedBg:c,activeBarHeight:s,activeBarWidth:u,activeBarBorderWidth:d,motionDurationSlow:f,motionEaseInOut:p,motionEaseOut:h,itemPaddingInline:m,motionDurationMid:g,itemHoverColor:v,lineType:y,colorSplit:b,itemDisabledColor:x,dangerItemColor:w,dangerItemHoverColor:S,dangerItemSelectedColor:k,dangerItemActiveBg:C,dangerItemSelectedBg:O,popupBg:j,itemHoverBg:P,itemActiveBg:N,menuSubMenuBg:M,horizontalItemSelectedColor:I,horizontalItemSelectedBg:R,horizontalItemBorderRadius:T,horizontalItemHoverBg:_}=e;return{["".concat(n,"-").concat(t,", ").concat(n,"-").concat(t," > ").concat(n)]:{color:r,background:a,["&".concat(n,"-root:focus-visible")]:Object.assign({},A(e)),["".concat(n,"-item-group-title")]:{color:i},["".concat(n,"-submenu-selected")]:{["> ".concat(n,"-submenu-title")]:{color:o}},["".concat(n,"-item-disabled, 
").concat(n,"-submenu-disabled")]:{color:"".concat(x," !important")},["".concat(n,"-item:not(").concat(n,"-item-selected):not(").concat(n,"-submenu-selected)")]:{["&:hover, > ".concat(n,"-submenu-title:hover")]:{color:v}},["&:not(".concat(n,"-horizontal)")]:{["".concat(n,"-item:not(").concat(n,"-item-selected)")]:{"&:hover":{backgroundColor:P},"&:active":{backgroundColor:N}},["".concat(n,"-submenu-title")]:{"&:hover":{backgroundColor:P},"&:active":{backgroundColor:N}}},["".concat(n,"-item-danger")]:{color:w,["&".concat(n,"-item:hover")]:{["&:not(".concat(n,"-item-selected):not(").concat(n,"-submenu-selected)")]:{color:S}},["&".concat(n,"-item:active")]:{background:C}},["".concat(n,"-item a")]:{"&, &:hover":{color:"inherit"}},["".concat(n,"-item-selected")]:{color:o,["&".concat(n,"-item-danger")]:{color:k},"a, a:hover":{color:"inherit"}},["& ".concat(n,"-item-selected")]:{backgroundColor:c,["&".concat(n,"-item-danger")]:{backgroundColor:O}},["".concat(n,"-item, ").concat(n,"-submenu-title")]:{["&:not(".concat(n,"-item-disabled):focus-visible")]:Object.assign({},A(e))},["&".concat(n,"-submenu > ").concat(n)]:{backgroundColor:M},["&".concat(n,"-popup > ").concat(n)]:{backgroundColor:j},["&".concat(n,"-submenu-popup > ").concat(n)]:{backgroundColor:j},["&".concat(n,"-horizontal")]:Object.assign(Object.assign({},"dark"===t?{borderBottom:0}:{}),{["> ".concat(n,"-item, > ").concat(n,"-submenu")]:{top:d,marginTop:e.calc(d).mul(-1).equal(),marginBottom:0,borderRadius:T,"&::after":{position:"absolute",insetInline:m,bottom:0,borderBottom:"".concat((0,E.bf)(s)," solid transparent"),transition:"border-color ".concat(f," ").concat(p),content:'""'},"&:hover, &-active, &-open":{background:_,"&::after":{borderBottomWidth:s,borderBottomColor:I}},"&-selected":{color:I,backgroundColor:R,"&:hover":{backgroundColor:R},"&::after":{borderBottomWidth:s,borderBottomColor:I}}}}),["&".concat(n,"-root")]:{["&".concat(n,"-inline, &").concat(n,"-vertical")]:{borderInlineEnd:"".concat((0,E.bf)(d)," ").concat(y," ").concat(b)}},["&".concat(n,"-inline")]:{["".concat(n,"-sub").concat(n,"-inline")]:{background:l},["".concat(n,"-item")]:{position:"relative","&::after":{position:"absolute",insetBlock:0,insetInlineEnd:0,borderInlineEnd:"".concat((0,E.bf)(u)," solid ").concat(o),transform:"scaleY(0.0001)",opacity:0,transition:["transform ".concat(g," ").concat(h),"opacity ".concat(g," ").concat(h)].join(","),content:'""'},["&".concat(n,"-item-danger")]:{"&::after":{borderInlineEndColor:k}}},["".concat(n,"-selected, ").concat(n,"-item-selected")]:{"&::after":{transform:"scaleY(1)",opacity:1,transition:["transform ".concat(g," ").concat(p),"opacity ".concat(g," ").concat(p)].join(",")}}}}}};let D=e=>{let{componentCls:t,itemHeight:n,itemMarginInline:r,padding:o,menuArrowSize:i,marginXS:a,itemMarginBlock:l,itemWidth:c}=e,s=e.calc(i).add(o).add(a).equal();return{["".concat(t,"-item")]:{position:"relative",overflow:"hidden"},["".concat(t,"-item, ").concat(t,"-submenu-title")]:{height:n,lineHeight:(0,E.bf)(n),paddingInline:o,overflow:"hidden",textOverflow:"ellipsis",marginInline:r,marginBlock:l,width:c},["> ".concat(t,"-item,\n > ").concat(t,"-submenu > ").concat(t,"-submenu-title")]:{height:n,lineHeight:(0,E.bf)(n)},["".concat(t,"-item-group-list ").concat(t,"-submenu-title,\n ").concat(t,"-submenu-title")]:{paddingInlineEnd:s}}};var 
Z=e=>{let{componentCls:t,iconCls:n,itemHeight:r,colorTextLightSolid:o,dropdownWidth:i,controlHeightLG:a,motionDurationMid:l,motionEaseOut:c,paddingXL:s,itemMarginInline:u,fontSizeLG:d,motionDurationSlow:f,paddingXS:p,boxShadowSecondary:h,collapsedWidth:m,collapsedIconSize:g}=e,v={height:r,lineHeight:(0,E.bf)(r),listStylePosition:"inside",listStyleType:"disc"};return[{[t]:{"&-inline, &-vertical":Object.assign({["&".concat(t,"-root")]:{boxShadow:"none"}},D(e))},["".concat(t,"-submenu-popup")]:{["".concat(t,"-vertical")]:Object.assign(Object.assign({},D(e)),{boxShadow:h})}},{["".concat(t,"-submenu-popup ").concat(t,"-vertical").concat(t,"-sub")]:{minWidth:i,maxHeight:"calc(100vh - ".concat((0,E.bf)(e.calc(a).mul(2.5).equal()),")"),padding:"0",overflow:"hidden",borderInlineEnd:0,"&:not([class*='-active'])":{overflowX:"hidden",overflowY:"auto"}}},{["".concat(t,"-inline")]:{width:"100%",["&".concat(t,"-root")]:{["".concat(t,"-item, ").concat(t,"-submenu-title")]:{display:"flex",alignItems:"center",transition:["border-color ".concat(f),"background ".concat(f),"padding ".concat(l," ").concat(c)].join(","),["> ".concat(t,"-title-content")]:{flex:"auto",minWidth:0,overflow:"hidden",textOverflow:"ellipsis"},"> *":{flex:"none"}}},["".concat(t,"-sub").concat(t,"-inline")]:{padding:0,border:0,borderRadius:0,boxShadow:"none",["& > ".concat(t,"-submenu > ").concat(t,"-submenu-title")]:v,["& ".concat(t,"-item-group-title")]:{paddingInlineStart:s}},["".concat(t,"-item")]:v}},{["".concat(t,"-inline-collapsed")]:{width:m,["&".concat(t,"-root")]:{["".concat(t,"-item, ").concat(t,"-submenu ").concat(t,"-submenu-title")]:{["> ".concat(t,"-inline-collapsed-noicon")]:{fontSize:d,textAlign:"center"}}},["> ".concat(t,"-item,\n > ").concat(t,"-item-group > ").concat(t,"-item-group-list > ").concat(t,"-item,\n > ").concat(t,"-item-group > ").concat(t,"-item-group-list > ").concat(t,"-submenu > ").concat(t,"-submenu-title,\n > ").concat(t,"-submenu > ").concat(t,"-submenu-title")]:{insetInlineStart:0,paddingInline:"calc(50% - ".concat((0,E.bf)(e.calc(d).div(2).equal())," - ").concat((0,E.bf)(u),")"),textOverflow:"clip",["\n ".concat(t,"-submenu-arrow,\n ").concat(t,"-submenu-expand-icon\n ")]:{opacity:0},["".concat(t,"-item-icon, ").concat(n)]:{margin:0,fontSize:g,lineHeight:(0,E.bf)(r),"+ span":{display:"inline-block",opacity:0}}},["".concat(t,"-item-icon, ").concat(n)]:{display:"inline-block"},"&-tooltip":{pointerEvents:"none",["".concat(t,"-item-icon, ").concat(n)]:{display:"none"},"a, a:hover":{color:o}},["".concat(t,"-item-group-title")]:Object.assign(Object.assign({},O.vS),{paddingInline:p})}}]};let L=e=>{let{componentCls:t,motionDurationSlow:n,motionDurationMid:r,motionEaseInOut:o,motionEaseOut:i,iconCls:a,iconSize:l,iconMarginInlineEnd:c}=e;return{["".concat(t,"-item, ").concat(t,"-submenu-title")]:{position:"relative",display:"block",margin:0,whiteSpace:"nowrap",cursor:"pointer",transition:["border-color ".concat(n),"background ".concat(n),"padding ".concat(n," ").concat(o)].join(","),["".concat(t,"-item-icon, ").concat(a)]:{minWidth:l,fontSize:l,transition:["font-size ".concat(r," ").concat(i),"margin ".concat(n," ").concat(o),"color ".concat(n)].join(","),"+ span":{marginInlineStart:c,opacity:1,transition:["opacity ".concat(n," ").concat(o),"margin ".concat(n),"color ".concat(n)].join(",")}},["".concat(t,"-item-icon")]:Object.assign({},(0,O.Ro)()),["&".concat(t,"-item-only-child")]:{["> ".concat(a,", > ").concat(t,"-item-icon")]:{marginInlineEnd:0}}},["".concat(t,"-item-disabled, 
").concat(t,"-submenu-disabled")]:{background:"none !important",cursor:"not-allowed","&::after":{borderColor:"transparent !important"},a:{color:"inherit !important"},["> ".concat(t,"-submenu-title")]:{color:"inherit !important",cursor:"not-allowed"}}}},z=e=>{let{componentCls:t,motionDurationSlow:n,motionEaseInOut:r,borderRadius:o,menuArrowSize:i,menuArrowOffset:a}=e;return{["".concat(t,"-submenu")]:{"&-expand-icon, &-arrow":{position:"absolute",top:"50%",insetInlineEnd:e.margin,width:i,color:"currentcolor",transform:"translateY(-50%)",transition:"transform ".concat(n," ").concat(r,", opacity ").concat(n)},"&-arrow":{"&::before, &::after":{position:"absolute",width:e.calc(i).mul(.6).equal(),height:e.calc(i).mul(.15).equal(),backgroundColor:"currentcolor",borderRadius:o,transition:["background ".concat(n," ").concat(r),"transform ".concat(n," ").concat(r),"top ".concat(n," ").concat(r),"color ".concat(n," ").concat(r)].join(","),content:'""'},"&::before":{transform:"rotate(45deg) translateY(".concat((0,E.bf)(e.calc(a).mul(-1).equal()),")")},"&::after":{transform:"rotate(-45deg) translateY(".concat((0,E.bf)(a),")")}}}}},B=e=>{let{antCls:t,componentCls:n,fontSize:r,motionDurationSlow:o,motionDurationMid:i,motionEaseInOut:a,paddingXS:l,padding:c,colorSplit:s,lineWidth:u,zIndexPopup:d,borderRadiusLG:f,subMenuItemBorderRadius:p,menuArrowSize:h,menuArrowOffset:m,lineType:g,menuPanelMaskInset:v,groupTitleLineHeight:y,groupTitleFontSize:b}=e;return[{"":{["".concat(n)]:Object.assign(Object.assign({},(0,O.dF)()),{"&-hidden":{display:"none"}})},["".concat(n,"-submenu-hidden")]:{display:"none"}},{[n]:Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({},(0,O.Wf)(e)),(0,O.dF)()),{marginBottom:0,paddingInlineStart:0,fontSize:r,lineHeight:0,listStyle:"none",outline:"none",transition:"width ".concat(o," cubic-bezier(0.2, 0, 0, 1) 0s"),"ul, ol":{margin:0,padding:0,listStyle:"none"},"&-overflow":{display:"flex",["".concat(n,"-item")]:{flex:"none"}},["".concat(n,"-item, ").concat(n,"-submenu, ").concat(n,"-submenu-title")]:{borderRadius:e.itemBorderRadius},["".concat(n,"-item-group-title")]:{padding:"".concat((0,E.bf)(l)," ").concat((0,E.bf)(c)),fontSize:b,lineHeight:y,transition:"all ".concat(o)},["&-horizontal ".concat(n,"-submenu")]:{transition:["border-color ".concat(o," ").concat(a),"background ".concat(o," ").concat(a)].join(",")},["".concat(n,"-submenu, ").concat(n,"-submenu-inline")]:{transition:["border-color ".concat(o," ").concat(a),"background ".concat(o," ").concat(a),"padding ".concat(i," ").concat(a)].join(",")},["".concat(n,"-submenu ").concat(n,"-sub")]:{cursor:"initial",transition:["background ".concat(o," ").concat(a),"padding ".concat(o," ").concat(a)].join(",")},["".concat(n,"-title-content")]:{transition:"color ".concat(o),["> ".concat(t,"-typography-ellipsis-single-line")]:{display:"inline",verticalAlign:"unset"}},["".concat(n,"-item a")]:{"&::before":{position:"absolute",inset:0,backgroundColor:"transparent",content:'""'}},["".concat(n,"-item-divider")]:{overflow:"hidden",lineHeight:0,borderColor:s,borderStyle:g,borderWidth:0,borderTopWidth:u,marginBlock:u,padding:0,"&-dashed":{borderStyle:"dashed"}}}),L(e)),{["".concat(n,"-item-group")]:{["".concat(n,"-item-group-list")]:{margin:0,padding:0,["".concat(n,"-item, ").concat(n,"-submenu-title")]:{paddingInline:"".concat((0,E.bf)(e.calc(r).mul(2).equal())," ").concat((0,E.bf)(c))}}},"&-submenu":{"&-popup":{position:"absolute",zIndex:d,borderRadius:f,boxShadow:"none",transformOrigin:"0 
0",["&".concat(n,"-submenu")]:{background:"transparent"},"&::before":{position:"absolute",inset:"".concat((0,E.bf)(v)," 0 0"),zIndex:-1,width:"100%",height:"100%",opacity:0,content:'""'}},"&-placement-rightTop::before":{top:0,insetInlineStart:v},"\n &-placement-leftTop,\n &-placement-bottomRight,\n ":{transformOrigin:"100% 0"},"\n &-placement-leftBottom,\n &-placement-topRight,\n ":{transformOrigin:"100% 100%"},"\n &-placement-rightBottom,\n &-placement-topLeft,\n ":{transformOrigin:"0 100%"},"\n &-placement-bottomLeft,\n &-placement-rightTop,\n ":{transformOrigin:"0 0"},"\n &-placement-leftTop,\n &-placement-leftBottom\n ":{paddingInlineEnd:e.paddingXS},"\n &-placement-rightTop,\n &-placement-rightBottom\n ":{paddingInlineStart:e.paddingXS},"\n &-placement-topRight,\n &-placement-topLeft\n ":{paddingBottom:e.paddingXS},"\n &-placement-bottomRight,\n &-placement-bottomLeft\n ":{paddingTop:e.paddingXS},["> ".concat(n)]:Object.assign(Object.assign(Object.assign({borderRadius:f},L(e)),z(e)),{["".concat(n,"-item, ").concat(n,"-submenu > ").concat(n,"-submenu-title")]:{borderRadius:p},["".concat(n,"-submenu-title::after")]:{transition:"transform ".concat(o," ").concat(a)}})}}),z(e)),{["&-inline-collapsed ".concat(n,"-submenu-arrow,\n &-inline ").concat(n,"-submenu-arrow")]:{"&::before":{transform:"rotate(-45deg) translateX(".concat((0,E.bf)(m),")")},"&::after":{transform:"rotate(45deg) translateX(".concat((0,E.bf)(e.calc(m).mul(-1).equal()),")")}},["".concat(n,"-submenu-open").concat(n,"-submenu-inline > ").concat(n,"-submenu-title > ").concat(n,"-submenu-arrow")]:{transform:"translateY(".concat((0,E.bf)(e.calc(h).mul(.2).mul(-1).equal()),")"),"&::after":{transform:"rotate(-45deg) translateX(".concat((0,E.bf)(e.calc(m).mul(-1).equal()),")")},"&::before":{transform:"rotate(45deg) translateX(".concat((0,E.bf)(m),")")}}})},{["".concat(t,"-layout-header")]:{[n]:{lineHeight:"inherit"}}}]},F=e=>{var t,n,r;let{colorPrimary:o,colorError:i,colorTextDisabled:a,colorErrorBg:l,colorText:c,colorTextDescription:s,colorBgContainer:u,colorFillAlter:d,colorFillContent:f,lineWidth:p,lineWidthBold:h,controlItemBgActive:m,colorBgTextHover:g,controlHeightLG:v,lineHeight:y,colorBgElevated:b,marginXXS:x,padding:w,fontSize:S,controlHeightSM:k,fontSizeLG:E,colorTextLightSolid:O,colorErrorHover:j}=e,P=null!==(t=e.activeBarWidth)&&void 0!==t?t:0,N=null!==(n=e.activeBarBorderWidth)&&void 0!==n?n:p,M=null!==(r=e.itemMarginInline)&&void 0!==r?r:e.marginXXS,I=new 
C.C(O).setAlpha(.65).toRgbString();return{dropdownWidth:160,zIndexPopup:e.zIndexPopupBase+50,radiusItem:e.borderRadiusLG,itemBorderRadius:e.borderRadiusLG,radiusSubMenuItem:e.borderRadiusSM,subMenuItemBorderRadius:e.borderRadiusSM,colorItemText:c,itemColor:c,colorItemTextHover:c,itemHoverColor:c,colorItemTextHoverHorizontal:o,horizontalItemHoverColor:o,colorGroupTitle:s,groupTitleColor:s,colorItemTextSelected:o,itemSelectedColor:o,colorItemTextSelectedHorizontal:o,horizontalItemSelectedColor:o,colorItemBg:u,itemBg:u,colorItemBgHover:g,itemHoverBg:g,colorItemBgActive:f,itemActiveBg:m,colorSubItemBg:d,subMenuItemBg:d,colorItemBgSelected:m,itemSelectedBg:m,colorItemBgSelectedHorizontal:"transparent",horizontalItemSelectedBg:"transparent",colorActiveBarWidth:0,activeBarWidth:P,colorActiveBarHeight:h,activeBarHeight:h,colorActiveBarBorderSize:p,activeBarBorderWidth:N,colorItemTextDisabled:a,itemDisabledColor:a,colorDangerItemText:i,dangerItemColor:i,colorDangerItemTextHover:i,dangerItemHoverColor:i,colorDangerItemTextSelected:i,dangerItemSelectedColor:i,colorDangerItemBgActive:l,dangerItemActiveBg:l,colorDangerItemBgSelected:l,dangerItemSelectedBg:l,itemMarginInline:M,horizontalItemBorderRadius:0,horizontalItemHoverBg:"transparent",itemHeight:v,groupTitleLineHeight:y,collapsedWidth:2*v,popupBg:b,itemMarginBlock:x,itemPaddingInline:w,horizontalLineHeight:"".concat(1.15*v,"px"),iconSize:S,iconMarginInlineEnd:k-S,collapsedIconSize:E,groupTitleFontSize:S,darkItemDisabledColor:new C.C(O).setAlpha(.25).toRgbString(),darkItemColor:I,darkDangerItemColor:i,darkItemBg:"#001529",darkPopupBg:"#001529",darkSubMenuItemBg:"#000c17",darkItemSelectedColor:O,darkItemSelectedBg:o,darkDangerItemSelectedBg:i,darkItemHoverBg:"transparent",darkGroupTitleColor:I,darkItemHoverColor:O,darkDangerItemHoverColor:j,darkDangerItemSelectedColor:O,darkDangerItemActiveBg:i,itemWidth:P?"calc(100% + ".concat(N,"px)"):"calc(100% - ".concat(2*M,"px)")}};var H=n(64024),q=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let W=(0,r.forwardRef)((e,t)=>{var n,i;let l;let h=r.useContext(k.Z),g=h||{},{getPrefixCls:v,getPopupContainer:x,direction:E,menu:C}=r.useContext(p.E_),O=v(),{prefixCls:A,className:D,style:L,theme:z="light",expandIcon:W,_internalDisableMenuItemTitleTooltip:K,inlineCollapsed:V,siderCollapsed:U,items:G,children:X,rootClassName:$,mode:Y,selectable:Q,onClick:J,overflowedIndicatorPopupClassName:ee}=e,et=q(e,["prefixCls","className","style","theme","expandIcon","_internalDisableMenuItemTitleTooltip","inlineCollapsed","siderCollapsed","items","children","rootClassName","mode","selectable","onClick","overflowedIndicatorPopupClassName"]),en=(0,u.Z)(et,["collapsedWidth"]),er=r.useMemo(()=>G?function e(t){return(t||[]).map((t,n)=>{if(t&&"object"==typeof t){let{label:i,children:a,key:l,type:c}=t,s=S(t,["label","children","key","type"]),u=null!=l?l:"tmp-".concat(n);return a||"group"===c?"group"===c?r.createElement(o.BW,Object.assign({key:u},s,{title:i}),e(a)):r.createElement(w,Object.assign({key:u},s,{title:i}),e(a)):"divider"===c?r.createElement(m,Object.assign({key:u},s)):r.createElement(b,Object.assign({key:u},s),i)}return null}).filter(e=>e)}(G):G,[G])||X;null===(n=g.validator)||void 0===n||n.call(g,{mode:Y});let eo=(0,s.zX)(function(){var e;null==J||J.apply(void 
0,arguments),null===(e=g.onClick)||void 0===e||e.call(g)}),ei=g.mode||Y,ea=null!=Q?Q:g.selectable,el=r.useMemo(()=>void 0!==U?U:V,[V,U]),ec={horizontal:{motionName:"".concat(O,"-slide-up")},inline:(0,d.Z)(O),other:{motionName:"".concat(O,"-zoom-big")}},es=v("menu",A||g.prefixCls),eu=(0,H.Z)(es),[ed,ef,ep]=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:e,n=!(arguments.length>2)||void 0===arguments[2]||arguments[2];return(0,M.I$)("Menu",e=>{let{colorBgElevated:t,colorPrimary:n,colorTextLightSolid:r,controlHeightLG:o,fontSize:i,darkItemColor:a,darkDangerItemColor:l,darkItemBg:c,darkSubMenuItemBg:s,darkItemSelectedColor:u,darkItemSelectedBg:d,darkDangerItemSelectedBg:f,darkItemHoverBg:p,darkGroupTitleColor:h,darkItemHoverColor:m,darkItemDisabledColor:g,darkDangerItemHoverColor:v,darkDangerItemSelectedColor:y,darkDangerItemActiveBg:b,popupBg:x,darkPopupBg:w}=e,S=e.calc(i).div(7).mul(5).equal(),k=(0,I.TS)(e,{menuArrowSize:S,menuHorizontalHeight:e.calc(o).mul(1.15).equal(),menuArrowOffset:e.calc(S).mul(.25).equal(),menuPanelMaskInset:-7,menuSubMenuBg:t,calc:e.calc,popupBg:x}),E=(0,I.TS)(k,{itemColor:a,itemHoverColor:m,groupTitleColor:h,itemSelectedColor:u,itemBg:c,popupBg:w,subMenuItemBg:s,itemActiveBg:"transparent",itemSelectedBg:d,activeBarHeight:0,activeBarBorderWidth:0,itemHoverBg:p,itemDisabledColor:g,dangerItemColor:l,dangerItemHoverColor:v,dangerItemSelectedColor:y,dangerItemActiveBg:b,dangerItemSelectedBg:f,menuSubMenuBg:s,horizontalItemSelectedColor:r,horizontalItemSelectedBg:n});return[B(k),R(k),Z(k),_(k,"light"),_(E,"dark"),T(k),(0,j.Z)(k),(0,P.oN)(k,"slide-up"),(0,P.oN)(k,"slide-down"),(0,N._y)(k,"zoom-big")]},F,{deprecatedTokens:[["colorGroupTitle","groupTitleColor"],["radiusItem","itemBorderRadius"],["radiusSubMenuItem","subMenuItemBorderRadius"],["colorItemText","itemColor"],["colorItemTextHover","itemHoverColor"],["colorItemTextHoverHorizontal","horizontalItemHoverColor"],["colorItemTextSelected","itemSelectedColor"],["colorItemTextSelectedHorizontal","horizontalItemSelectedColor"],["colorItemTextDisabled","itemDisabledColor"],["colorDangerItemText","dangerItemColor"],["colorDangerItemTextHover","dangerItemHoverColor"],["colorDangerItemTextSelected","dangerItemSelectedColor"],["colorDangerItemBgActive","dangerItemActiveBg"],["colorDangerItemBgSelected","dangerItemSelectedBg"],["colorItemBg","itemBg"],["colorItemBgHover","itemHoverBg"],["colorSubItemBg","subMenuItemBg"],["colorItemBgActive","itemActiveBg"],["colorItemBgSelectedHorizontal","horizontalItemSelectedBg"],["colorActiveBarWidth","activeBarWidth"],["colorActiveBarHeight","activeBarHeight"],["colorActiveBarBorderSize","activeBarBorderWidth"],["colorItemBgSelected","itemSelectedBg"]],injectStyle:n,unitless:{groupTitleLineHeight:!0}})(e,t)}(es,eu,!h),eh=c()("".concat(es,"-").concat(z),null==C?void 0:C.className,D);if("function"==typeof W)l=W;else if(null===W||!1===W)l=null;else if(null===g.expandIcon||!1===g.expandIcon)l=null;else{let e=null!=W?W:g.expandIcon;l=(0,f.Tm)(e,{className:c()("".concat(es,"-submenu-expand-icon"),(0,f.l$)(e)?null===(i=e.props)||void 0===i?void 0:i.className:"")})}let em=r.useMemo(()=>({prefixCls:es,inlineCollapsed:el||!1,direction:E,firstLevel:!0,theme:z,mode:ei,disableMenuItemTitleTooltip:K}),[es,el,E,K,z]);return 
ed(r.createElement(k.Z.Provider,{value:null},r.createElement(y.Provider,{value:em},r.createElement(o.ZP,Object.assign({getPopupContainer:x,overflowedIndicator:r.createElement(a.Z,null),overflowedIndicatorPopupClassName:c()(es,"".concat(es,"-").concat(z),ee),mode:ei,selectable:ea,onClick:eo},en,{inlineCollapsed:el,style:Object.assign(Object.assign({},null==C?void 0:C.style),L),className:eh,prefixCls:es,direction:E,defaultMotions:ec,expandIcon:l,ref:t,rootClassName:c()($,ef,g.rootClassName,ep,eu)}),er))))}),K=(0,r.forwardRef)((e,t)=>{let n=(0,r.useRef)(null),o=r.useContext(i.D);return(0,r.useImperativeHandle)(t,()=>({menu:n.current,focus:e=>{var t;null===(t=n.current)||void 0===t||t.focus(e)}})),r.createElement(W,Object.assign({ref:n},e,o))});K.Item=b,K.SubMenu=w,K.Divider=m,K.ItemGroup=o.BW;var V=K},14301:function(e,t,n){"use strict";n.d(t,{Z:function(){return j}});var r=n(2265),o=n(36760),i=n.n(o);let a=e=>e?"function"==typeof e?e():e:null;var l=n(68710),c=n(71744),s=n(75957),u=n(5769),d=n(12918),f=n(691),p=n(88260),h=n(53454),m=n(80669),g=n(3104),v=n(34442);let y=e=>{let{componentCls:t,popoverColor:n,titleMinWidth:r,fontWeightStrong:o,innerPadding:i,boxShadowSecondary:a,colorTextHeading:l,borderRadiusLG:c,zIndexPopup:s,titleMarginBottom:u,colorBgElevated:f,popoverBg:h,titleBorderBottom:m,innerContentPadding:g,titlePadding:v}=e;return[{[t]:Object.assign(Object.assign({},(0,d.Wf)(e)),{position:"absolute",top:0,left:{_skip_check_:!0,value:0},zIndex:s,fontWeight:"normal",whiteSpace:"normal",textAlign:"start",cursor:"auto",userSelect:"text",transformOrigin:"var(--arrow-x, 50%) var(--arrow-y, 50%)","--antd-arrow-background-color":f,"&-rtl":{direction:"rtl"},"&-hidden":{display:"none"},["".concat(t,"-content")]:{position:"relative"},["".concat(t,"-inner")]:{backgroundColor:h,backgroundClip:"padding-box",borderRadius:c,boxShadow:a,padding:i},["".concat(t,"-title")]:{minWidth:r,marginBottom:u,color:l,fontWeight:o,borderBottom:m,padding:v},["".concat(t,"-inner-content")]:{color:n,padding:g}})},(0,p.ZP)(e,"var(--antd-arrow-background-color)"),{["".concat(t,"-pure")]:{position:"relative",maxWidth:"none",margin:e.sizePopupArrow,display:"inline-block",["".concat(t,"-content")]:{display:"inline-block"}}}]},b=e=>{let{componentCls:t}=e;return{[t]:h.i.map(n=>{let r=e["".concat(n,"6")];return{["&".concat(t,"-").concat(n)]:{"--antd-arrow-background-color":r,["".concat(t,"-inner")]:{backgroundColor:r},["".concat(t,"-arrow")]:{background:"transparent"}}}})}};var x=(0,m.I$)("Popover",e=>{let{colorBgElevated:t,colorText:n}=e,r=(0,g.TS)(e,{popoverBg:t,popoverColor:n});return[y(r),b(r),(0,f._y)(r,"zoom-big")]},e=>{let{lineWidth:t,controlHeight:n,fontHeight:r,padding:o,wireframe:i,zIndexPopupBase:a,borderRadiusLG:l,marginXS:c,lineType:s,colorSplit:u,paddingSM:d}=e,f=n-r;return Object.assign(Object.assign(Object.assign({titleMinWidth:177,zIndexPopup:a+30},(0,v.w)(e)),(0,p.wZ)({contentRadius:l,limitVerticalRadius:!0})),{innerPadding:i?0:12,titleMarginBottom:i?0:c,titlePadding:i?"".concat(f/2,"px ").concat(o,"px ").concat(f/2-t,"px"):0,titleBorderBottom:i?"".concat(t,"px ").concat(s," ").concat(u):"none",innerContentPadding:i?"".concat(d,"px ").concat(o,"px"):0})},{resetStyle:!1,deprecatedTokens:[["width","titleMinWidth"],["minWidth","titleMinWidth"]]}),w=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var 
o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let S=(e,t,n)=>t||n?r.createElement(r.Fragment,null,t&&r.createElement("div",{className:"".concat(e,"-title")},a(t)),r.createElement("div",{className:"".concat(e,"-inner-content")},a(n))):null,k=e=>{let{hashId:t,prefixCls:n,className:o,style:a,placement:l="top",title:c,content:s,children:d}=e;return r.createElement("div",{className:i()(t,n,"".concat(n,"-pure"),"".concat(n,"-placement-").concat(l),o),style:a},r.createElement("div",{className:"".concat(n,"-arrow")}),r.createElement(u.G,Object.assign({},e,{className:t,prefixCls:n}),d||S(n,c,s)))};var E=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let C=e=>{let{title:t,content:n,prefixCls:o}=e;return r.createElement(r.Fragment,null,t&&r.createElement("div",{className:"".concat(o,"-title")},a(t)),r.createElement("div",{className:"".concat(o,"-inner-content")},a(n)))},O=r.forwardRef((e,t)=>{let{prefixCls:n,title:o,content:a,overlayClassName:u,placement:d="top",trigger:f="hover",mouseEnterDelay:p=.1,mouseLeaveDelay:h=.1,overlayStyle:m={}}=e,g=E(e,["prefixCls","title","content","overlayClassName","placement","trigger","mouseEnterDelay","mouseLeaveDelay","overlayStyle"]),{getPrefixCls:v}=r.useContext(c.E_),y=v("popover",n),[b,w,S]=x(y),k=v(),O=i()(u,w,S);return b(r.createElement(s.Z,Object.assign({placement:d,trigger:f,mouseEnterDelay:p,mouseLeaveDelay:h,overlayStyle:m},g,{prefixCls:y,overlayClassName:O,ref:t,overlay:o||a?r.createElement(C,{prefixCls:y,title:o,content:a}):null,transitionName:(0,l.m)(k,"zoom-big",g.transitionName),"data-popover-inject":!0})))});O._InternalPanelDoNotUseOrYouWillBeFired=e=>{let{prefixCls:t,className:n}=e,o=w(e,["prefixCls","className"]),{getPrefixCls:a}=r.useContext(c.E_),l=a("popover",t),[s,u,d]=x(l);return s(r.createElement(k,Object.assign({},o,{prefixCls:l,hashId:u,className:i()(n,d)})))};var j=O},29967:function(e,t,n){"use strict";n.d(t,{ZP:function(){return _}});var r=n(2265),o=n(36760),i=n.n(o),a=n(50506),l=n(18242),c=n(71744),s=n(33759);let u=r.createContext(null),d=u.Provider,f=r.createContext(null),p=f.Provider;var h=n(20873),m=n(28791),g=n(6694),v=n(34709),y=n(86586),b=n(39109),x=n(352),w=n(12918),S=n(80669),k=n(3104);let E=e=>{let{componentCls:t,antCls:n}=e,r="".concat(t,"-group");return{[r]:Object.assign(Object.assign({},(0,w.Wf)(e)),{display:"inline-block",fontSize:0,["&".concat(r,"-rtl")]:{direction:"rtl"},["".concat(n,"-badge ").concat(n,"-badge-count")]:{zIndex:1},["> ".concat(n,"-badge:not(:first-child) > 
").concat(n,"-button-wrapper")]:{borderInlineStart:"none"}})}},C=e=>{let{componentCls:t,wrapperMarginInlineEnd:n,colorPrimary:r,radioSize:o,motionDurationSlow:i,motionDurationMid:a,motionEaseInOutCirc:l,colorBgContainer:c,colorBorder:s,lineWidth:u,colorBgContainerDisabled:d,colorTextDisabled:f,paddingXS:p,dotColorDisabled:h,lineType:m,radioColor:g,radioBgColor:v,calc:y}=e,b="".concat(t,"-inner"),S=y(o).sub(y(4).mul(2)),k=y(1).mul(o).equal();return{["".concat(t,"-wrapper")]:Object.assign(Object.assign({},(0,w.Wf)(e)),{display:"inline-flex",alignItems:"baseline",marginInlineStart:0,marginInlineEnd:n,cursor:"pointer",["&".concat(t,"-wrapper-rtl")]:{direction:"rtl"},"&-disabled":{cursor:"not-allowed",color:e.colorTextDisabled},"&::after":{display:"inline-block",width:0,overflow:"hidden",content:'"\\a0"'},["".concat(t,"-checked::after")]:{position:"absolute",insetBlockStart:0,insetInlineStart:0,width:"100%",height:"100%",border:"".concat((0,x.bf)(u)," ").concat(m," ").concat(r),borderRadius:"50%",visibility:"hidden",content:'""'},[t]:Object.assign(Object.assign({},(0,w.Wf)(e)),{position:"relative",display:"inline-block",outline:"none",cursor:"pointer",alignSelf:"center",borderRadius:"50%"}),["".concat(t,"-wrapper:hover &,\n &:hover ").concat(b)]:{borderColor:r},["".concat(t,"-input:focus-visible + ").concat(b)]:Object.assign({},(0,w.oN)(e)),["".concat(t,":hover::after, ").concat(t,"-wrapper:hover &::after")]:{visibility:"visible"},["".concat(t,"-inner")]:{"&::after":{boxSizing:"border-box",position:"absolute",insetBlockStart:"50%",insetInlineStart:"50%",display:"block",width:k,height:k,marginBlockStart:y(1).mul(o).div(-2).equal(),marginInlineStart:y(1).mul(o).div(-2).equal(),backgroundColor:g,borderBlockStart:0,borderInlineStart:0,borderRadius:k,transform:"scale(0)",opacity:0,transition:"all ".concat(i," ").concat(l),content:'""'},boxSizing:"border-box",position:"relative",insetBlockStart:0,insetInlineStart:0,display:"block",width:k,height:k,backgroundColor:c,borderColor:s,borderStyle:"solid",borderWidth:u,borderRadius:"50%",transition:"all ".concat(a)},["".concat(t,"-input")]:{position:"absolute",inset:0,zIndex:1,cursor:"pointer",opacity:0},["".concat(t,"-checked")]:{[b]:{borderColor:r,backgroundColor:v,"&::after":{transform:"scale(".concat(e.calc(e.dotSize).div(o).equal(),")"),opacity:1,transition:"all ".concat(i," ").concat(l)}}},["".concat(t,"-disabled")]:{cursor:"not-allowed",[b]:{backgroundColor:d,borderColor:s,cursor:"not-allowed","&::after":{backgroundColor:h}},["".concat(t,"-input")]:{cursor:"not-allowed"},["".concat(t,"-disabled + span")]:{color:f,cursor:"not-allowed"},["&".concat(t,"-checked")]:{[b]:{"&::after":{transform:"scale(".concat(y(S).div(o).equal({unit:!1}),")")}}}},["span".concat(t," + 
*")]:{paddingInlineStart:p,paddingInlineEnd:p}})}},O=e=>{let{buttonColor:t,controlHeight:n,componentCls:r,lineWidth:o,lineType:i,colorBorder:a,motionDurationSlow:l,motionDurationMid:c,buttonPaddingInline:s,fontSize:u,buttonBg:d,fontSizeLG:f,controlHeightLG:p,controlHeightSM:h,paddingXS:m,borderRadius:g,borderRadiusSM:v,borderRadiusLG:y,buttonCheckedBg:b,buttonSolidCheckedColor:S,colorTextDisabled:k,colorBgContainerDisabled:E,buttonCheckedBgDisabled:C,buttonCheckedColorDisabled:O,colorPrimary:j,colorPrimaryHover:P,colorPrimaryActive:N,buttonSolidCheckedBg:M,buttonSolidCheckedHoverBg:I,buttonSolidCheckedActiveBg:R,calc:T}=e;return{["".concat(r,"-button-wrapper")]:{position:"relative",display:"inline-block",height:n,margin:0,paddingInline:s,paddingBlock:0,color:t,fontSize:u,lineHeight:(0,x.bf)(T(n).sub(T(o).mul(2)).equal()),background:d,border:"".concat((0,x.bf)(o)," ").concat(i," ").concat(a),borderBlockStartWidth:T(o).add(.02).equal(),borderInlineStartWidth:0,borderInlineEndWidth:o,cursor:"pointer",transition:["color ".concat(c),"background ".concat(c),"box-shadow ".concat(c)].join(","),a:{color:t},["> ".concat(r,"-button")]:{position:"absolute",insetBlockStart:0,insetInlineStart:0,zIndex:-1,width:"100%",height:"100%"},"&:not(:first-child)":{"&::before":{position:"absolute",insetBlockStart:T(o).mul(-1).equal(),insetInlineStart:T(o).mul(-1).equal(),display:"block",boxSizing:"content-box",width:1,height:"100%",paddingBlock:o,paddingInline:0,backgroundColor:a,transition:"background-color ".concat(l),content:'""'}},"&:first-child":{borderInlineStart:"".concat((0,x.bf)(o)," ").concat(i," ").concat(a),borderStartStartRadius:g,borderEndStartRadius:g},"&:last-child":{borderStartEndRadius:g,borderEndEndRadius:g},"&:first-child:last-child":{borderRadius:g},["".concat(r,"-group-large &")]:{height:p,fontSize:f,lineHeight:(0,x.bf)(T(p).sub(T(o).mul(2)).equal()),"&:first-child":{borderStartStartRadius:y,borderEndStartRadius:y},"&:last-child":{borderStartEndRadius:y,borderEndEndRadius:y}},["".concat(r,"-group-small &")]:{height:h,paddingInline:T(m).sub(o).equal(),paddingBlock:0,lineHeight:(0,x.bf)(T(h).sub(T(o).mul(2)).equal()),"&:first-child":{borderStartStartRadius:v,borderEndStartRadius:v},"&:last-child":{borderStartEndRadius:v,borderEndEndRadius:v}},"&:hover":{position:"relative",color:j},"&:has(:focus-visible)":Object.assign({},(0,w.oN)(e)),["".concat(r,"-inner, input[type='checkbox'], input[type='radio']")]:{width:0,height:0,opacity:0,pointerEvents:"none"},["&-checked:not(".concat(r,"-button-wrapper-disabled)")]:{zIndex:1,color:j,background:b,borderColor:j,"&::before":{backgroundColor:j},"&:first-child":{borderColor:j},"&:hover":{color:P,borderColor:P,"&::before":{backgroundColor:P}},"&:active":{color:N,borderColor:N,"&::before":{backgroundColor:N}}},["".concat(r,"-group-solid &-checked:not(").concat(r,"-button-wrapper-disabled)")]:{color:S,background:M,borderColor:M,"&:hover":{color:S,background:I,borderColor:I},"&:active":{color:S,background:R,borderColor:R}},"&-disabled":{color:k,backgroundColor:E,borderColor:a,cursor:"not-allowed","&:first-child, &:hover":{color:k,backgroundColor:E,borderColor:a}},["&-disabled".concat(r,"-button-wrapper-checked")]:{color:O,backgroundColor:C,borderColor:a,boxShadow:"none"}}}};var j=(0,S.I$)("Radio",e=>{let{controlOutline:t,controlOutlineWidth:n}=e,r="0 0 0 ".concat((0,x.bf)(n)," 
").concat(t),o=(0,k.TS)(e,{radioFocusShadow:r,radioButtonFocusShadow:r});return[E(o),C(o),O(o)]},e=>{let{wireframe:t,padding:n,marginXS:r,lineWidth:o,fontSizeLG:i,colorText:a,colorBgContainer:l,colorTextDisabled:c,controlItemBgActiveDisabled:s,colorTextLightSolid:u,colorPrimary:d,colorPrimaryHover:f,colorPrimaryActive:p,colorWhite:h}=e;return{radioSize:i,dotSize:t?i-8:i-(4+o)*2,dotColorDisabled:c,buttonSolidCheckedColor:u,buttonSolidCheckedBg:d,buttonSolidCheckedHoverBg:f,buttonSolidCheckedActiveBg:p,buttonBg:l,buttonCheckedBg:l,buttonColor:a,buttonCheckedBgDisabled:s,buttonCheckedColorDisabled:c,buttonPaddingInline:n-o,wrapperMarginInlineEnd:r,radioColor:t?d:h,radioBgColor:t?l:d}},{unitless:{radioSize:!0,dotSize:!0}}),P=n(64024),N=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let M=r.forwardRef((e,t)=>{var n,o;let a=r.useContext(u),l=r.useContext(f),{getPrefixCls:s,direction:d,radio:p}=r.useContext(c.E_),x=r.useRef(null),w=(0,m.sQ)(t,x),{isFormItemInput:S}=r.useContext(b.aM),{prefixCls:k,className:E,rootClassName:C,children:O,style:M,title:I}=e,R=N(e,["prefixCls","className","rootClassName","children","style","title"]),T=s("radio",k),A="button"===((null==a?void 0:a.optionType)||l),_=A?"".concat(T,"-button"):T,D=(0,P.Z)(T),[Z,L,z]=j(T,D),B=Object.assign({},R),F=r.useContext(y.Z);a&&(B.name=a.name,B.onChange=t=>{var n,r;null===(n=e.onChange)||void 0===n||n.call(e,t),null===(r=null==a?void 0:a.onChange)||void 0===r||r.call(a,t)},B.checked=e.value===a.value,B.disabled=null!==(n=B.disabled)&&void 0!==n?n:a.disabled),B.disabled=null!==(o=B.disabled)&&void 0!==o?o:F;let H=i()("".concat(_,"-wrapper"),{["".concat(_,"-wrapper-checked")]:B.checked,["".concat(_,"-wrapper-disabled")]:B.disabled,["".concat(_,"-wrapper-rtl")]:"rtl"===d,["".concat(_,"-wrapper-in-form-item")]:S},null==p?void 0:p.className,E,C,L,z,D);return Z(r.createElement(g.Z,{component:"Radio",disabled:B.disabled},r.createElement("label",{className:H,style:Object.assign(Object.assign({},null==p?void 0:p.style),M),onMouseEnter:e.onMouseEnter,onMouseLeave:e.onMouseLeave,title:I},r.createElement(h.Z,Object.assign({},B,{className:i()(B.className,!A&&v.A),type:"radio",prefixCls:_,ref:w})),void 0!==O?r.createElement("span",null,O):null)))}),I=r.forwardRef((e,t)=>{let{getPrefixCls:n,direction:o}=r.useContext(c.E_),[u,f]=(0,a.Z)(e.defaultValue,{value:e.value}),{prefixCls:p,className:h,rootClassName:m,options:g,buttonStyle:v="outline",disabled:y,children:b,size:x,style:w,id:S,onMouseEnter:k,onMouseLeave:E,onFocus:C,onBlur:O}=e,N=n("radio",p),I="".concat(N,"-group"),R=(0,P.Z)(N),[T,A,_]=j(N,R),D=b;g&&g.length>0&&(D=g.map(e=>"string"==typeof e||"number"==typeof e?r.createElement(M,{key:e.toString(),prefixCls:N,disabled:y,value:e,checked:u===e},e):r.createElement(M,{key:"radio-group-value-options-".concat(e.value),prefixCls:N,disabled:e.disabled||y,value:e.value,checked:u===e.value,title:e.title,style:e.style,id:e.id,required:e.required},e.label)));let Z=(0,s.Z)(x),L=i()(I,"".concat(I,"-").concat(v),{["".concat(I,"-").concat(Z)]:Z,["".concat(I,"-rtl")]:"rtl"===o},h,m,A,_,R);return T(r.createElement("div",Object.assign({},(0,l.Z)(e,{aria:!0,data:!0}),{className:L,style:w,onMouseEnter:k,onMouseLeave:E,onFocus:C,onBlur:O,id:S,ref:t}),r.createElement(d,{value:{onChange:t=>{let 
n=t.target.value;"value"in e||f(n);let{onChange:r}=e;r&&n!==u&&r(t)},value:u,disabled:e.disabled,name:e.name,optionType:e.optionType}},D)))});var R=r.memo(I),T=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n},A=r.forwardRef((e,t)=>{let{getPrefixCls:n}=r.useContext(c.E_),{prefixCls:o}=e,i=T(e,["prefixCls"]),a=n("radio",o);return r.createElement(p,{value:"button"},r.createElement(M,Object.assign({prefixCls:a},i,{type:"radio",ref:t})))});M.Button=A,M.Group=R,M.__ANT_RADIO=!0;var _=M},47451:function(e,t,n){"use strict";var r=n(10295);t.Z=r.Z},52787:function(e,t,n){"use strict";n.d(t,{default:function(){return tt}});var r=n(2265),o=n(36760),i=n.n(o),a=n(1119),l=n(83145),c=n(11993),s=n(31686),u=n(26365),d=n(6989),f=n(41154),p=n(50506),h=n(32559),m=n(27380),g=n(79267),v=n(95814),y=n(28791),b=function(e){var t=e.className,n=e.customizeIcon,o=e.customizeIconProps,a=e.children,l=e.onMouseDown,c=e.onClick,s="function"==typeof n?n(o):n;return r.createElement("span",{className:t,onMouseDown:function(e){e.preventDefault(),null==l||l(e)},style:{userSelect:"none",WebkitUserSelect:"none"},unselectable:"on",onClick:c,"aria-hidden":!0},void 0!==s?s:r.createElement("span",{className:i()(t.split(/\s+/).map(function(e){return"".concat(e,"-icon")}))},a))},x=function(e,t,n,o,i){var a=arguments.length>5&&void 0!==arguments[5]&&arguments[5],l=arguments.length>6?arguments[6]:void 0,c=arguments.length>7?arguments[7]:void 0,s=r.useMemo(function(){return"object"===(0,f.Z)(o)?o.clearIcon:i||void 0},[o,i]);return{allowClear:r.useMemo(function(){return!a&&!!o&&(!!n.length||!!l)&&!("combobox"===c&&""===l)},[o,a,n.length,l,c]),clearIcon:r.createElement(b,{className:"".concat(e,"-clear"),onMouseDown:t,customizeIcon:s},"\xd7")}},w=r.createContext(null);function S(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:250,t=r.useRef(null),n=r.useRef(null);return r.useEffect(function(){return function(){window.clearTimeout(n.current)}},[]),[function(){return t.current},function(r){(r||null===t.current)&&(t.current=r),window.clearTimeout(n.current),n.current=window.setTimeout(function(){t.current=null},e)}]}var k=n(18242),E=n(1699),C=r.forwardRef(function(e,t){var n,o=e.prefixCls,a=e.id,l=e.inputElement,c=e.disabled,u=e.tabIndex,d=e.autoFocus,f=e.autoComplete,p=e.editable,m=e.activeDescendantId,g=e.value,v=e.maxLength,b=e.onKeyDown,x=e.onMouseDown,w=e.onChange,S=e.onPaste,k=e.onCompositionStart,E=e.onCompositionEnd,C=e.open,O=e.attrs,j=l||r.createElement("input",null),P=j,N=P.ref,M=P.props,I=M.onKeyDown,R=M.onChange,T=M.onMouseDown,A=M.onCompositionStart,_=M.onCompositionEnd,D=M.style;return(0,h.Kp)(!("maxLength"in j.props),"Passing 'maxLength' to input element directly may not work because input in BaseSelect is controlled."),j=r.cloneElement(j,(0,s.Z)((0,s.Z)((0,s.Z)({type:"search"},M),{},{id:a,ref:(0,y.sQ)(t,N),disabled:c,tabIndex:u,autoComplete:f||"off",autoFocus:d,className:i()("".concat(o,"-selection-search-input"),null===(n=j)||void 0===n||null===(n=n.props)||void 0===n?void 0:n.className),role:"combobox","aria-expanded":C||!1,"aria-haspopup":"listbox","aria-owns":"".concat(a,"_list"),"aria-autocomplete":"list","aria-controls":"".concat(a,"_list"),"aria-activedescendant":C?m:void 
0},O),{},{value:p?g:"",maxLength:v,readOnly:!p,unselectable:p?null:"on",style:(0,s.Z)((0,s.Z)({},D),{},{opacity:p?null:0}),onKeyDown:function(e){b(e),I&&I(e)},onMouseDown:function(e){x(e),T&&T(e)},onChange:function(e){w(e),R&&R(e)},onCompositionStart:function(e){k(e),A&&A(e)},onCompositionEnd:function(e){E(e),_&&_(e)},onPaste:S}))});function O(e){return Array.isArray(e)?e:void 0!==e?[e]:[]}var j="undefined"!=typeof window&&window.document&&window.document.documentElement;function P(e){return["string","number"].includes((0,f.Z)(e))}function N(e){var t=void 0;return e&&(P(e.title)?t=e.title.toString():P(e.label)&&(t=e.label.toString())),t}function M(e){var t;return null!==(t=e.key)&&void 0!==t?t:e.value}var I=function(e){e.preventDefault(),e.stopPropagation()},R=function(e){var t,n,o=e.id,a=e.prefixCls,l=e.values,s=e.open,d=e.searchValue,f=e.autoClearSearchValue,p=e.inputRef,h=e.placeholder,m=e.disabled,g=e.mode,v=e.showSearch,y=e.autoFocus,x=e.autoComplete,w=e.activeDescendantId,S=e.tabIndex,O=e.removeIcon,P=e.maxTagCount,R=e.maxTagTextLength,T=e.maxTagPlaceholder,A=void 0===T?function(e){return"+ ".concat(e.length," ...")}:T,_=e.tagRender,D=e.onToggleOpen,Z=e.onRemove,L=e.onInputChange,z=e.onInputPaste,B=e.onInputKeyDown,F=e.onInputMouseDown,H=e.onInputCompositionStart,q=e.onInputCompositionEnd,W=r.useRef(null),K=(0,r.useState)(0),V=(0,u.Z)(K,2),U=V[0],G=V[1],X=(0,r.useState)(!1),$=(0,u.Z)(X,2),Y=$[0],Q=$[1],J="".concat(a,"-selection"),ee=s||"multiple"===g&&!1===f||"tags"===g?d:"",et="tags"===g||"multiple"===g&&!1===f||v&&(s||Y);t=function(){G(W.current.scrollWidth)},n=[ee],j?r.useLayoutEffect(t,n):r.useEffect(t,n);var en=function(e,t,n,o,a){return r.createElement("span",{title:N(e),className:i()("".concat(J,"-item"),(0,c.Z)({},"".concat(J,"-item-disabled"),n))},r.createElement("span",{className:"".concat(J,"-item-content")},t),o&&r.createElement(b,{className:"".concat(J,"-item-remove"),onMouseDown:I,onClick:a,customizeIcon:O},"\xd7"))},er=r.createElement("div",{className:"".concat(J,"-search"),style:{width:U},onFocus:function(){Q(!0)},onBlur:function(){Q(!1)}},r.createElement(C,{ref:p,open:s,prefixCls:a,id:o,inputElement:null,disabled:m,autoFocus:y,autoComplete:x,editable:et,activeDescendantId:w,value:ee,onKeyDown:B,onMouseDown:F,onChange:L,onPaste:z,onCompositionStart:H,onCompositionEnd:q,tabIndex:S,attrs:(0,k.Z)(e,!0)}),r.createElement("span",{ref:W,className:"".concat(J,"-search-mirror"),"aria-hidden":!0},ee,"\xa0")),eo=r.createElement(E.Z,{prefixCls:"".concat(J,"-overflow"),data:l,renderItem:function(e){var t,n=e.disabled,o=e.label,i=e.value,a=!m&&!n,l=o;if("number"==typeof R&&("string"==typeof o||"number"==typeof o)){var c=String(l);c.length>R&&(l="".concat(c.slice(0,R),"..."))}var u=function(t){t&&t.stopPropagation(),Z(e)};return"function"==typeof _?(t=l,r.createElement("span",{onMouseDown:function(e){I(e),D(!s)}},_({label:t,value:i,disabled:n,closable:a,onClose:u}))):en(e,l,n,a,u)},renderRest:function(e){var t="function"==typeof A?A(e):A;return en({title:t},t,!1)},suffix:er,itemKey:M,maxCount:P});return r.createElement(r.Fragment,null,eo,!l.length&&!ee&&r.createElement("span",{className:"".concat(J,"-placeholder")},h))},T=function(e){var 
t=e.inputElement,n=e.prefixCls,o=e.id,i=e.inputRef,a=e.disabled,l=e.autoFocus,c=e.autoComplete,s=e.activeDescendantId,d=e.mode,f=e.open,p=e.values,h=e.placeholder,m=e.tabIndex,g=e.showSearch,v=e.searchValue,y=e.activeValue,b=e.maxLength,x=e.onInputKeyDown,w=e.onInputMouseDown,S=e.onInputChange,E=e.onInputPaste,O=e.onInputCompositionStart,j=e.onInputCompositionEnd,P=e.title,M=r.useState(!1),I=(0,u.Z)(M,2),R=I[0],T=I[1],A="combobox"===d,_=A||g,D=p[0],Z=v||"";A&&y&&!R&&(Z=y),r.useEffect(function(){A&&T(!1)},[A,y]);var L=("combobox"===d||!!f||!!g)&&!!Z,z=void 0===P?N(D):P,B=r.useMemo(function(){return D?null:r.createElement("span",{className:"".concat(n,"-selection-placeholder"),style:L?{visibility:"hidden"}:void 0},h)},[D,L,h,n]);return r.createElement(r.Fragment,null,r.createElement("span",{className:"".concat(n,"-selection-search")},r.createElement(C,{ref:i,prefixCls:n,id:o,open:f,inputElement:t,disabled:a,autoFocus:l,autoComplete:c,editable:_,activeDescendantId:s,value:Z,onKeyDown:x,onMouseDown:w,onChange:function(e){T(!0),S(e)},onPaste:E,onCompositionStart:O,onCompositionEnd:j,tabIndex:m,attrs:(0,k.Z)(e,!0),maxLength:A?b:void 0})),!A&&D?r.createElement("span",{className:"".concat(n,"-selection-item"),title:z,style:L?{visibility:"hidden"}:void 0},D.label):null,B)},A=r.forwardRef(function(e,t){var n=(0,r.useRef)(null),o=(0,r.useRef)(!1),i=e.prefixCls,l=e.open,c=e.mode,s=e.showSearch,d=e.tokenWithEnter,f=e.autoClearSearchValue,p=e.onSearch,h=e.onSearchSubmit,m=e.onToggleOpen,g=e.onInputKeyDown,y=e.domRef;r.useImperativeHandle(t,function(){return{focus:function(){n.current.focus()},blur:function(){n.current.blur()}}});var b=S(0),x=(0,u.Z)(b,2),w=x[0],k=x[1],E=(0,r.useRef)(null),C=function(e){!1!==p(e,!0,o.current)&&m(!0)},O={inputRef:n,onInputKeyDown:function(e){var t=e.which;(t===v.Z.UP||t===v.Z.DOWN)&&e.preventDefault(),g&&g(e),t!==v.Z.ENTER||"tags"!==c||o.current||l||null==h||h(e.target.value),[v.Z.ESC,v.Z.SHIFT,v.Z.BACKSPACE,v.Z.TAB,v.Z.WIN_KEY,v.Z.ALT,v.Z.META,v.Z.WIN_KEY_RIGHT,v.Z.CTRL,v.Z.SEMICOLON,v.Z.EQUALS,v.Z.CAPS_LOCK,v.Z.CONTEXT_MENU,v.Z.F1,v.Z.F2,v.Z.F3,v.Z.F4,v.Z.F5,v.Z.F6,v.Z.F7,v.Z.F8,v.Z.F9,v.Z.F10,v.Z.F11,v.Z.F12].includes(t)||m(!0)},onInputMouseDown:function(){k(!0)},onInputChange:function(e){var t=e.target.value;if(d&&E.current&&/[\r\n]/.test(E.current)){var n=E.current.replace(/[\r\n]+$/,"").replace(/\r\n/g," ").replace(/[\r\n]/g," ");t=t.replace(n,E.current)}E.current=null,C(t)},onInputPaste:function(e){var t=e.clipboardData,n=null==t?void 0:t.getData("text");E.current=n||""},onInputCompositionStart:function(){o.current=!0},onInputCompositionEnd:function(e){o.current=!1,"combobox"!==c&&C(e.target.value)}},j="multiple"===c||"tags"===c?r.createElement(R,(0,a.Z)({},e,O)):r.createElement(T,(0,a.Z)({},e,O));return r.createElement("div",{ref:y,className:"".concat(i,"-selector"),onClick:function(e){e.target!==n.current&&(void 0!==document.body.style.msTouchAction?setTimeout(function(){n.current.focus()}):n.current.focus())},onMouseDown:function(e){var t=w();e.target===n.current||t||"combobox"===c||e.preventDefault(),("combobox"===c||s&&t)&&l||(l&&!1!==f&&p("",!0,!1),m())}},j)}),_=n(97821),D=["prefixCls","disabled","visible","children","popupElement","animation","transitionName","dropdownStyle","dropdownClassName","direction","placement","builtinPlacements","dropdownMatchSelectWidth","dropdownRender","dropdownAlign","getPopupContainer","empty","getTriggerDOMNode","onPopupVisibleChange","onPopupMouseEnter"],Z=function(e){var 
t=!0===e?0:1;return{bottomLeft:{points:["tl","bl"],offset:[0,4],overflow:{adjustX:t,adjustY:1},htmlRegion:"scroll"},bottomRight:{points:["tr","br"],offset:[0,4],overflow:{adjustX:t,adjustY:1},htmlRegion:"scroll"},topLeft:{points:["bl","tl"],offset:[0,-4],overflow:{adjustX:t,adjustY:1},htmlRegion:"scroll"},topRight:{points:["br","tr"],offset:[0,-4],overflow:{adjustX:t,adjustY:1},htmlRegion:"scroll"}}},L=r.forwardRef(function(e,t){var n=e.prefixCls,o=(e.disabled,e.visible),l=e.children,u=e.popupElement,f=e.animation,p=e.transitionName,h=e.dropdownStyle,m=e.dropdownClassName,g=e.direction,v=e.placement,y=e.builtinPlacements,b=e.dropdownMatchSelectWidth,x=e.dropdownRender,w=e.dropdownAlign,S=e.getPopupContainer,k=e.empty,E=e.getTriggerDOMNode,C=e.onPopupVisibleChange,O=e.onPopupMouseEnter,j=(0,d.Z)(e,D),P="".concat(n,"-dropdown"),N=u;x&&(N=x(u));var M=r.useMemo(function(){return y||Z(b)},[y,b]),I=f?"".concat(P,"-").concat(f):p,R="number"==typeof b,T=r.useMemo(function(){return R?null:!1===b?"minWidth":"width"},[b,R]),A=h;R&&(A=(0,s.Z)((0,s.Z)({},A),{},{width:b}));var L=r.useRef(null);return r.useImperativeHandle(t,function(){return{getPopupElement:function(){return L.current}}}),r.createElement(_.Z,(0,a.Z)({},j,{showAction:C?["click"]:[],hideAction:C?["click"]:[],popupPlacement:v||("rtl"===(void 0===g?"ltr":g)?"bottomRight":"bottomLeft"),builtinPlacements:M,prefixCls:P,popupTransitionName:I,popup:r.createElement("div",{ref:L,onMouseEnter:O},N),stretch:T,popupAlign:w,popupVisible:o,getPopupContainer:S,popupClassName:i()(m,(0,c.Z)({},"".concat(P,"-empty"),k)),popupStyle:A,getTriggerDOMNode:E,onPopupVisibleChange:C}),l)}),z=n(87099);function B(e,t){var n,r=e.key;return("value"in e&&(n=e.value),null!=r)?r:void 0!==n?n:"rc-index-key-".concat(t)}function F(e,t){var n=e||{},r=n.label,o=n.value,i=n.options,a=n.groupLabel,l=r||(t?"children":"label");return{label:l,value:o||"value",options:i||"options",groupLabel:a||l}}function H(e){var t=(0,s.Z)({},e);return"props"in t||Object.defineProperty(t,"props",{get:function(){return(0,h.ZP)(!1,"Return type is option instead of Option instance. 
Please read value directly instead of reading from `props`."),t}}),t}var q=function(e,t,n){if(!t||!t.length)return null;var r=!1,o=function e(t,n){var o=(0,z.Z)(n),i=o[0],a=o.slice(1);if(!i)return[t];var c=t.split(i);return r=r||c.length>1,c.reduce(function(t,n){return[].concat((0,l.Z)(t),(0,l.Z)(e(n,a)))},[]).filter(Boolean)}(e,t);return r?void 0!==n?o.slice(0,n):o:null},W=r.createContext(null),K=["id","prefixCls","className","showSearch","tagRender","direction","omitDomProps","displayValues","onDisplayValuesChange","emptyOptions","notFoundContent","onClear","mode","disabled","loading","getInputElement","getRawInputElement","open","defaultOpen","onDropdownVisibleChange","activeValue","onActiveValueChange","activeDescendantId","searchValue","autoClearSearchValue","onSearch","onSearchSplit","tokenSeparators","allowClear","suffixIcon","clearIcon","OptionList","animation","transitionName","dropdownStyle","dropdownClassName","dropdownMatchSelectWidth","dropdownRender","dropdownAlign","placement","builtinPlacements","getPopupContainer","showAction","onFocus","onBlur","onKeyUp","onKeyDown","onMouseDown"],V=["value","onChange","removeIcon","placeholder","autoFocus","maxTagCount","maxTagTextLength","maxTagPlaceholder","choiceTransitionName","onInputKeyDown","onPopupScroll","tabIndex"],U=function(e){return"tags"===e||"multiple"===e},G=r.forwardRef(function(e,t){var n,o,h,k,E,C,O,j,P=e.id,N=e.prefixCls,M=e.className,I=e.showSearch,R=e.tagRender,T=e.direction,_=e.omitDomProps,D=e.displayValues,Z=e.onDisplayValuesChange,z=e.emptyOptions,B=e.notFoundContent,F=void 0===B?"Not Found":B,H=e.onClear,G=e.mode,X=e.disabled,$=e.loading,Y=e.getInputElement,Q=e.getRawInputElement,J=e.open,ee=e.defaultOpen,et=e.onDropdownVisibleChange,en=e.activeValue,er=e.onActiveValueChange,eo=e.activeDescendantId,ei=e.searchValue,ea=e.autoClearSearchValue,el=e.onSearch,ec=e.onSearchSplit,es=e.tokenSeparators,eu=e.allowClear,ed=e.suffixIcon,ef=e.clearIcon,ep=e.OptionList,eh=e.animation,em=e.transitionName,eg=e.dropdownStyle,ev=e.dropdownClassName,ey=e.dropdownMatchSelectWidth,eb=e.dropdownRender,ex=e.dropdownAlign,ew=e.placement,eS=e.builtinPlacements,ek=e.getPopupContainer,eE=e.showAction,eC=void 0===eE?[]:eE,eO=e.onFocus,ej=e.onBlur,eP=e.onKeyUp,eN=e.onKeyDown,eM=e.onMouseDown,eI=(0,d.Z)(e,K),eR=U(G),eT=(void 0!==I?I:eR)||"combobox"===G,eA=(0,s.Z)({},eI);V.forEach(function(e){delete eA[e]}),null==_||_.forEach(function(e){delete eA[e]});var e_=r.useState(!1),eD=(0,u.Z)(e_,2),eZ=eD[0],eL=eD[1];r.useEffect(function(){eL((0,g.Z)())},[]);var ez=r.useRef(null),eB=r.useRef(null),eF=r.useRef(null),eH=r.useRef(null),eq=r.useRef(null),eW=r.useRef(!1),eK=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:10,t=r.useState(!1),n=(0,u.Z)(t,2),o=n[0],i=n[1],a=r.useRef(null),l=function(){window.clearTimeout(a.current)};return r.useEffect(function(){return l},[]),[o,function(t,n){l(),a.current=window.setTimeout(function(){i(t),n&&n()},e)},l]}(),eV=(0,u.Z)(eK,3),eU=eV[0],eG=eV[1],eX=eV[2];r.useImperativeHandle(t,function(){var e,t;return{focus:null===(e=eH.current)||void 0===e?void 0:e.focus,blur:null===(t=eH.current)||void 0===t?void 0:t.blur,scrollTo:function(e){var t;return null===(t=eq.current)||void 0===t?void 0:t.scrollTo(e)}}});var e$=r.useMemo(function(){if("combobox"!==G)return ei;var e,t=null===(e=D[0])||void 0===e?void 0:e.value;return"string"==typeof t||"number"==typeof t?String(t):""},[ei,G,D]),eY="combobox"===G&&"function"==typeof Y&&Y()||null,eQ="function"==typeof 
Q&&Q(),eJ=(0,y.x1)(eB,null==eQ||null===(k=eQ.props)||void 0===k?void 0:k.ref),e0=r.useState(!1),e1=(0,u.Z)(e0,2),e2=e1[0],e6=e1[1];(0,m.Z)(function(){e6(!0)},[]);var e4=(0,p.Z)(!1,{defaultValue:ee,value:J}),e3=(0,u.Z)(e4,2),e5=e3[0],e8=e3[1],e7=!!e2&&e5,e9=!F&&z;(X||e9&&e7&&"combobox"===G)&&(e7=!1);var te=!e9&&e7,tt=r.useCallback(function(e){var t=void 0!==e?e:!e7;X||(e8(t),e7!==t&&(null==et||et(t)))},[X,e7,e8,et]),tn=r.useMemo(function(){return(es||[]).some(function(e){return["\n","\r\n"].includes(e)})},[es]),tr=r.useContext(W)||{},to=tr.maxCount,ti=tr.rawValues,ta=function(e,t,n){if(!((null==ti?void 0:ti.size)>=to)){var r=!0,o=e;null==er||er(null);var i=q(e,es,to&&to-ti.size),a=n?null:i;return"combobox"!==G&&a&&(o="",null==ec||ec(a),tt(!1),r=!1),el&&e$!==o&&el(o,{source:t?"typing":"effect"}),r}};r.useEffect(function(){e7||eR||"combobox"===G||ta("",!1,!1)},[e7]),r.useEffect(function(){e5&&X&&e8(!1),X&&!eW.current&&eG(!1)},[X]);var tl=S(),tc=(0,u.Z)(tl,2),ts=tc[0],tu=tc[1],td=r.useRef(!1),tf=[];r.useEffect(function(){return function(){tf.forEach(function(e){return clearTimeout(e)}),tf.splice(0,tf.length)}},[]);var tp=r.useState({}),th=(0,u.Z)(tp,2)[1];eQ&&(C=function(e){tt(e)}),n=function(){var e;return[ez.current,null===(e=eF.current)||void 0===e?void 0:e.getPopupElement()]},o=!!eQ,(h=r.useRef(null)).current={open:te,triggerOpen:tt,customizedTrigger:o},r.useEffect(function(){function e(e){if(null===(t=h.current)||void 0===t||!t.customizedTrigger){var t,r=e.target;r.shadowRoot&&e.composed&&(r=e.composedPath()[0]||r),h.current.open&&n().filter(function(e){return e}).every(function(e){return!e.contains(r)&&e!==r})&&h.current.triggerOpen(!1)}}return window.addEventListener("mousedown",e),function(){return window.removeEventListener("mousedown",e)}},[]);var tm=r.useMemo(function(){return(0,s.Z)((0,s.Z)({},e),{},{notFoundContent:F,open:e7,triggerOpen:te,id:P,showSearch:eT,multiple:eR,toggleOpen:tt})},[e,F,te,e7,P,eT,eR,tt]),tg=!!ed||$;tg&&(O=r.createElement(b,{className:i()("".concat(N,"-arrow"),(0,c.Z)({},"".concat(N,"-arrow-loading"),$)),customizeIcon:ed,customizeIconProps:{loading:$,searchValue:e$,open:e7,focused:eU,showSearch:eT}}));var tv=x(N,function(){var e;null==H||H(),null===(e=eH.current)||void 0===e||e.focus(),Z([],{type:"clear",values:D}),ta("",!1,!1)},D,eu,ef,X,e$,G),ty=tv.allowClear,tb=tv.clearIcon,tx=r.createElement(ep,{ref:eq}),tw=i()(N,M,(E={},(0,c.Z)(E,"".concat(N,"-focused"),eU),(0,c.Z)(E,"".concat(N,"-multiple"),eR),(0,c.Z)(E,"".concat(N,"-single"),!eR),(0,c.Z)(E,"".concat(N,"-allow-clear"),eu),(0,c.Z)(E,"".concat(N,"-show-arrow"),tg),(0,c.Z)(E,"".concat(N,"-disabled"),X),(0,c.Z)(E,"".concat(N,"-loading"),$),(0,c.Z)(E,"".concat(N,"-open"),e7),(0,c.Z)(E,"".concat(N,"-customize-input"),eY),(0,c.Z)(E,"".concat(N,"-show-search"),eT),E)),tS=r.createElement(L,{ref:eF,disabled:X,prefixCls:N,visible:te,popupElement:tx,animation:eh,transitionName:em,dropdownStyle:eg,dropdownClassName:ev,direction:T,dropdownMatchSelectWidth:ey,dropdownRender:eb,dropdownAlign:ex,placement:ew,builtinPlacements:eS,getPopupContainer:ek,empty:z,getTriggerDOMNode:function(){return 
eB.current},onPopupVisibleChange:C,onPopupMouseEnter:function(){th({})}},eQ?r.cloneElement(eQ,{ref:eJ}):r.createElement(A,(0,a.Z)({},e,{domRef:eB,prefixCls:N,inputElement:eY,ref:eH,id:P,showSearch:eT,autoClearSearchValue:ea,mode:G,activeDescendantId:eo,tagRender:R,values:D,open:e7,onToggleOpen:tt,activeValue:en,searchValue:e$,onSearch:ta,onSearchSubmit:function(e){e&&e.trim()&&el(e,{source:"submit"})},onRemove:function(e){Z(D.filter(function(t){return t!==e}),{type:"remove",values:[e]})},tokenWithEnter:tn})));return j=eQ?tS:r.createElement("div",(0,a.Z)({className:tw},eA,{ref:ez,onMouseDown:function(e){var t,n=e.target,r=null===(t=eF.current)||void 0===t?void 0:t.getPopupElement();if(r&&r.contains(n)){var o=setTimeout(function(){var e,t=tf.indexOf(o);-1!==t&&tf.splice(t,1),eX(),eZ||r.contains(document.activeElement)||null===(e=eH.current)||void 0===e||e.focus()});tf.push(o)}for(var i=arguments.length,a=Array(i>1?i-1:0),l=1;l=0;a-=1){var c=o[a];if(!c.disabled){o.splice(a,1),i=c;break}}i&&Z(o,{type:"remove",values:[i]})}for(var s=arguments.length,u=Array(s>1?s-1:0),d=1;d1?n-1:0),o=1;o=E},[p,E,null==M?void 0:M.size]),F=function(e){e.preventDefault()},H=function(e){var t;null===(t=z.current)||void 0===t||t.scrollTo("number"==typeof e?{index:e}:e)},q=function(e){for(var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,n=L.length,r=0;r1&&void 0!==arguments[1]&&arguments[1];G(e);var n={source:t?"keyboard":"mouse"},r=L[e];if(!r){O(null,-1,n);return}O(r.value,e,n)};(0,r.useEffect)(function(){X(!1!==j?q(0):-1)},[L.length,m]);var $=r.useCallback(function(e){return M.has(e)&&"combobox"!==h},[h,(0,l.Z)(M).toString(),M.size]);(0,r.useEffect)(function(){var e,t=setTimeout(function(){if(!p&&f&&1===M.size){var e=Array.from(M)[0],t=L.findIndex(function(t){return t.data.value===e});-1!==t&&(X(t),H(t))}});return f&&(null===(e=z.current)||void 0===e||e.scrollTo(void 0)),function(){return clearTimeout(t)}},[f,m]);var en=function(e){void 0!==e&&P(e,{selected:!M.has(e)}),p||g(!1)};if(r.useImperativeHandle(t,function(){return{onKeyDown:function(e){var t=e.which,n=e.ctrlKey;switch(t){case v.Z.N:case v.Z.P:case v.Z.UP:case v.Z.DOWN:var r=0;if(t===v.Z.UP?r=-1:t===v.Z.DOWN?r=1:/(mac\sos|macintosh)/i.test(navigator.appVersion)&&n&&(t===v.Z.N?r=1:t===v.Z.P&&(r=-1)),0!==r){var o=q(U+r,r);H(o),X(o,!0)}break;case v.Z.ENTER:var i,a=L[U];!a||null!=a&&null!==(i=a.data)&&void 0!==i&&i.disabled||B?en(void 0):en(a.value),f&&e.preventDefault();break;case v.Z.ESC:g(!1),f&&e.stopPropagation()}},onKeyUp:function(){},scrollTo:function(e){H(e)}}}),0===L.length)return r.createElement("div",{role:"listbox",id:"".concat(s,"_list"),className:"".concat(Z,"-empty"),onMouseDown:F},y);var er=Object.keys(I).map(function(e){return I[e]}),eo=function(e){return e.label};function ei(e,t){return{role:e.group?"presentation":"option",id:"".concat(s,"_list_").concat(t)}}var ea=function(e){var t=L[e];if(!t)return null;var n=t.data||{},o=n.value,i=t.group,l=(0,k.Z)(n,!0),c=eo(t);return t?r.createElement("div",(0,a.Z)({"aria-label":"string"!=typeof c||i?null:c},l,{key:e},ei(t,e),{"aria-selected":$(o)}),o):null},el={role:"listbox",id:"".concat(s,"_list")};return r.createElement(r.Fragment,null,R&&r.createElement("div",(0,a.Z)({},el,{style:{height:0,width:0,overflow:"hidden"}}),ea(U-1),ea(U),ea(U+1)),r.createElement(J.Z,{itemKey:"key",ref:z,data:L,height:A,itemHeight:_,fullHeight:!1,onMouseDown:F,onScroll:x,virtual:R,direction:T,innerProps:R?null:el},function(e,t){var n=e.group,o=e.groupOption,l=e.data,s=e.label,u=e.value,f=l.key;if(n){var 
p,h,m=null!==(h=l.title)&&void 0!==h?h:et(s)?s.toString():void 0;return r.createElement("div",{className:i()(Z,"".concat(Z,"-group")),title:m},void 0!==s?s:f)}var g=l.disabled,v=l.title,y=(l.children,l.style),x=l.className,w=(0,d.Z)(l,ee),S=(0,Q.Z)(w,er),E=$(u),C=g||!E&&B,O="".concat(Z,"-option"),j=i()(Z,O,x,(p={},(0,c.Z)(p,"".concat(O,"-grouped"),o),(0,c.Z)(p,"".concat(O,"-active"),U===t&&!C),(0,c.Z)(p,"".concat(O,"-disabled"),C),(0,c.Z)(p,"".concat(O,"-selected"),E),p)),P=eo(e),M=!N||"function"==typeof N||E,I="number"==typeof P?P:P||u,T=et(I)?I.toString():void 0;return void 0!==v&&(T=v),r.createElement("div",(0,a.Z)({},(0,k.Z)(S),R?{}:ei(e,t),{"aria-selected":E,className:j,title:T,onMouseMove:function(){U===t||C||X(t)},onClick:function(){C||en(u)},style:y}),r.createElement("div",{className:"".concat(O,"-content")},"function"==typeof D?D(e,{index:t}):I),r.isValidElement(N)||E,M&&r.createElement(b,{className:"".concat(Z,"-option-state"),customizeIcon:N,customizeIconProps:{value:u,disabled:C,isSelected:E}},E?"✓":null))}))}),er=function(e,t){var n=r.useRef({values:new Map,options:new Map});return[r.useMemo(function(){var r=n.current,o=r.values,i=r.options,a=e.map(function(e){if(void 0===e.label){var t;return(0,s.Z)((0,s.Z)({},e),{},{label:null===(t=o.get(e.value))||void 0===t?void 0:t.label})}return e}),l=new Map,c=new Map;return a.forEach(function(e){l.set(e.value,e),c.set(e.value,t.get(e.value)||i.get(e.value))}),n.current.values=l,n.current.options=c,a},[e,t]),r.useCallback(function(e){return t.get(e)||n.current.options.get(e)},[t])]};function eo(e,t){return O(e).join("").toUpperCase().includes(t)}var ei=n(94981),ea=0,el=(0,ei.Z)(),ec=n(45287),es=["children","value"],eu=["children"];function ed(e){var t=r.useRef();return t.current=e,r.useCallback(function(){return t.current.apply(t,arguments)},[])}var ef=["id","mode","prefixCls","backfill","fieldNames","inputValue","searchValue","onSearch","autoClearSearchValue","onSelect","onDeselect","dropdownMatchSelectWidth","filterOption","filterSort","optionFilterProp","optionLabelProp","options","optionRender","children","defaultActiveFirstOption","menuItemSelectedIcon","virtual","direction","listHeight","listItemHeight","value","defaultValue","labelInValue","onChange","maxCount"],ep=["inputValue"],eh=r.forwardRef(function(e,t){var n,o,i,h,m,g=e.id,v=e.mode,y=e.prefixCls,b=e.backfill,x=e.fieldNames,w=e.inputValue,S=e.searchValue,k=e.onSearch,E=e.autoClearSearchValue,C=void 0===E||E,j=e.onSelect,P=e.onDeselect,N=e.dropdownMatchSelectWidth,M=void 0===N||N,I=e.filterOption,R=e.filterSort,T=e.optionFilterProp,A=e.optionLabelProp,_=e.options,D=e.optionRender,Z=e.children,L=e.defaultActiveFirstOption,z=e.menuItemSelectedIcon,q=e.virtual,K=e.direction,V=e.listHeight,X=void 0===V?200:V,$=e.listItemHeight,Y=void 0===$?20:$,Q=e.value,J=e.defaultValue,ee=e.labelInValue,et=e.onChange,ei=e.maxCount,eh=(0,d.Z)(e,ef),em=(n=r.useState(),i=(o=(0,u.Z)(n,2))[0],h=o[1],r.useEffect(function(){var e;h("rc_select_".concat((el?(e=ea,ea+=1):e="TEST_OR_SSR",e)))},[]),g||i),eg=U(v),ev=!!(!_&&Z),ey=r.useMemo(function(){return(void 0!==I||"combobox"!==v)&&I},[I,v]),eb=r.useMemo(function(){return F(x,ev)},[JSON.stringify(x),ev]),ex=(0,p.Z)("",{value:void 0!==S?S:w,postState:function(e){return e||""}}),ew=(0,u.Z)(ex,2),eS=ew[0],ek=ew[1],eE=r.useMemo(function(){var e=_;_||(e=function e(t){var n=arguments.length>1&&void 0!==arguments[1]&&arguments[1];return(0,ec.Z)(t).map(function(t,o){if(!r.isValidElement(t)||!t.type)return null;var 
i,a,l,c,u,f=t.type.isSelectOptGroup,p=t.key,h=t.props,m=h.children,g=(0,d.Z)(h,eu);return n||!f?(i=t.key,l=(a=t.props).children,c=a.value,u=(0,d.Z)(a,es),(0,s.Z)({key:i,value:void 0!==c?c:i,children:l},u)):(0,s.Z)((0,s.Z)({key:"__RC_SELECT_GRP__".concat(null===p?o:p,"__"),label:p},g),{},{options:e(m)})}).filter(function(e){return e})}(Z));var t=new Map,n=new Map,o=function(e,t,n){n&&"string"==typeof n&&e.set(t[n],t)};return function e(r){for(var i=arguments.length>1&&void 0!==arguments[1]&&arguments[1],a=0;a1&&void 0!==arguments[1]?arguments[1]:{},n=t.fieldNames,r=t.childrenAsData,o=[],i=F(n,!1),a=i.label,l=i.value,c=i.options,s=i.groupLabel;return!function e(t,n){Array.isArray(t)&&t.forEach(function(t){if(!n&&c in t){var i=t[s];void 0===i&&r&&(i=t.label),o.push({key:B(t,o.length),group:!0,data:t,label:i}),e(t[c],!0)}else{var u=t[l];o.push({key:B(t,o.length),groupOption:n,data:t,label:t[a],value:u})}})}(e,!1),o}(eH,{fieldNames:eb,childrenAsData:ev})},[eH,eb,ev]),eW=function(e){var t=eP(e);if(eR(t),et&&(t.length!==e_.length||t.some(function(e,t){var n;return(null===(n=e_[t])||void 0===n?void 0:n.value)!==(null==e?void 0:e.value)}))){var n=ee?t:t.map(function(e){return e.value}),r=t.map(function(e){return H(eD(e.value))});et(eg?n:n[0],eg?r:r[0])}},eK=r.useState(null),eV=(0,u.Z)(eK,2),eU=eV[0],eG=eV[1],eX=r.useState(0),e$=(0,u.Z)(eX,2),eY=e$[0],eQ=e$[1],eJ=void 0!==L?L:"combobox"!==v,e0=r.useCallback(function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.source;eQ(t),b&&"combobox"===v&&null!==e&&"keyboard"===(void 0===r?"keyboard":r)&&eG(String(e))},[b,v]),e1=function(e,t,n){var r=function(){var t,n=eD(e);return[ee?{label:null==n?void 0:n[eb.label],value:e,key:null!==(t=null==n?void 0:n.key)&&void 0!==t?t:e}:e,H(n)]};if(t&&j){var o=r(),i=(0,u.Z)(o,2);j(i[0],i[1])}else if(!t&&P&&"clear"!==n){var a=r(),l=(0,u.Z)(a,2);P(l[0],l[1])}},e2=ed(function(e,t){var n=!eg||t.selected;eW(n?eg?[].concat((0,l.Z)(e_),[e]):[e]:e_.filter(function(t){return t.value!==e})),e1(e,n),"combobox"===v?eG(""):(!U||C)&&(ek(""),eG(""))}),e6=r.useMemo(function(){var e=!1!==q&&!1!==M;return(0,s.Z)((0,s.Z)({},eE),{},{flattenOptions:eq,onActiveValue:e0,defaultActiveFirstOption:eJ,onSelect:e2,menuItemSelectedIcon:z,rawValues:eL,fieldNames:eb,virtual:e,direction:K,listHeight:X,listItemHeight:Y,childrenAsData:ev,maxCount:ei,optionRender:D})},[ei,eE,eq,e0,eJ,e2,z,eL,eb,q,M,K,X,Y,ev,D]);return r.createElement(W.Provider,{value:e6},r.createElement(G,(0,a.Z)({},eh,{id:em,prefixCls:void 0===y?"rc-select":y,ref:t,omitDomProps:ep,mode:v,displayValues:eZ,onDisplayValuesChange:function(e,t){eW(e);var n=t.type,r=t.values;("remove"===n||"clear"===n)&&r.forEach(function(e){e1(e.value,!1,n)})},direction:K,searchValue:eS,onSearch:function(e,t){if(ek(e),eG(null),"submit"===t.source){var n=(e||"").trim();n&&(eW(Array.from(new Set([].concat((0,l.Z)(eL),[n])))),e1(n,!0),ek(""));return}"blur"!==t.source&&("combobox"===v&&eW(e),null==k||k(e))},autoClearSearchValue:C,onSearchSplit:function(e){var t=e;"tags"!==v&&(t=e.map(function(e){var t=eO.get(e);return null==t?void 0:t.value}).filter(function(e){return void 0!==e}));var n=Array.from(new Set([].concat((0,l.Z)(eL),(0,l.Z)(t))));eW(n),n.forEach(function(e){e1(e,!0)})},dropdownMatchSelectWidth:M,OptionList:en,emptyOptions:!eq.length,activeValue:eU,activeDescendantId:"".concat(em,"_list_").concat(eY)})))});eh.Option=$,eh.OptGroup=X;var 
em=n(62236),eg=n(68710),ev=n(93942),ey=n(12757),eb=n(71744),ex=n(91086),ew=n(86586),eS=n(64024),ek=n(33759),eE=n(39109),eC=n(56250),eO=n(65658),ej=n(29961);let eP=e=>{let t={overflow:{adjustX:!0,adjustY:!0,shiftY:!0},htmlRegion:"scroll"===e?"scroll":"visible",dynamicInset:!0};return{bottomLeft:Object.assign(Object.assign({},t),{points:["tl","bl"],offset:[0,4]}),bottomRight:Object.assign(Object.assign({},t),{points:["tr","br"],offset:[0,4]}),topLeft:Object.assign(Object.assign({},t),{points:["bl","tl"],offset:[0,-4]}),topRight:Object.assign(Object.assign({},t),{points:["br","tr"],offset:[0,-4]})}};var eN=n(12918),eM=n(17691),eI=n(80669),eR=n(3104),eT=n(18544),eA=n(29382);let e_=e=>{let{optionHeight:t,optionFontSize:n,optionLineHeight:r,optionPadding:o}=e;return{position:"relative",display:"block",minHeight:t,padding:o,color:e.colorText,fontWeight:"normal",fontSize:n,lineHeight:r,boxSizing:"border-box"}};var eD=e=>{let{antCls:t,componentCls:n}=e,r="".concat(n,"-item"),o="&".concat(t,"-slide-up-enter").concat(t,"-slide-up-enter-active"),i="&".concat(t,"-slide-up-appear").concat(t,"-slide-up-appear-active"),a="&".concat(t,"-slide-up-leave").concat(t,"-slide-up-leave-active"),l="".concat(n,"-dropdown-placement-");return[{["".concat(n,"-dropdown")]:Object.assign(Object.assign({},(0,eN.Wf)(e)),{position:"absolute",top:-9999,zIndex:e.zIndexPopup,boxSizing:"border-box",padding:e.paddingXXS,overflow:"hidden",fontSize:e.fontSize,fontVariant:"initial",backgroundColor:e.colorBgElevated,borderRadius:e.borderRadiusLG,outline:"none",boxShadow:e.boxShadowSecondary,["\n ".concat(o).concat(l,"bottomLeft,\n ").concat(i).concat(l,"bottomLeft\n ")]:{animationName:eT.fJ},["\n ".concat(o).concat(l,"topLeft,\n ").concat(i).concat(l,"topLeft,\n ").concat(o).concat(l,"topRight,\n ").concat(i).concat(l,"topRight\n ")]:{animationName:eT.Qt},["".concat(a).concat(l,"bottomLeft")]:{animationName:eT.Uw},["\n ".concat(a).concat(l,"topLeft,\n ").concat(a).concat(l,"topRight\n ")]:{animationName:eT.ly},"&-hidden":{display:"none"},["".concat(r)]:Object.assign(Object.assign({},e_(e)),{cursor:"pointer",transition:"background ".concat(e.motionDurationSlow," ease"),borderRadius:e.borderRadiusSM,"&-group":{color:e.colorTextDescription,fontSize:e.fontSizeSM,cursor:"default"},"&-option":{display:"flex","&-content":Object.assign({flex:"auto"},eN.vS),"&-state":{flex:"none",display:"flex",alignItems:"center"},["&-active:not(".concat(r,"-option-disabled)")]:{backgroundColor:e.optionActiveBg},["&-selected:not(".concat(r,"-option-disabled)")]:{color:e.optionSelectedColor,fontWeight:e.optionSelectedFontWeight,backgroundColor:e.optionSelectedBg,["".concat(r,"-option-state")]:{color:e.colorPrimary},["&:has(+ ".concat(r,"-option-selected:not(").concat(r,"-option-disabled))")]:{borderEndStartRadius:0,borderEndEndRadius:0,["& + ".concat(r,"-option-selected:not(").concat(r,"-option-disabled)")]:{borderStartStartRadius:0,borderStartEndRadius:0}}},"&-disabled":{["&".concat(r,"-option-selected")]:{backgroundColor:e.colorBgContainerDisabled},color:e.colorTextDisabled,cursor:"not-allowed"},"&-grouped":{paddingInlineStart:e.calc(e.controlPaddingHorizontal).mul(2).equal()}}}),"&-rtl":{direction:"rtl"}})},(0,eT.oN)(e,"slide-up"),(0,eT.oN)(e,"slide-down"),(0,eA.Fm)(e,"move-up"),(0,eA.Fm)(e,"move-down")]},eZ=n(352);let eL=e=>{let{multipleSelectItemHeight:t,selectHeight:n,lineWidth:r}=e;return e.calc(n).sub(t).div(2).sub(r).equal()};function 
ez(e,t){let{componentCls:n,iconCls:r}=e,o="".concat(n,"-selection-overflow"),i=e.multipleSelectItemHeight,a=eL(e),l=t?"".concat(n,"-").concat(t):"";return{["".concat(n,"-multiple").concat(l)]:{fontSize:e.fontSize,[o]:{position:"relative",display:"flex",flex:"auto",flexWrap:"wrap",maxWidth:"100%","&-item":{flex:"none",alignSelf:"center",maxWidth:"100%",display:"inline-flex"}},["".concat(n,"-selector")]:{display:"flex",flexWrap:"wrap",alignItems:"center",height:"100%",paddingInline:e.calc(2).mul(2).equal(),paddingBlock:e.calc(a).sub(2).equal(),borderRadius:e.borderRadius,["".concat(n,"-show-search&")]:{cursor:"text"},["".concat(n,"-disabled&")]:{background:e.multipleSelectorBgDisabled,cursor:"not-allowed"},"&:after":{display:"inline-block",width:0,margin:"".concat((0,eZ.bf)(2)," 0"),lineHeight:(0,eZ.bf)(i),visibility:"hidden",content:'"\\a0"'}},["\n &".concat(n,"-show-arrow ").concat(n,"-selector,\n &").concat(n,"-allow-clear ").concat(n,"-selector\n ")]:{paddingInlineEnd:e.calc(e.fontSizeIcon).add(e.controlPaddingHorizontal).equal()},["".concat(n,"-selection-item")]:{display:"flex",alignSelf:"center",flex:"none",boxSizing:"border-box",maxWidth:"100%",height:i,marginTop:2,marginBottom:2,lineHeight:(0,eZ.bf)(e.calc(i).sub(e.calc(e.lineWidth).mul(2)).equal()),borderRadius:e.borderRadiusSM,cursor:"default",transition:"font-size ".concat(e.motionDurationSlow,", line-height ").concat(e.motionDurationSlow,", height ").concat(e.motionDurationSlow),marginInlineEnd:e.calc(2).mul(2).equal(),paddingInlineStart:e.paddingXS,paddingInlineEnd:e.calc(e.paddingXS).div(2).equal(),["".concat(n,"-disabled&")]:{color:e.multipleItemColorDisabled,borderColor:e.multipleItemBorderColorDisabled,cursor:"not-allowed"},"&-content":{display:"inline-block",marginInlineEnd:e.calc(e.paddingXS).div(2).equal(),overflow:"hidden",whiteSpace:"pre",textOverflow:"ellipsis"},"&-remove":Object.assign(Object.assign({},(0,eN.Ro)()),{display:"inline-flex",alignItems:"center",color:e.colorIcon,fontWeight:"bold",fontSize:10,lineHeight:"inherit",cursor:"pointer",["> ".concat(r)]:{verticalAlign:"-0.2em"},"&:hover":{color:e.colorIconHover}})},["".concat(o,"-item + ").concat(o,"-item")]:{["".concat(n,"-selection-search")]:{marginInlineStart:0}},["".concat(o,"-item-suffix")]:{height:"100%"},["".concat(n,"-selection-search")]:{display:"inline-flex",position:"relative",maxWidth:"100%",marginInlineStart:e.calc(e.inputPaddingHorizontalBase).sub(a).equal(),"\n &-input,\n &-mirror\n ":{height:i,fontFamily:e.fontFamily,lineHeight:(0,eZ.bf)(i),transition:"all ".concat(e.motionDurationSlow)},"&-input":{width:"100%",minWidth:4.1},"&-mirror":{position:"absolute",top:0,insetInlineStart:0,insetInlineEnd:"auto",zIndex:999,whiteSpace:"pre",visibility:"hidden"}},["".concat(n,"-selection-placeholder")]:{position:"absolute",top:"50%",insetInlineStart:e.inputPaddingHorizontalBase,insetInlineEnd:e.inputPaddingHorizontalBase,transform:"translateY(-50%)",transition:"all ".concat(e.motionDurationSlow)}}}}var 
eB=e=>{let{componentCls:t}=e,n=(0,eR.TS)(e,{selectHeight:e.controlHeightSM,multipleSelectItemHeight:e.controlHeightXS,borderRadius:e.borderRadiusSM,borderRadiusSM:e.borderRadiusXS}),r=(0,eR.TS)(e,{fontSize:e.fontSizeLG,selectHeight:e.controlHeightLG,multipleSelectItemHeight:e.multipleItemHeightLG,borderRadius:e.borderRadiusLG,borderRadiusSM:e.borderRadius});return[ez(e),ez(n,"sm"),{["".concat(t,"-multiple").concat(t,"-sm")]:{["".concat(t,"-selection-placeholder")]:{insetInline:e.calc(e.controlPaddingHorizontalSM).sub(e.lineWidth).equal()},["".concat(t,"-selection-search")]:{marginInlineStart:2}}},ez(r,"lg")]};function eF(e,t){let{componentCls:n,inputPaddingHorizontalBase:r,borderRadius:o}=e,i=e.calc(e.controlHeight).sub(e.calc(e.lineWidth).mul(2)).equal(),a=t?"".concat(n,"-").concat(t):"";return{["".concat(n,"-single").concat(a)]:{fontSize:e.fontSize,height:e.controlHeight,["".concat(n,"-selector")]:Object.assign(Object.assign({},(0,eN.Wf)(e,!0)),{display:"flex",borderRadius:o,["".concat(n,"-selection-search")]:{position:"absolute",top:0,insetInlineStart:r,insetInlineEnd:r,bottom:0,"&-input":{width:"100%",WebkitAppearance:"textfield"}},["\n ".concat(n,"-selection-item,\n ").concat(n,"-selection-placeholder\n ")]:{padding:0,lineHeight:(0,eZ.bf)(i),transition:"all ".concat(e.motionDurationSlow,", visibility 0s"),alignSelf:"center"},["".concat(n,"-selection-placeholder")]:{transition:"none",pointerEvents:"none"},[["&:after","".concat(n,"-selection-item:empty:after"),"".concat(n,"-selection-placeholder:empty:after")].join(",")]:{display:"inline-block",width:0,visibility:"hidden",content:'"\\a0"'}}),["\n &".concat(n,"-show-arrow ").concat(n,"-selection-item,\n &").concat(n,"-show-arrow ").concat(n,"-selection-placeholder\n ")]:{paddingInlineEnd:e.showArrowPaddingInlineEnd},["&".concat(n,"-open ").concat(n,"-selection-item")]:{color:e.colorTextPlaceholder},["&:not(".concat(n,"-customize-input)")]:{["".concat(n,"-selector")]:{width:"100%",height:"100%",padding:"0 ".concat((0,eZ.bf)(r)),["".concat(n,"-selection-search-input")]:{height:i},"&:after":{lineHeight:(0,eZ.bf)(i)}}},["&".concat(n,"-customize-input")]:{["".concat(n,"-selector")]:{"&:after":{display:"none"},["".concat(n,"-selection-search")]:{position:"static",width:"100%"},["".concat(n,"-selection-placeholder")]:{position:"absolute",insetInlineStart:0,insetInlineEnd:0,padding:"0 ".concat((0,eZ.bf)(r)),"&:after":{display:"none"}}}}}}}let eH=(e,t)=>{let{componentCls:n,antCls:r,controlOutlineWidth:o}=e;return{["&:not(".concat(n,"-customize-input) ").concat(n,"-selector")]:{border:"".concat((0,eZ.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(t.borderColor),background:e.selectorBg},["&:not(".concat(n,"-disabled):not(").concat(n,"-customize-input):not(").concat(r,"-pagination-size-changer)")]:{["&:hover ".concat(n,"-selector")]:{borderColor:t.hoverBorderHover},["".concat(n,"-focused& ").concat(n,"-selector")]:{borderColor:t.activeBorderColor,boxShadow:"0 0 0 ".concat((0,eZ.bf)(o)," 
").concat(t.activeShadowColor),outline:0}}}},eq=(e,t)=>({["&".concat(e.componentCls,"-status-").concat(t.status)]:Object.assign({},eH(e,t))}),eW=e=>({"&-outlined":Object.assign(Object.assign(Object.assign(Object.assign({},eH(e,{borderColor:e.colorBorder,hoverBorderHover:e.colorPrimaryHover,activeBorderColor:e.colorPrimary,activeShadowColor:e.controlOutline})),eq(e,{status:"error",borderColor:e.colorError,hoverBorderHover:e.colorErrorHover,activeBorderColor:e.colorError,activeShadowColor:e.colorErrorOutline})),eq(e,{status:"warning",borderColor:e.colorWarning,hoverBorderHover:e.colorWarningHover,activeBorderColor:e.colorWarning,activeShadowColor:e.colorWarningOutline})),{["&".concat(e.componentCls,"-disabled")]:{["&:not(".concat(e.componentCls,"-customize-input) ").concat(e.componentCls,"-selector")]:{background:e.colorBgContainerDisabled,color:e.colorTextDisabled}},["&".concat(e.componentCls,"-multiple ").concat(e.componentCls,"-selection-item")]:{background:e.multipleItemBg,border:"".concat((0,eZ.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.multipleItemBorderColor)}})}),eK=(e,t)=>{let{componentCls:n,antCls:r}=e;return{["&:not(".concat(n,"-customize-input) ").concat(n,"-selector")]:{background:t.bg,border:"".concat((0,eZ.bf)(e.lineWidth)," ").concat(e.lineType," transparent"),color:t.color},["&:not(".concat(n,"-disabled):not(").concat(n,"-customize-input):not(").concat(r,"-pagination-size-changer)")]:{["&:hover ".concat(n,"-selector")]:{background:t.hoverBg},["".concat(n,"-focused& ").concat(n,"-selector")]:{background:e.selectorBg,borderColor:t.activeBorderColor,outline:0}}}},eV=(e,t)=>({["&".concat(e.componentCls,"-status-").concat(t.status)]:Object.assign({},eK(e,t))}),eU=e=>({"&-filled":Object.assign(Object.assign(Object.assign(Object.assign({},eK(e,{bg:e.colorFillTertiary,hoverBg:e.colorFillSecondary,activeBorderColor:e.colorPrimary,color:e.colorText})),eV(e,{status:"error",bg:e.colorErrorBg,hoverBg:e.colorErrorBgHover,activeBorderColor:e.colorError,color:e.colorError})),eV(e,{status:"warning",bg:e.colorWarningBg,hoverBg:e.colorWarningBgHover,activeBorderColor:e.colorWarning,color:e.colorWarning})),{["&".concat(e.componentCls,"-disabled")]:{["&:not(".concat(e.componentCls,"-customize-input) ").concat(e.componentCls,"-selector")]:{borderColor:e.colorBorder,background:e.colorBgContainerDisabled,color:e.colorTextDisabled}},["&".concat(e.componentCls,"-multiple ").concat(e.componentCls,"-selection-item")]:{background:e.colorBgContainer,border:"".concat((0,eZ.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorSplit)}})}),eG=e=>({"&-borderless":{["".concat(e.componentCls,"-selector")]:{background:"transparent",borderColor:"transparent"},["&".concat(e.componentCls,"-disabled")]:{["&:not(".concat(e.componentCls,"-customize-input) ").concat(e.componentCls,"-selector")]:{color:e.colorTextDisabled}},["&".concat(e.componentCls,"-multiple ").concat(e.componentCls,"-selection-item")]:{background:e.multipleItemBg,border:"".concat((0,eZ.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.multipleItemBorderColor)}}});var eX=e=>({[e.componentCls]:Object.assign(Object.assign(Object.assign({},eW(e)),eU(e)),eG(e))});let e$=e=>{let{componentCls:t}=e;return{position:"relative",transition:"all ".concat(e.motionDurationMid," 
").concat(e.motionEaseInOut),input:{cursor:"pointer"},["".concat(t,"-show-search&")]:{cursor:"text",input:{cursor:"auto",color:"inherit",height:"100%"}},["".concat(t,"-disabled&")]:{cursor:"not-allowed",input:{cursor:"not-allowed"}}}},eY=e=>{let{componentCls:t}=e;return{["".concat(t,"-selection-search-input")]:{margin:0,padding:0,background:"transparent",border:"none",outline:"none",appearance:"none",fontFamily:"inherit","&::-webkit-search-cancel-button":{display:"none","-webkit-appearance":"none"}}}},eQ=e=>{let{antCls:t,componentCls:n,inputPaddingHorizontalBase:r,iconCls:o}=e;return{[n]:Object.assign(Object.assign({},(0,eN.Wf)(e)),{position:"relative",display:"inline-block",cursor:"pointer",["&:not(".concat(n,"-customize-input) ").concat(n,"-selector")]:Object.assign(Object.assign({},e$(e)),eY(e)),["".concat(n,"-selection-item")]:Object.assign(Object.assign({flex:1,fontWeight:"normal",position:"relative",userSelect:"none"},eN.vS),{["> ".concat(t,"-typography")]:{display:"inline"}}),["".concat(n,"-selection-placeholder")]:Object.assign(Object.assign({},eN.vS),{flex:1,color:e.colorTextPlaceholder,pointerEvents:"none"}),["".concat(n,"-arrow")]:Object.assign(Object.assign({},(0,eN.Ro)()),{position:"absolute",top:"50%",insetInlineStart:"auto",insetInlineEnd:r,height:e.fontSizeIcon,marginTop:e.calc(e.fontSizeIcon).mul(-1).div(2).equal(),color:e.colorTextQuaternary,fontSize:e.fontSizeIcon,lineHeight:1,textAlign:"center",pointerEvents:"none",display:"flex",alignItems:"center",transition:"opacity ".concat(e.motionDurationSlow," ease"),[o]:{verticalAlign:"top",transition:"transform ".concat(e.motionDurationSlow),"> svg":{verticalAlign:"top"},["&:not(".concat(n,"-suffix)")]:{pointerEvents:"auto"}},["".concat(n,"-disabled &")]:{cursor:"not-allowed"},"> *:not(:last-child)":{marginInlineEnd:8}}),["".concat(n,"-clear")]:{position:"absolute",top:"50%",insetInlineStart:"auto",insetInlineEnd:r,zIndex:1,display:"inline-block",width:e.fontSizeIcon,height:e.fontSizeIcon,marginTop:e.calc(e.fontSizeIcon).mul(-1).div(2).equal(),color:e.colorTextQuaternary,fontSize:e.fontSizeIcon,fontStyle:"normal",lineHeight:1,textAlign:"center",textTransform:"none",cursor:"pointer",opacity:0,transition:"color ".concat(e.motionDurationMid," ease, opacity ").concat(e.motionDurationSlow," ease"),textRendering:"auto","&:before":{display:"block"},"&:hover":{color:e.colorTextTertiary}},"&:hover":{["".concat(n,"-clear")]:{opacity:1},["".concat(n,"-arrow:not(:last-child)")]:{opacity:0}}}),["".concat(n,"-has-feedback")]:{["".concat(n,"-clear")]:{insetInlineEnd:e.calc(r).add(e.fontSize).add(e.paddingXS).equal()}}}},eJ=e=>{let{componentCls:t}=e;return[{[t]:{["&".concat(t,"-in-form-item")]:{width:"100%"}}},eQ(e),function(e){let{componentCls:t}=e,n=e.calc(e.controlPaddingHorizontalSM).sub(e.lineWidth).equal();return[eF(e),eF((0,eR.TS)(e,{controlHeight:e.controlHeightSM,borderRadius:e.borderRadiusSM}),"sm"),{["".concat(t,"-single").concat(t,"-sm")]:{["&:not(".concat(t,"-customize-input)")]:{["".concat(t,"-selection-search")]:{insetInlineStart:n,insetInlineEnd:n},["".concat(t,"-selector")]:{padding:"0 ".concat((0,eZ.bf)(n))},["&".concat(t,"-show-arrow ").concat(t,"-selection-search")]:{insetInlineEnd:e.calc(n).add(e.calc(e.fontSize).mul(1.5)).equal()},["\n &".concat(t,"-show-arrow ").concat(t,"-selection-item,\n &").concat(t,"-show-arrow ").concat(t,"-selection-placeholder\n 
")]:{paddingInlineEnd:e.calc(e.fontSize).mul(1.5).equal()}}}},eF((0,eR.TS)(e,{controlHeight:e.singleItemHeightLG,fontSize:e.fontSizeLG,borderRadius:e.borderRadiusLG}),"lg")]}(e),eB(e),eD(e),{["".concat(t,"-rtl")]:{direction:"rtl"}},(0,eM.c)(e,{borderElCls:"".concat(t,"-selector"),focusElCls:"".concat(t,"-focused")})]};var e0=(0,eI.I$)("Select",(e,t)=>{let{rootPrefixCls:n}=t,r=(0,eR.TS)(e,{rootPrefixCls:n,inputPaddingHorizontalBase:e.calc(e.paddingSM).sub(1).equal(),multipleSelectItemHeight:e.multipleItemHeight,selectHeight:e.controlHeight});return[eJ(r),eX(r)]},e=>{let{fontSize:t,lineHeight:n,controlHeight:r,controlPaddingHorizontal:o,zIndexPopupBase:i,colorText:a,fontWeightStrong:l,controlItemBgActive:c,controlItemBgHover:s,colorBgContainer:u,colorFillSecondary:d,controlHeightLG:f,controlHeightSM:p,colorBgContainerDisabled:h,colorTextDisabled:m}=e;return{zIndexPopup:i+50,optionSelectedColor:a,optionSelectedFontWeight:l,optionSelectedBg:c,optionActiveBg:s,optionPadding:"".concat((r-t*n)/2,"px ").concat(o,"px"),optionFontSize:t,optionLineHeight:n,optionHeight:r,selectorBg:u,clearBg:u,singleItemHeightLG:f,multipleItemBg:d,multipleItemBorderColor:"transparent",multipleItemHeight:p,multipleItemHeightLG:r,multipleSelectorBgDisabled:h,multipleItemColorDisabled:m,multipleItemBorderColorDisabled:"transparent",showArrowPaddingInlineEnd:Math.ceil(1.25*e.fontSize)}},{unitless:{optionLineHeight:!0,optionSelectedFontWeight:!0}}),e1=n(9738),e2=n(39725),e6=n(49638),e4=n(70464),e3=n(61935),e5=n(29436),e8=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let e7="SECRET_COMBOBOX_MODE_DO_NOT_USE",e9=r.forwardRef((e,t)=>{var n,o,a;let l;let{prefixCls:c,bordered:s,className:u,rootClassName:d,getPopupContainer:f,popupClassName:p,dropdownClassName:h,listHeight:m=256,placement:g,listItemHeight:v,size:y,disabled:b,notFoundContent:x,status:w,builtinPlacements:S,dropdownMatchSelectWidth:k,popupMatchSelectWidth:E,direction:C,style:O,allowClear:j,variant:P,dropdownStyle:N,transitionName:M,tagRender:I,maxCount:R}=e,T=e8(e,["prefixCls","bordered","className","rootClassName","getPopupContainer","popupClassName","dropdownClassName","listHeight","placement","listItemHeight","size","disabled","notFoundContent","status","builtinPlacements","dropdownMatchSelectWidth","popupMatchSelectWidth","direction","style","allowClear","variant","dropdownStyle","transitionName","tagRender","maxCount"]),{getPopupContainer:A,getPrefixCls:_,renderEmpty:D,direction:Z,virtual:L,popupMatchSelectWidth:z,popupOverflow:B,select:F}=r.useContext(eb.E_),[,H]=(0,ej.ZP)(),q=null!=v?v:null==H?void 0:H.controlHeight,W=_("select",c),K=_(),V=null!=C?C:Z,{compactSize:U,compactItemClassnames:G}=(0,eO.ri)(W,V),[X,$]=(0,eC.Z)(P,s),Y=(0,eS.Z)(W),[J,ee,et]=e0(W,Y),en=r.useMemo(()=>{let{mode:t}=e;return"combobox"===t?void 0:t===e7?"combobox":t},[e.mode]),er="multiple"===en||"tags"===en,eo=(o=e.suffixIcon,void 0!==(a=e.showArrow)?a:null!==o),ei=null!==(n=null!=E?E:k)&&void 0!==n?n:z,{status:ea,hasFeedback:el,isFormItemInput:ec,feedbackIcon:es}=r.useContext(eE.aM),eu=(0,ey.F)(ea,w);l=void 0!==x?x:"combobox"===en?null:(null==D?void 
0:D("Select"))||r.createElement(ex.Z,{componentName:"Select"});let{suffixIcon:ed,itemIcon:ef,removeIcon:ep,clearIcon:ev}=function(e){let{suffixIcon:t,clearIcon:n,menuItemSelectedIcon:o,removeIcon:i,loading:a,multiple:l,hasFeedback:c,prefixCls:s,showSuffixIcon:u,feedbackIcon:d,showArrow:f,componentName:p}=e,h=null!=n?n:r.createElement(e2.Z,null),m=e=>null!==t||c||f?r.createElement(r.Fragment,null,!1!==u&&e,c&&d):null,g=null;if(void 0!==t)g=m(t);else if(a)g=m(r.createElement(e3.Z,{spin:!0}));else{let e="".concat(s,"-suffix");g=t=>{let{open:n,showSearch:o}=t;return n&&o?m(r.createElement(e5.Z,{className:e})):m(r.createElement(e4.Z,{className:e}))}}let v=null;return v=void 0!==o?o:l?r.createElement(e1.Z,null):null,{clearIcon:h,suffixIcon:g,itemIcon:v,removeIcon:void 0!==i?i:r.createElement(e6.Z,null)}}(Object.assign(Object.assign({},T),{multiple:er,hasFeedback:el,feedbackIcon:es,showSuffixIcon:eo,prefixCls:W,componentName:"Select"})),eN=(0,Q.Z)(T,["suffixIcon","itemIcon"]),eM=i()(p||h,{["".concat(W,"-dropdown-").concat(V)]:"rtl"===V},d,et,Y,ee),eI=(0,ek.Z)(e=>{var t;return null!==(t=null!=y?y:U)&&void 0!==t?t:e}),eR=r.useContext(ew.Z),eT=i()({["".concat(W,"-lg")]:"large"===eI,["".concat(W,"-sm")]:"small"===eI,["".concat(W,"-rtl")]:"rtl"===V,["".concat(W,"-").concat(X)]:$,["".concat(W,"-in-form-item")]:ec},(0,ey.Z)(W,eu,el),G,null==F?void 0:F.className,u,d,et,Y,ee),eA=r.useMemo(()=>void 0!==g?g:"rtl"===V?"bottomRight":"bottomLeft",[g,V]),[e_]=(0,em.Cn)("SelectLike",null==N?void 0:N.zIndex);return J(r.createElement(eh,Object.assign({ref:t,virtual:L,showSearch:null==F?void 0:F.showSearch},eN,{style:Object.assign(Object.assign({},null==F?void 0:F.style),O),dropdownMatchSelectWidth:ei,transitionName:(0,eg.m)(K,"slide-up",M),builtinPlacements:S||eP(B),listHeight:m,listItemHeight:q,mode:en,prefixCls:W,placement:eA,direction:V,suffixIcon:ed,menuItemSelectedIcon:ef,removeIcon:ep,allowClear:!0===j?{clearIcon:ev}:j,notFoundContent:l,className:eT,getPopupContainer:f||A,dropdownClassName:eM,disabled:null!=b?b:eR,dropdownStyle:Object.assign(Object.assign({},N),{zIndex:e_}),maxCount:er?R:void 0,tagRender:er?I:void 0})))}),te=(0,ev.Z)(e9);e9.SECRET_COMBOBOX_MODE_DO_NOT_USE=e7,e9.Option=$,e9.OptGroup=X,e9._InternalPanelDoNotUseOrYouWillBeFired=te;var tt=e9},93142:function(e,t,n){"use strict";n.d(t,{Z:function(){return v}});var r=n(2265),o=n(36760),i=n.n(o),a=n(45287);function l(e){return["small","middle","large"].includes(e)}function c(e){return!!e&&"number"==typeof e&&!Number.isNaN(e)}var s=n(71744),u=n(65658);let d=r.createContext({latestIndex:0}),f=d.Provider;var p=e=>{let{className:t,index:n,children:o,split:i,style:a}=e,{latestIndex:l}=r.useContext(d);return null==o?null:r.createElement(r.Fragment,null,r.createElement("div",{className:t,style:a},o),nt.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let g=r.forwardRef((e,t)=>{var n,o;let{getPrefixCls:u,space:d,direction:g}=r.useContext(s.E_),{size:v=(null==d?void 0:d.size)||"small",align:y,className:b,rootClassName:x,children:w,direction:S="horizontal",prefixCls:k,split:E,style:C,wrap:O=!1,classNames:j,styles:P}=e,N=m(e,["size","align","className","rootClassName","children","direction","prefixCls","split","style","wrap","classNames","styles"]),[M,I]=Array.isArray(v)?v:[v,v],R=l(I),T=l(M),A=c(I),_=c(M),D=(0,a.Z)(w,{keepEmpty:!0}),Z=void 
0===y&&"horizontal"===S?"center":y,L=u("space",k),[z,B,F]=(0,h.Z)(L),H=i()(L,null==d?void 0:d.className,B,"".concat(L,"-").concat(S),{["".concat(L,"-rtl")]:"rtl"===g,["".concat(L,"-align-").concat(Z)]:Z,["".concat(L,"-gap-row-").concat(I)]:R,["".concat(L,"-gap-col-").concat(M)]:T},b,x,F),q=i()("".concat(L,"-item"),null!==(n=null==j?void 0:j.item)&&void 0!==n?n:null===(o=null==d?void 0:d.classNames)||void 0===o?void 0:o.item),W=0,K=D.map((e,t)=>{var n,o;null!=e&&(W=t);let i=e&&e.key||"".concat(q,"-").concat(t);return r.createElement(p,{className:q,key:i,index:t,split:E,style:null!==(n=null==P?void 0:P.item)&&void 0!==n?n:null===(o=null==d?void 0:d.styles)||void 0===o?void 0:o.item},e)}),V=r.useMemo(()=>({latestIndex:W}),[W]);if(0===D.length)return null;let U={};return O&&(U.flexWrap="wrap"),!T&&_&&(U.columnGap=M),!R&&A&&(U.rowGap=I),z(r.createElement("div",Object.assign({ref:t,className:H,style:Object.assign(Object.assign(Object.assign({},U),null==d?void 0:d.style),C)},N),r.createElement(f,{value:V},K)))});g.Compact=u.ZP;var v=g},87908:function(e,t,n){"use strict";n.d(t,{Z:function(){return x}});var r=n(2265),o=n(36760),i=n.n(o),a=n(18694),l=n(19722),c=n(71744),s=n(352),u=n(12918),d=n(80669),f=n(3104);let p=new s.E4("antSpinMove",{to:{opacity:1}}),h=new s.E4("antRotate",{to:{transform:"rotate(405deg)"}}),m=e=>{let{componentCls:t,calc:n}=e;return{["".concat(t)]:Object.assign(Object.assign({},(0,u.Wf)(e)),{position:"absolute",display:"none",color:e.colorPrimary,fontSize:0,textAlign:"center",verticalAlign:"middle",opacity:0,transition:"transform ".concat(e.motionDurationSlow," ").concat(e.motionEaseInOutCirc),"&-spinning":{position:"static",display:"inline-block",opacity:1},["".concat(t,"-text")]:{fontSize:e.fontSize,paddingTop:n(n(e.dotSize).sub(e.fontSize)).div(2).add(2).equal()},"&-fullscreen":{position:"fixed",width:"100vw",height:"100vh",backgroundColor:e.colorBgMask,zIndex:e.zIndexPopupBase,inset:0,display:"flex",alignItems:"center",flexDirection:"column",justifyContent:"center",opacity:0,visibility:"hidden",transition:"all ".concat(e.motionDurationMid),"&-show":{opacity:1,visibility:"visible"},["".concat(t,"-dot ").concat(t,"-dot-item")]:{backgroundColor:e.colorWhite},["".concat(t,"-text")]:{color:e.colorTextLightSolid}},"&-nested-loading":{position:"relative",["> div > ".concat(t)]:{position:"absolute",top:0,insetInlineStart:0,zIndex:4,display:"block",width:"100%",height:"100%",maxHeight:e.contentHeight,["".concat(t,"-dot")]:{position:"absolute",top:"50%",insetInlineStart:"50%",margin:n(e.dotSize).mul(-1).div(2).equal()},["".concat(t,"-text")]:{position:"absolute",top:"50%",width:"100%",textShadow:"0 1px 2px ".concat(e.colorBgContainer)},["&".concat(t,"-show-text ").concat(t,"-dot")]:{marginTop:n(e.dotSize).div(2).mul(-1).sub(10).equal()},"&-sm":{["".concat(t,"-dot")]:{margin:n(e.dotSizeSM).mul(-1).div(2).equal()},["".concat(t,"-text")]:{paddingTop:n(n(e.dotSizeSM).sub(e.fontSize)).div(2).add(2).equal()},["&".concat(t,"-show-text ").concat(t,"-dot")]:{marginTop:n(e.dotSizeSM).div(2).mul(-1).sub(10).equal()}},"&-lg":{["".concat(t,"-dot")]:{margin:n(e.dotSizeLG).mul(-1).div(2).equal()},["".concat(t,"-text")]:{paddingTop:n(n(e.dotSizeLG).sub(e.fontSize)).div(2).add(2).equal()},["&".concat(t,"-show-text ").concat(t,"-dot")]:{marginTop:n(e.dotSizeLG).div(2).mul(-1).sub(10).equal()}}},["".concat(t,"-container")]:{position:"relative",transition:"opacity 
".concat(e.motionDurationSlow),"&::after":{position:"absolute",top:0,insetInlineEnd:0,bottom:0,insetInlineStart:0,zIndex:10,width:"100%",height:"100%",background:e.colorBgContainer,opacity:0,transition:"all ".concat(e.motionDurationSlow),content:'""',pointerEvents:"none"}},["".concat(t,"-blur")]:{clear:"both",opacity:.5,userSelect:"none",pointerEvents:"none","&::after":{opacity:.4,pointerEvents:"auto"}}},"&-tip":{color:e.spinDotDefault},["".concat(t,"-dot")]:{position:"relative",display:"inline-block",fontSize:e.dotSize,width:"1em",height:"1em","&-item":{position:"absolute",display:"block",width:n(e.dotSize).sub(n(e.marginXXS).div(2)).div(2).equal(),height:n(e.dotSize).sub(n(e.marginXXS).div(2)).div(2).equal(),backgroundColor:e.colorPrimary,borderRadius:"100%",transform:"scale(0.75)",transformOrigin:"50% 50%",opacity:.3,animationName:p,animationDuration:"1s",animationIterationCount:"infinite",animationTimingFunction:"linear",animationDirection:"alternate","&:nth-child(1)":{top:0,insetInlineStart:0,animationDelay:"0s"},"&:nth-child(2)":{top:0,insetInlineEnd:0,animationDelay:"0.4s"},"&:nth-child(3)":{insetInlineEnd:0,bottom:0,animationDelay:"0.8s"},"&:nth-child(4)":{bottom:0,insetInlineStart:0,animationDelay:"1.2s"}},"&-spin":{transform:"rotate(45deg)",animationName:h,animationDuration:"1.2s",animationIterationCount:"infinite",animationTimingFunction:"linear"}},["&-sm ".concat(t,"-dot")]:{fontSize:e.dotSizeSM,i:{width:n(n(e.dotSizeSM).sub(n(e.marginXXS).div(2))).div(2).equal(),height:n(n(e.dotSizeSM).sub(n(e.marginXXS).div(2))).div(2).equal()}},["&-lg ".concat(t,"-dot")]:{fontSize:e.dotSizeLG,i:{width:n(n(e.dotSizeLG).sub(e.marginXXS)).div(2).equal(),height:n(n(e.dotSizeLG).sub(e.marginXXS)).div(2).equal()}},["&".concat(t,"-show-text ").concat(t,"-text")]:{display:"block"}})}};var g=(0,d.I$)("Spin",e=>[m((0,f.TS)(e,{spinDotDefault:e.colorTextDescription}))],e=>{let{controlHeightLG:t,controlHeight:n}=e;return{contentHeight:400,dotSize:t/2,dotSizeSM:.35*t,dotSizeLG:n}}),v=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let y=null,b=e=>{let{prefixCls:t,spinning:n=!0,delay:o=0,className:s,rootClassName:u,size:d="default",tip:f,wrapperClassName:p,style:h,children:m,fullscreen:b}=e,x=v(e,["prefixCls","spinning","delay","className","rootClassName","size","tip","wrapperClassName","style","children","fullscreen"]),{getPrefixCls:w}=r.useContext(c.E_),S=w("spin",t),[k,E,C]=g(S),[O,j]=r.useState(()=>n&&(!n||!o||!!isNaN(Number(o))));r.useEffect(()=>{if(n){var e;let t=function(e,t,n){var r,o=n||{},i=o.noTrailing,a=void 0!==i&&i,l=o.noLeading,c=void 0!==l&&l,s=o.debounceMode,u=void 0===s?void 0:s,d=!1,f=0;function p(){r&&clearTimeout(r)}function h(){for(var n=arguments.length,o=Array(n),i=0;ie?c?(f=Date.now(),a||(r=setTimeout(u?m:h,e))):h():!0!==a&&(r=setTimeout(u?m:h,void 0===u?e-s:e)))}return h.cancel=function(e){var t=(e||{}).upcomingOnly;p(),d=!(void 0!==t&&t)},h}(o,()=>{j(!0)},{debounceMode:!1!==(void 0!==(e=({}).atBegin)&&e)});return t(),()=>{var e;null===(e=null==t?void 0:t.cancel)||void 0===e||e.call(t)}}j(!1)},[o,n]);let P=r.useMemo(()=>void 0!==m&&!b,[m,b]),{direction:N,spin:M}=r.useContext(c.E_),I=i()(S,null==M?void 
0:M.className,{["".concat(S,"-sm")]:"small"===d,["".concat(S,"-lg")]:"large"===d,["".concat(S,"-spinning")]:O,["".concat(S,"-show-text")]:!!f,["".concat(S,"-fullscreen")]:b,["".concat(S,"-fullscreen-show")]:b&&O,["".concat(S,"-rtl")]:"rtl"===N},s,u,E,C),R=i()("".concat(S,"-container"),{["".concat(S,"-blur")]:O}),T=(0,a.Z)(x,["indicator"]),A=Object.assign(Object.assign({},null==M?void 0:M.style),h),_=r.createElement("div",Object.assign({},T,{style:A,className:I,"aria-live":"polite","aria-busy":O}),function(e,t){let{indicator:n}=t,o="".concat(e,"-dot");return null===n?null:(0,l.l$)(n)?(0,l.Tm)(n,{className:i()(n.props.className,o)}):(0,l.l$)(y)?(0,l.Tm)(y,{className:i()(y.props.className,o)}):r.createElement("span",{className:i()(o,"".concat(e,"-dot-spin"))},r.createElement("i",{className:"".concat(e,"-dot-item"),key:1}),r.createElement("i",{className:"".concat(e,"-dot-item"),key:2}),r.createElement("i",{className:"".concat(e,"-dot-item"),key:3}),r.createElement("i",{className:"".concat(e,"-dot-item"),key:4}))}(S,e),f&&(P||b)?r.createElement("div",{className:"".concat(S,"-text")},f):null);return k(P?r.createElement("div",Object.assign({},T,{className:i()("".concat(S,"-nested-loading"),p,E,C)}),O&&r.createElement("div",{key:"loading"},_),r.createElement("div",{className:R,key:"container"},m)):_)};b.setDefaultIndicator=e=>{y=e};var x=b},29382:function(e,t,n){"use strict";n.d(t,{Fm:function(){return f}});var r=n(352),o=n(37133);let i=new r.E4("antMoveDownIn",{"0%":{transform:"translate3d(0, 100%, 0)",transformOrigin:"0 0",opacity:0},"100%":{transform:"translate3d(0, 0, 0)",transformOrigin:"0 0",opacity:1}}),a=new r.E4("antMoveDownOut",{"0%":{transform:"translate3d(0, 0, 0)",transformOrigin:"0 0",opacity:1},"100%":{transform:"translate3d(0, 100%, 0)",transformOrigin:"0 0",opacity:0}}),l=new r.E4("antMoveLeftIn",{"0%":{transform:"translate3d(-100%, 0, 0)",transformOrigin:"0 0",opacity:0},"100%":{transform:"translate3d(0, 0, 0)",transformOrigin:"0 0",opacity:1}}),c=new r.E4("antMoveLeftOut",{"0%":{transform:"translate3d(0, 0, 0)",transformOrigin:"0 0",opacity:1},"100%":{transform:"translate3d(-100%, 0, 0)",transformOrigin:"0 0",opacity:0}}),s=new r.E4("antMoveRightIn",{"0%":{transform:"translate3d(100%, 0, 0)",transformOrigin:"0 0",opacity:0},"100%":{transform:"translate3d(0, 0, 0)",transformOrigin:"0 0",opacity:1}}),u=new r.E4("antMoveRightOut",{"0%":{transform:"translate3d(0, 0, 0)",transformOrigin:"0 0",opacity:1},"100%":{transform:"translate3d(100%, 0, 0)",transformOrigin:"0 0",opacity:0}}),d={"move-up":{inKeyframes:new r.E4("antMoveUpIn",{"0%":{transform:"translate3d(0, -100%, 0)",transformOrigin:"0 0",opacity:0},"100%":{transform:"translate3d(0, 0, 0)",transformOrigin:"0 0",opacity:1}}),outKeyframes:new r.E4("antMoveUpOut",{"0%":{transform:"translate3d(0, 0, 0)",transformOrigin:"0 0",opacity:1},"100%":{transform:"translate3d(0, -100%, 0)",transformOrigin:"0 0",opacity:0}})},"move-down":{inKeyframes:i,outKeyframes:a},"move-left":{inKeyframes:l,outKeyframes:c},"move-right":{inKeyframes:s,outKeyframes:u}},f=(e,t)=>{let{antCls:n}=e,r="".concat(n,"-").concat(t),{inKeyframes:i,outKeyframes:a}=d[t];return[(0,o.R)(r,i,a,e.motionDurationMid),{["\n ".concat(r,"-enter,\n ").concat(r,"-appear\n ")]:{opacity:0,animationTimingFunction:e.motionEaseOutCirc},["".concat(r,"-leave")]:{animationTimingFunction:e.motionEaseInOutCirc}}]}},18544:function(e,t,n){"use strict";n.d(t,{Qt:function(){return l},Uw:function(){return a},fJ:function(){return i},ly:function(){return c},oN:function(){return d}});var 
r=n(352),o=n(37133);let i=new r.E4("antSlideUpIn",{"0%":{transform:"scaleY(0.8)",transformOrigin:"0% 0%",opacity:0},"100%":{transform:"scaleY(1)",transformOrigin:"0% 0%",opacity:1}}),a=new r.E4("antSlideUpOut",{"0%":{transform:"scaleY(1)",transformOrigin:"0% 0%",opacity:1},"100%":{transform:"scaleY(0.8)",transformOrigin:"0% 0%",opacity:0}}),l=new r.E4("antSlideDownIn",{"0%":{transform:"scaleY(0.8)",transformOrigin:"100% 100%",opacity:0},"100%":{transform:"scaleY(1)",transformOrigin:"100% 100%",opacity:1}}),c=new r.E4("antSlideDownOut",{"0%":{transform:"scaleY(1)",transformOrigin:"100% 100%",opacity:1},"100%":{transform:"scaleY(0.8)",transformOrigin:"100% 100%",opacity:0}}),s=new r.E4("antSlideLeftIn",{"0%":{transform:"scaleX(0.8)",transformOrigin:"0% 0%",opacity:0},"100%":{transform:"scaleX(1)",transformOrigin:"0% 0%",opacity:1}}),u={"slide-up":{inKeyframes:i,outKeyframes:a},"slide-down":{inKeyframes:l,outKeyframes:c},"slide-left":{inKeyframes:s,outKeyframes:new r.E4("antSlideLeftOut",{"0%":{transform:"scaleX(1)",transformOrigin:"0% 0%",opacity:1},"100%":{transform:"scaleX(0.8)",transformOrigin:"0% 0%",opacity:0}})},"slide-right":{inKeyframes:new r.E4("antSlideRightIn",{"0%":{transform:"scaleX(0.8)",transformOrigin:"100% 0%",opacity:0},"100%":{transform:"scaleX(1)",transformOrigin:"100% 0%",opacity:1}}),outKeyframes:new r.E4("antSlideRightOut",{"0%":{transform:"scaleX(1)",transformOrigin:"100% 0%",opacity:1},"100%":{transform:"scaleX(0.8)",transformOrigin:"100% 0%",opacity:0}})}},d=(e,t)=>{let{antCls:n}=e,r="".concat(n,"-").concat(t),{inKeyframes:i,outKeyframes:a}=u[t];return[(0,o.R)(r,i,a,e.motionDurationMid),{["\n ".concat(r,"-enter,\n ").concat(r,"-appear\n ")]:{transform:"scale(0)",transformOrigin:"0% 0%",opacity:0,animationTimingFunction:e.motionEaseOutQuint,"&-prepare":{transform:"scale(1)"}},["".concat(r,"-leave")]:{animationTimingFunction:e.motionEaseInQuint}}]}},76122:function(e,t,n){"use strict";n.d(t,{N:function(){return r}});let r=e=>({color:e.colorLink,textDecoration:"none",outline:"none",cursor:"pointer",transition:"color ".concat(e.motionDurationSlow),"&:focus, &:hover":{color:e.colorLinkHover},"&:active":{color:e.colorLinkActive}})},63709:function(e,t,n){"use strict";n.d(t,{Z:function(){return R}});var r=n(2265),o=n(61935),i=n(36760),a=n.n(i),l=n(1119),c=n(11993),s=n(26365),u=n(6989),d=n(50506),f=n(95814),p=["prefixCls","className","checked","defaultChecked","disabled","loadingIcon","checkedChildren","unCheckedChildren","onClick","onChange","onKeyDown"],h=r.forwardRef(function(e,t){var n,o=e.prefixCls,i=void 0===o?"rc-switch":o,h=e.className,m=e.checked,g=e.defaultChecked,v=e.disabled,y=e.loadingIcon,b=e.checkedChildren,x=e.unCheckedChildren,w=e.onClick,S=e.onChange,k=e.onKeyDown,E=(0,u.Z)(e,p),C=(0,d.Z)(!1,{value:m,defaultValue:g}),O=(0,s.Z)(C,2),j=O[0],P=O[1];function N(e,t){var n=j;return v||(P(n=e),null==S||S(n,t)),n}var M=a()(i,h,(n={},(0,c.Z)(n,"".concat(i,"-checked"),j),(0,c.Z)(n,"".concat(i,"-disabled"),v),n));return r.createElement("button",(0,l.Z)({},E,{type:"button",role:"switch","aria-checked":j,disabled:v,className:M,ref:t,onKeyDown:function(e){e.which===f.Z.LEFT?N(!1,e):e.which===f.Z.RIGHT&&N(!0,e),null==k||k(e)},onClick:function(e){var t=N(!j,e);null==w||w(t,e)}}),y,r.createElement("span",{className:"".concat(i,"-inner")},r.createElement("span",{className:"".concat(i,"-inner-checked")},b),r.createElement("span",{className:"".concat(i,"-inner-unchecked")},x)))});h.displayName="Switch";var 
m=n(6694),g=n(71744),v=n(86586),y=n(33759),b=n(352),x=n(36360),w=n(12918),S=n(80669),k=n(3104);let E=e=>{let{componentCls:t,trackHeightSM:n,trackPadding:r,trackMinWidthSM:o,innerMinMarginSM:i,innerMaxMarginSM:a,handleSizeSM:l,calc:c}=e,s="".concat(t,"-inner"),u=(0,b.bf)(c(l).add(c(r).mul(2)).equal()),d=(0,b.bf)(c(a).mul(2).equal());return{[t]:{["&".concat(t,"-small")]:{minWidth:o,height:n,lineHeight:(0,b.bf)(n),["".concat(t,"-inner")]:{paddingInlineStart:a,paddingInlineEnd:i,["".concat(s,"-checked")]:{marginInlineStart:"calc(-100% + ".concat(u," - ").concat(d,")"),marginInlineEnd:"calc(100% - ".concat(u," + ").concat(d,")")},["".concat(s,"-unchecked")]:{marginTop:c(n).mul(-1).equal(),marginInlineStart:0,marginInlineEnd:0}},["".concat(t,"-handle")]:{width:l,height:l},["".concat(t,"-loading-icon")]:{top:c(c(l).sub(e.switchLoadingIconSize)).div(2).equal(),fontSize:e.switchLoadingIconSize},["&".concat(t,"-checked")]:{["".concat(t,"-inner")]:{paddingInlineStart:i,paddingInlineEnd:a,["".concat(s,"-checked")]:{marginInlineStart:0,marginInlineEnd:0},["".concat(s,"-unchecked")]:{marginInlineStart:"calc(100% - ".concat(u," + ").concat(d,")"),marginInlineEnd:"calc(-100% + ".concat(u," - ").concat(d,")")}},["".concat(t,"-handle")]:{insetInlineStart:"calc(100% - ".concat((0,b.bf)(c(l).add(r).equal()),")")}},["&:not(".concat(t,"-disabled):active")]:{["&:not(".concat(t,"-checked) ").concat(s)]:{["".concat(s,"-unchecked")]:{marginInlineStart:c(e.marginXXS).div(2).equal(),marginInlineEnd:c(e.marginXXS).mul(-1).div(2).equal()}},["&".concat(t,"-checked ").concat(s)]:{["".concat(s,"-checked")]:{marginInlineStart:c(e.marginXXS).mul(-1).div(2).equal(),marginInlineEnd:c(e.marginXXS).div(2).equal()}}}}}}},C=e=>{let{componentCls:t,handleSize:n,calc:r}=e;return{[t]:{["".concat(t,"-loading-icon").concat(e.iconCls)]:{position:"relative",top:r(r(n).sub(e.fontSize)).div(2).equal(),color:e.switchLoadingIconColor,verticalAlign:"top"},["&".concat(t,"-checked ").concat(t,"-loading-icon")]:{color:e.switchColor}}}},O=e=>{let{componentCls:t,trackPadding:n,handleBg:r,handleShadow:o,handleSize:i,calc:a}=e,l="".concat(t,"-handle");return{[t]:{[l]:{position:"absolute",top:n,insetInlineStart:n,width:i,height:i,transition:"all ".concat(e.switchDuration," ease-in-out"),"&::before":{position:"absolute",top:0,insetInlineEnd:0,bottom:0,insetInlineStart:0,backgroundColor:r,borderRadius:a(i).div(2).equal(),boxShadow:o,transition:"all ".concat(e.switchDuration," ease-in-out"),content:'""'}},["&".concat(t,"-checked ").concat(l)]:{insetInlineStart:"calc(100% - ".concat((0,b.bf)(a(i).add(n).equal()),")")},["&:not(".concat(t,"-disabled):active")]:{["".concat(l,"::before")]:{insetInlineEnd:e.switchHandleActiveInset,insetInlineStart:0},["&".concat(t,"-checked ").concat(l,"::before")]:{insetInlineEnd:0,insetInlineStart:e.switchHandleActiveInset}}}}},j=e=>{let{componentCls:t,trackHeight:n,trackPadding:r,innerMinMargin:o,innerMaxMargin:i,handleSize:a,calc:l}=e,c="".concat(t,"-inner"),s=(0,b.bf)(l(a).add(l(r).mul(2)).equal()),u=(0,b.bf)(l(i).mul(2).equal());return{[t]:{[c]:{display:"block",overflow:"hidden",borderRadius:100,height:"100%",paddingInlineStart:i,paddingInlineEnd:o,transition:"padding-inline-start ".concat(e.switchDuration," ease-in-out, padding-inline-end ").concat(e.switchDuration," ease-in-out"),["".concat(c,"-checked, ").concat(c,"-unchecked")]:{display:"block",color:e.colorTextLightSolid,fontSize:e.fontSizeSM,transition:"margin-inline-start ".concat(e.switchDuration," ease-in-out, margin-inline-end ").concat(e.switchDuration," 
ease-in-out"),pointerEvents:"none"},["".concat(c,"-checked")]:{marginInlineStart:"calc(-100% + ".concat(s," - ").concat(u,")"),marginInlineEnd:"calc(100% - ".concat(s," + ").concat(u,")")},["".concat(c,"-unchecked")]:{marginTop:l(n).mul(-1).equal(),marginInlineStart:0,marginInlineEnd:0}},["&".concat(t,"-checked ").concat(c)]:{paddingInlineStart:o,paddingInlineEnd:i,["".concat(c,"-checked")]:{marginInlineStart:0,marginInlineEnd:0},["".concat(c,"-unchecked")]:{marginInlineStart:"calc(100% - ".concat(s," + ").concat(u,")"),marginInlineEnd:"calc(-100% + ".concat(s," - ").concat(u,")")}},["&:not(".concat(t,"-disabled):active")]:{["&:not(".concat(t,"-checked) ").concat(c)]:{["".concat(c,"-unchecked")]:{marginInlineStart:l(r).mul(2).equal(),marginInlineEnd:l(r).mul(-1).mul(2).equal()}},["&".concat(t,"-checked ").concat(c)]:{["".concat(c,"-checked")]:{marginInlineStart:l(r).mul(-1).mul(2).equal(),marginInlineEnd:l(r).mul(2).equal()}}}}}},P=e=>{let{componentCls:t,trackHeight:n,trackMinWidth:r}=e;return{[t]:Object.assign(Object.assign(Object.assign(Object.assign({},(0,w.Wf)(e)),{position:"relative",display:"inline-block",boxSizing:"border-box",minWidth:r,height:n,lineHeight:"".concat((0,b.bf)(n)),verticalAlign:"middle",background:e.colorTextQuaternary,border:"0",borderRadius:100,cursor:"pointer",transition:"all ".concat(e.motionDurationMid),userSelect:"none",["&:hover:not(".concat(t,"-disabled)")]:{background:e.colorTextTertiary}}),(0,w.Qy)(e)),{["&".concat(t,"-checked")]:{background:e.switchColor,["&:hover:not(".concat(t,"-disabled)")]:{background:e.colorPrimaryHover}},["&".concat(t,"-loading, &").concat(t,"-disabled")]:{cursor:"not-allowed",opacity:e.switchDisabledOpacity,"*":{boxShadow:"none",cursor:"not-allowed"}},["&".concat(t,"-rtl")]:{direction:"rtl"}})}};var N=(0,S.I$)("Switch",e=>{let t=(0,k.TS)(e,{switchDuration:e.motionDurationMid,switchColor:e.colorPrimary,switchDisabledOpacity:e.opacityLoading,switchLoadingIconSize:e.calc(e.fontSizeIcon).mul(.75).equal(),switchLoadingIconColor:"rgba(0, 0, 0, ".concat(e.opacityLoading,")"),switchHandleActiveInset:"-30%"});return[P(t),j(t),O(t),C(t),E(t)]},e=>{let{fontSize:t,lineHeight:n,controlHeight:r,colorWhite:o}=e,i=t*n,a=r/2,l=i-4,c=a-4;return{trackHeight:i,trackHeightSM:a,trackMinWidth:2*l+8,trackMinWidthSM:2*c+4,trackPadding:2,handleBg:o,handleSize:l,handleSizeSM:c,handleShadow:"0 2px 4px 0 ".concat(new x.C("#00230b").setAlpha(.2).toRgbString()),innerMinMargin:l/2,innerMaxMargin:l+2+4,innerMinMarginSM:c/2,innerMaxMarginSM:c+2+4}}),M=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let I=r.forwardRef((e,t)=>{let{prefixCls:n,size:i,disabled:l,loading:c,className:s,rootClassName:u,style:f,checked:p,value:b,defaultChecked:x,defaultValue:w,onChange:S}=e,k=M(e,["prefixCls","size","disabled","loading","className","rootClassName","style","checked","value","defaultChecked","defaultValue","onChange"]),[E,C]=(0,d.Z)(!1,{value:null!=p?p:b,defaultValue:null!=x?x:w}),{getPrefixCls:O,direction:j,switch:P}=r.useContext(g.E_),I=r.useContext(v.Z),R=(null!=l?l:I)||c,T=O("switch",n),A=r.createElement("div",{className:"".concat(T,"-handle")},c&&r.createElement(o.Z,{className:"".concat(T,"-loading-icon")})),[_,D,Z]=N(T),L=(0,y.Z)(i),z=a()(null==P?void 
0:P.className,{["".concat(T,"-small")]:"small"===L,["".concat(T,"-loading")]:c,["".concat(T,"-rtl")]:"rtl"===j},s,u,D,Z),B=Object.assign(Object.assign({},null==P?void 0:P.style),f);return _(r.createElement(m.Z,{component:"Switch"},r.createElement(h,Object.assign({},k,{checked:E,onChange:function(){C(arguments.length<=0?void 0:arguments[0]),null==S||S.apply(void 0,arguments)},prefixCls:T,className:z,style:B,disabled:R,ref:t,loadingIcon:A}))))});I.__ANT_SWITCH=!0;var R=I},28181:function(e,t,n){"use strict";n.d(t,{Z:function(){return of}});var r,o,i=n(2265),a={},l="rc-table-internal-hook",c=n(26365),s=n(58525),u=n(27380),d=n(16671),f=n(54887);function p(e){var t=i.createContext(void 0);return{Context:t,Provider:function(e){var n=e.value,r=e.children,o=i.useRef(n);o.current=n;var a=i.useState(function(){return{getValue:function(){return o.current},listeners:new Set}}),l=(0,c.Z)(a,1)[0];return(0,u.Z)(function(){(0,f.unstable_batchedUpdates)(function(){l.listeners.forEach(function(e){e(n)})})},[n]),i.createElement(t.Provider,{value:l},r)},defaultValue:e}}function h(e,t){var n=(0,s.Z)("function"==typeof t?t:function(e){if(void 0===t)return e;if(!Array.isArray(t))return e[t];var n={};return t.forEach(function(t){n[t]=e[t]}),n}),r=i.useContext(null==e?void 0:e.Context),o=r||{},a=o.listeners,l=o.getValue,f=i.useRef();f.current=n(r?l():null==e?void 0:e.defaultValue);var p=i.useState({}),h=(0,c.Z)(p,2)[1];return(0,u.Z)(function(){if(r)return a.add(e),function(){a.delete(e)};function e(e){var t=n(e);(0,d.Z)(f.current,t,!0)||h({})}},[r]),f.current}var m=n(1119),g=n(28791);function v(){var e=i.createContext(null);function t(){return i.useContext(e)}return{makeImmutable:function(n,r){var o=(0,g.Yr)(n),a=function(a,l){var c=o?{ref:l}:{},s=i.useRef(0),u=i.useRef(a);return null!==t()?i.createElement(n,(0,m.Z)({},a,c)):((!r||r(u.current,a))&&(s.current+=1),u.current=a,i.createElement(e.Provider,{value:s.current},i.createElement(n,(0,m.Z)({},a,c))))};return o?i.forwardRef(a):a},responseImmutable:function(e,n){var r=(0,g.Yr)(e),o=function(n,o){return t(),i.createElement(e,(0,m.Z)({},n,r?{ref:o}:{}))};return r?i.memo(i.forwardRef(o),n):i.memo(o,n)},useImmutableMark:t}}var y=v();y.makeImmutable,y.responseImmutable,y.useImmutableMark;var b=v(),x=b.makeImmutable,w=b.responseImmutable,S=b.useImmutableMark,k=p();i.memo(function(){var e,t,n,r,o,a=(t=i.useRef(0),t.current+=1,n=i.useRef(void 0),r=[],Object.keys(e||{}).map(function(t){var o;(null==e?void 0:e[t])!==(null===(o=n.current)||void 0===o?void 0:o[t])&&r.push(t)}),n.current=e,o=i.useRef([]),r.length&&(o.current=r),i.useDebugValue(t.current),i.useDebugValue(o.current.join(", ")),t.current);return i.createElement("h1",null,"Render Times: ",a)}).displayName="RenderBlock";var E=n(41154),C=n(31686),O=n(11993),j=n(36760),P=n.n(j),N=n(6397),M=n(16847),I=n(32559),R=i.createContext({renderWithProps:!1});function T(e){var t=[],n={};return e.forEach(function(e){for(var r=e||{},o=r.key,i=r.dataIndex,a=o||(null==i?[]:Array.isArray(i)?i:[i]).join("-")||"RC_TABLE_KEY";n[a];)a="".concat(a,"_next");n[a]=!0,t.push(a)}),t}var A=n(74126),_=function(e){var t,n=e.ellipsis,r=e.rowType,o=e.children,a=!0===n?{showTitle:!0}:n;return a&&(a.showTitle||"header"===r)&&("string"==typeof o||"number"==typeof o?t=o.toString():i.isValidElement(o)&&"string"==typeof o.props.children&&(t=o.props.children)),t},D=i.memo(function(e){var 
t,n,r,o,a,l,s,u,f,p,g=e.component,v=e.children,y=e.ellipsis,b=e.scope,x=e.prefixCls,w=e.className,j=e.align,I=e.record,T=e.render,D=e.dataIndex,Z=e.renderIndex,L=e.shouldCellUpdate,z=e.index,B=e.rowType,F=e.colSpan,H=e.rowSpan,q=e.fixLeft,W=e.fixRight,K=e.firstFixLeft,V=e.lastFixLeft,U=e.firstFixRight,G=e.lastFixRight,X=e.appendNode,$=e.additionalProps,Y=void 0===$?{}:$,Q=e.isSticky,J="".concat(x,"-cell"),ee=h(k,["supportSticky","allColumnsFixedLeft"]),et=ee.supportSticky,en=ee.allColumnsFixedLeft,er=(t=i.useContext(R),n=S(),(0,N.Z)(function(){if(null!=v)return[v];var e=null==D||""===D?[]:Array.isArray(D)?D:[D],n=(0,M.Z)(I,e),r=n,o=void 0;if(T){var a=T(n,I,Z);!a||"object"!==(0,E.Z)(a)||Array.isArray(a)||i.isValidElement(a)?r=a:(r=a.children,o=a.props,t.renderWithProps=!0)}return[r,o]},[n,I,v,D,T,Z],function(e,n){if(L){var r=(0,c.Z)(e,2)[1];return L((0,c.Z)(n,2)[1],r)}return!!t.renderWithProps||!(0,d.Z)(e,n,!0)})),eo=(0,c.Z)(er,2),ei=eo[0],ea=eo[1],el={},ec="number"==typeof q&&et,es="number"==typeof W&&et;ec&&(el.position="sticky",el.left=q),es&&(el.position="sticky",el.right=W);var eu=null!==(r=null!==(o=null!==(a=null==ea?void 0:ea.colSpan)&&void 0!==a?a:Y.colSpan)&&void 0!==o?o:F)&&void 0!==r?r:1,ed=null!==(l=null!==(s=null!==(u=null==ea?void 0:ea.rowSpan)&&void 0!==u?u:Y.rowSpan)&&void 0!==s?s:H)&&void 0!==l?l:1,ef=h(k,function(e){var t,n;return[(t=ed||1,n=e.hoverStartRow,z<=e.hoverEndRow&&z+t-1>=n),e.onHover]}),ep=(0,c.Z)(ef,2),eh=ep[0],em=ep[1],eg=(0,A.zX)(function(e){var t;I&&em(z,z+ed-1),null==Y||null===(t=Y.onMouseEnter)||void 0===t||t.call(Y,e)}),ev=(0,A.zX)(function(e){var t;I&&em(-1,-1),null==Y||null===(t=Y.onMouseLeave)||void 0===t||t.call(Y,e)});if(0===eu||0===ed)return null;var ey=null!==(f=Y.title)&&void 0!==f?f:_({rowType:B,ellipsis:y,children:ei}),eb=P()(J,w,(p={},(0,O.Z)((0,O.Z)((0,O.Z)((0,O.Z)((0,O.Z)((0,O.Z)((0,O.Z)((0,O.Z)((0,O.Z)((0,O.Z)(p,"".concat(J,"-fix-left"),ec&&et),"".concat(J,"-fix-left-first"),K&&et),"".concat(J,"-fix-left-last"),V&&et),"".concat(J,"-fix-left-all"),V&&en&&et),"".concat(J,"-fix-right"),es&&et),"".concat(J,"-fix-right-first"),U&&et),"".concat(J,"-fix-right-last"),G&&et),"".concat(J,"-ellipsis"),y),"".concat(J,"-with-append"),X),"".concat(J,"-fix-sticky"),(ec||es)&&Q&&et),(0,O.Z)(p,"".concat(J,"-row-hover"),!ea&&eh)),Y.className,null==ea?void 0:ea.className),ex={};j&&(ex.textAlign=j);var ew=(0,C.Z)((0,C.Z)((0,C.Z)((0,C.Z)({},el),Y.style),ex),null==ea?void 0:ea.style),eS=ei;return"object"!==(0,E.Z)(eS)||Array.isArray(eS)||i.isValidElement(eS)||(eS=null),y&&(V||U)&&(eS=i.createElement("span",{className:"".concat(J,"-content")},eS)),i.createElement(g,(0,m.Z)({},ea,Y,{className:eb,style:ew,title:ey,scope:b,onMouseEnter:eg,onMouseLeave:ev,colSpan:1!==eu?eu:null,rowSpan:1!==ed?ed:null}),X,eS)});function Z(e,t,n,r,o,i){var a,l,c=n[e]||{},s=n[t]||{};"left"===c.fixed?a=r.left["rtl"===o?t:e]:"right"===s.fixed&&(l=r.right["rtl"===o?e:t]);var u=!1,d=!1,f=!1,p=!1,h=n[t+1],m=n[e-1],g=!(null!=i&&i.children);return"rtl"===o?void 0!==a?p=!(m&&"left"===m.fixed)&&g:void 0!==l&&(f=!(h&&"right"===h.fixed)&&g):void 0!==a?u=!(h&&"left"===h.fixed)&&g:void 0!==l&&(d=!(m&&"right"===m.fixed)&&g),{fixLeft:a,fixRight:l,lastFixLeft:u,firstFixRight:d,lastFixRight:f,firstFixLeft:p,isSticky:r.isSticky}}var L=i.createContext({}),z=n(6989),B=["children"];function F(e){return e.children}F.Row=function(e){var t=e.children,n=(0,z.Z)(e,B);return i.createElement("tr",n,t)},F.Cell=function(e){var t=e.className,n=e.index,r=e.children,o=e.colSpan,a=void 
0===o?1:o,l=e.rowSpan,c=e.align,s=h(k,["prefixCls","direction"]),u=s.prefixCls,d=s.direction,f=i.useContext(L),p=f.scrollColumnIndex,g=f.stickyOffsets,v=f.flattenColumns,y=f.columns,b=n+a-1+1===p?a+1:a,x=Z(n,n+b-1,v,g,d,null==y?void 0:y[n]);return i.createElement(D,(0,m.Z)({className:t,index:n,component:"td",prefixCls:u,record:null,dataIndex:null,align:c,colSpan:b,rowSpan:l,render:function(){return r}},x))};var H=w(function(e){var t=e.children,n=e.stickyOffsets,r=e.flattenColumns,o=e.columns,a=h(k,"prefixCls"),l=r.length-1,c=r[l],s=i.useMemo(function(){return{stickyOffsets:n,flattenColumns:r,scrollColumnIndex:null!=c&&c.scrollbar?l:null,columns:o}},[c,r,l,n,o]);return i.createElement(L.Provider,{value:s},i.createElement("tfoot",{className:"".concat(a,"-summary")},t))}),q=n(31474),W=n(2857),K=n(10281),V=n(3208),U=n(18242);function G(e,t,n,r){return i.useMemo(function(){if(null!=n&&n.size){for(var o=[],i=0;i<(null==e?void 0:e.length);i+=1)!function e(t,n,r,o,i,a,l){t.push({record:n,indent:r,index:l});var c=a(n),s=null==i?void 0:i.has(c);if(n&&Array.isArray(n[o])&&s)for(var u=0;u1?n-1:0),o=1;o=0;c-=1){var s=t[c],u=n&&n[c],d=u&&u[ei];if(s||d||l){var f=d||{},p=(f.columnType,(0,z.Z)(f,ea));o.unshift(i.createElement("col",(0,m.Z)({key:c,style:{width:s}},p))),l=!0}}return i.createElement("colgroup",null,o)},ec=n(83145),es=["className","noData","columns","flattenColumns","colWidths","columCount","stickyOffsets","direction","fixHeader","stickyTopOffset","stickyBottomOffset","stickyClassName","onScroll","maxContentScroll","children"],eu=i.forwardRef(function(e,t){var n=e.className,r=e.noData,o=e.columns,a=e.flattenColumns,l=e.colWidths,c=e.columCount,s=e.stickyOffsets,u=e.direction,d=e.fixHeader,f=e.stickyTopOffset,p=e.stickyBottomOffset,m=e.stickyClassName,v=e.onScroll,y=e.maxContentScroll,b=e.children,x=(0,z.Z)(e,es),w=h(k,["prefixCls","scrollbarSize","isSticky"]),S=w.prefixCls,E=w.scrollbarSize,j=w.isSticky,N=j&&!d?0:E,M=i.useRef(null),I=i.useCallback(function(e){(0,g.mH)(t,e),(0,g.mH)(M,e)},[]);i.useEffect(function(){var e;function t(e){var t=e.currentTarget,n=e.deltaX;n&&(v({currentTarget:t,scrollLeft:t.scrollLeft+n}),e.preventDefault())}return null===(e=M.current)||void 0===e||e.addEventListener("wheel",t),function(){var e;null===(e=M.current)||void 0===e||e.removeEventListener("wheel",t)}},[]);var R=i.useMemo(function(){return a.every(function(e){return e.width})},[a]),T=a[a.length-1],A={fixed:T?T.fixed:null,scrollbar:!0,onHeaderCell:function(){return{className:"".concat(S,"-cell-scrollbar")}}},_=(0,i.useMemo)(function(){return N?[].concat((0,ec.Z)(o),[A]):o},[N,o]),D=(0,i.useMemo)(function(){return N?[].concat((0,ec.Z)(a),[A]):a},[N,a]),Z=(0,i.useMemo)(function(){var e=s.right,t=s.left;return(0,C.Z)((0,C.Z)({},s),{},{left:"rtl"===u?[].concat((0,ec.Z)(t.map(function(e){return e+N})),[0]):t,right:"rtl"===u?e:[].concat((0,ec.Z)(e.map(function(e){return e+N})),[0]),isSticky:j})},[N,s,j]),L=(0,i.useMemo)(function(){for(var e=[],t=0;t1?"colgroup":"col":null,ellipsis:a.ellipsis,align:a.align,component:a.title?l:c,prefixCls:f,key:g[t]},s,{additionalProps:n,rowType:"header"}))}))}ef.displayName="HeaderRow";var ep=w(function(e){var t=e.stickyOffsets,n=e.columns,r=e.flattenColumns,o=e.onHeaderRow,a=h(k,["prefixCls","getComponent"]),l=a.prefixCls,c=a.getComponent,s=i.useMemo(function(){return function(e){var t=[];!function e(n,r){var o=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0;t[o]=t[o]||[];var i=r;return n.filter(Boolean).map(function(n){var 
r={key:n.key,className:n.className||"",children:n.title,column:n,colStart:i},a=1,l=n.children;return l&&l.length>0&&(a=e(l,i,o+1).reduce(function(e,t){return e+t},0),r.hasSubColumns=!0),"colSpan"in n&&(a=n.colSpan),"rowSpan"in n&&(r.rowSpan=n.rowSpan),r.colSpan=a,r.colEnd=r.colStart+a-1,t[o].push(r),i+=a,a})}(e,0);for(var n=t.length,r=function(e){t[e].forEach(function(t){("rowSpan"in t)||t.hasSubColumns||(t.rowSpan=n-e)})},o=0;o1&&void 0!==arguments[1]?arguments[1]:"";return"number"==typeof t?t:t.endsWith("%")?e*parseFloat(t)/100:null}var eg=["children"],ev=["fixed"];function ey(e){return(0,eh.Z)(e).filter(function(e){return i.isValidElement(e)}).map(function(e){var t=e.key,n=e.props,r=n.children,o=(0,z.Z)(n,eg),i=(0,C.Z)({key:t},o);return r&&(i.children=ey(r)),i})}function eb(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"key";return e.filter(function(e){return e&&"object"===(0,E.Z)(e)}).reduce(function(e,n,r){var o=n.fixed,i=!0===o?"left":o,a="".concat(t,"-").concat(r),l=n.children;return l&&l.length>0?[].concat((0,ec.Z)(e),(0,ec.Z)(eb(l,a).map(function(e){return(0,C.Z)({fixed:i},e)}))):[].concat((0,ec.Z)(e),[(0,C.Z)((0,C.Z)({key:a},n),{},{fixed:i})])},[])}var ex=function(e,t){var n=e.prefixCls,r=e.columns,o=e.children,l=e.expandable,s=e.expandedKeys,u=e.columnTitle,d=e.getRowKey,f=e.onTriggerExpand,p=e.expandIcon,h=e.rowExpandable,m=e.expandIconColumnIndex,g=e.direction,v=e.expandRowByClick,y=e.columnWidth,b=e.fixed,x=e.scrollWidth,w=e.clientWidth,S=i.useMemo(function(){return function e(t){return t.filter(function(e){return e&&"object"===(0,E.Z)(e)&&!e.hidden}).map(function(t){var n=t.children;return n&&n.length>0?(0,C.Z)((0,C.Z)({},t),{},{children:e(n)}):t})}((r||ey(o)||[]).slice())},[r,o]),k=i.useMemo(function(){if(l){var e,t=S.slice();if(!t.includes(a)){var r=m||0;r>=0&&t.splice(r,0,a)}var o=t.indexOf(a);t=t.filter(function(e,t){return e!==a||t===o});var c=S[o];e=("left"===b||b)&&!m?"left":("right"===b||b)&&m===S.length?"right":c?c.fixed:null;var g=(0,O.Z)((0,O.Z)((0,O.Z)((0,O.Z)((0,O.Z)((0,O.Z)({},ei,{className:"".concat(n,"-expand-icon-col"),columnType:"EXPAND_COLUMN"}),"title",u),"fixed",e),"className","".concat(n,"-row-expand-icon-cell")),"width",y),"render",function(e,t,r){var o=d(t,r),a=p({prefixCls:n,expanded:s.has(o),expandable:!h||h(t),record:t,onExpand:f});return v?i.createElement("span",{onClick:function(e){return e.stopPropagation()}},a):a});return t.map(function(e){return e===a?g:e})}return S.filter(function(e){return e!==a})},[l,S,d,s,p,g]),j=i.useMemo(function(){var e=k;return t&&(e=t(e)),e.length||(e=[{render:function(){return null}}]),e},[t,k,g]),P=i.useMemo(function(){return"rtl"===g?eb(j).map(function(e){var t=e.fixed,n=(0,z.Z)(e,ev),r=t;return"left"===t?r="right":"right"===t&&(r="left"),(0,C.Z)({fixed:r},n)}):eb(j)},[j,g,x]),N=i.useMemo(function(){if(x&&x>0){var e=0,t=0;P.forEach(function(n){var r=em(x,n.width);r?e+=r:t+=1});var n=Math.max(x,w),r=Math.max(n-e,t),o=t,i=r/t,a=0,l=P.map(function(e){var t=(0,C.Z)({},e),n=em(x,t.width);if(n)t.width=n;else{var l=Math.floor(i);t.width=1===o?r:l,r-=l,o-=1}return a+=t.width,t});if(a=f&&(r=f-p),a({scrollLeft:r/f*(d+2)}),x.current.x=e.pageX},I=function(){if(o.current){var 
e=eO(o.current).top,t=e+o.current.offsetHeight,n=s===window?document.documentElement.scrollTop+window.innerHeight:eO(s).top+s.clientHeight;t-(0,V.Z)()<=n||e>=n-l?b(function(e){return(0,C.Z)((0,C.Z)({},e),{},{isHiddenScrollBar:!0})}):b(function(e){return(0,C.Z)((0,C.Z)({},e),{},{isHiddenScrollBar:!1})})}},R=function(e){b(function(t){return(0,C.Z)((0,C.Z)({},t),{},{scrollLeft:e/d*f||0})})};return(i.useImperativeHandle(t,function(){return{setScrollLeft:R}}),i.useEffect(function(){var e=eC(document.body,"mouseup",N,!1),t=eC(document.body,"mousemove",M,!1);return I(),function(){e.remove(),t.remove()}},[p,E]),i.useEffect(function(){var e=eC(s,"scroll",I,!1),t=eC(window,"resize",I,!1);return function(){e.remove(),t.remove()}},[s]),i.useEffect(function(){y.isHiddenScrollBar||b(function(e){var t=o.current;return t?(0,C.Z)((0,C.Z)({},e),{},{scrollLeft:t.scrollLeft/t.scrollWidth*t.clientWidth}):e})},[y.isHiddenScrollBar]),d<=f||!p||y.isHiddenScrollBar)?null:i.createElement("div",{style:{height:(0,V.Z)(),width:f,bottom:l},className:"".concat(u,"-sticky-scroll")},i.createElement("div",{onMouseDown:function(e){e.persist(),x.current.delta=e.pageX-y.scrollLeft,x.current.x=0,j(!0),e.preventDefault()},ref:m,className:P()("".concat(u,"-sticky-scroll-bar"),(0,O.Z)({},"".concat(u,"-sticky-scroll-bar-active"),E)),style:{width:"".concat(p,"px"),transform:"translate3d(".concat(y.scrollLeft,"px, 0, 0)")}}))}),eP="rc-table",eN=[],eM={};function eI(){return"No Data"}var eR=i.forwardRef(function(e,t){var n,r=(0,C.Z)({rowKey:"key",prefixCls:eP,emptyText:eI},e),o=r.prefixCls,a=r.className,u=r.rowClassName,f=r.style,p=r.data,h=r.rowKey,g=r.scroll,v=r.tableLayout,y=r.direction,b=r.title,x=r.footer,w=r.summary,S=r.caption,j=r.id,I=r.showHeader,R=r.components,A=r.emptyText,_=r.onRow,D=r.onHeaderRow,L=r.internalHooks,B=r.transformColumns,G=r.internalRefs,X=r.tailor,$=r.getContainerWidth,Y=r.sticky,Q=p||eN,J=!!Q.length,ee=L===l,et=i.useCallback(function(e,t){return(0,M.Z)(R,e)||t},[R]),en=i.useMemo(function(){return"function"==typeof h?h:function(e){return e&&e[h]}},[h]),ei=et(["body"]),ea=(tH=i.useState(-1),tW=(tq=(0,c.Z)(tH,2))[0],tK=tq[1],tV=i.useState(-1),tG=(tU=(0,c.Z)(tV,2))[0],tX=tU[1],[tW,tG,i.useCallback(function(e,t){tK(e),tX(t)},[])]),es=(0,c.Z)(ea,3),eu=es[0],ef=es[1],eh=es[2],em=(tJ=(tY=r.expandable,tQ=(0,z.Z)(r,eo),!1===(t$="expandable"in r?(0,C.Z)((0,C.Z)({},tQ),tY):tQ).showExpandColumn&&(t$.expandIconColumnIndex=-1),t$).expandIcon,t0=t$.expandedRowKeys,t1=t$.defaultExpandedRowKeys,t2=t$.defaultExpandAllRows,t6=t$.expandedRowRender,t4=t$.onExpand,t3=t$.onExpandedRowsChange,t5=t$.childrenColumnName||"children",t8=i.useMemo(function(){return t6?"row":!!(r.expandable&&r.internalHooks===l&&r.expandable.__PARENT_RENDER_ICON__||Q.some(function(e){return e&&"object"===(0,E.Z)(e)&&e[t5]}))&&"nest"},[!!t6,Q]),t7=i.useState(function(){if(t1)return t1;if(t2){var e;return e=[],function t(n){(n||[]).forEach(function(n,r){e.push(en(n,r)),t(n[t5])})}(Q),e}return[]}),ne=(t9=(0,c.Z)(t7,2))[0],nt=t9[1],nn=i.useMemo(function(){return new Set(t0||ne||[])},[t0,ne]),nr=i.useCallback(function(e){var t,n=en(e,Q.indexOf(e)),r=nn.has(n);r?(nn.delete(n),t=(0,ec.Z)(nn)):t=[].concat((0,ec.Z)(nn),[n]),nt(t),t4&&t4(!r,e),t3&&t3(t)},[en,nn,Q,t4,t3]),[t$,t8,nn,tJ||ew,t5,nr]),eg=(0,c.Z)(em,6),ev=eg[0],ey=eg[1],eb=eg[2],eC=eg[3],eO=eg[4],eR=eg[5],eT=null==g?void 
0:g.x,eA=i.useState(0),e_=(0,c.Z)(eA,2),eD=e_[0],eZ=e_[1],eL=ex((0,C.Z)((0,C.Z)((0,C.Z)({},r),ev),{},{expandable:!!ev.expandedRowRender,columnTitle:ev.columnTitle,expandedKeys:eb,getRowKey:en,onTriggerExpand:eR,expandIcon:eC,expandIconColumnIndex:ev.expandIconColumnIndex,direction:y,scrollWidth:ee&&X&&"number"==typeof eT?eT:null,clientWidth:eD}),ee?B:null),ez=(0,c.Z)(eL,3),eB=ez[0],eF=ez[1],eH=ez[2],eq=null!=eH?eH:eT,eW=i.useMemo(function(){return{columns:eB,flattenColumns:eF}},[eB,eF]),eK=i.useRef(),eV=i.useRef(),eU=i.useRef(),eG=i.useRef();i.useImperativeHandle(t,function(){return{nativeElement:eK.current,scrollTo:function(e){var t;if(eU.current instanceof HTMLElement){var n=e.index,r=e.top,o=e.key;if(r)null===(i=eU.current)||void 0===i||i.scrollTo({top:r});else{var i,a,l=null!=o?o:en(Q[n]);null===(a=eU.current.querySelector('[data-row-key="'.concat(l,'"]')))||void 0===a||a.scrollIntoView()}}else null!==(t=eU.current)&&void 0!==t&&t.scrollTo&&eU.current.scrollTo(e)}}});var eX=i.useRef(),e$=i.useState(!1),eY=(0,c.Z)(e$,2),eQ=eY[0],eJ=eY[1],e0=i.useState(!1),e1=(0,c.Z)(e0,2),e2=e1[0],e6=e1[1],e4=eS(new Map),e3=(0,c.Z)(e4,2),e5=e3[0],e8=e3[1],e7=T(eF).map(function(e){return e5.get(e)}),e9=i.useMemo(function(){return e7},[e7.join("_")]),te=(no=eF.length,(0,i.useMemo)(function(){for(var e=[],t=[],n=0,r=0,o=0;o0)):(eJ(i>0),e6(i1?b-T:0,pointerEvents:"auto"}),_=i.useMemo(function(){return f?R<=1:0===M||0===R||R>1},[R,M,f]);_?A.visibility="hidden":f&&(A.height=null==p?void 0:p(R));var Z={};return(0===R||0===M)&&(Z.rowSpan=1,Z.colSpan=1),i.createElement(D,(0,m.Z)({className:P()(y,d),ellipsis:r.ellipsis,align:r.align,scope:r.rowScope,component:"div",prefixCls:n.prefixCls,key:S,record:s,index:l,renderIndex:c,dataIndex:v,render:_?function(){return null}:g,shouldCellUpdate:r.shouldCellUpdate},k,{appendNode:E,additionalProps:(0,C.Z)((0,C.Z)({},O),{},{style:A},Z)}))},eL=["data","index","className","rowKey","style","extra","getHeight"],ez=w(i.forwardRef(function(e,t){var n,r=e.data,o=e.index,a=e.className,l=e.rowKey,c=e.style,s=e.extra,u=e.getHeight,d=(0,z.Z)(e,eL),f=r.record,p=r.indent,g=r.index,v=h(k,["prefixCls","flattenColumns","fixColumn","componentWidth","scrollX"]),y=v.scrollX,b=v.flattenColumns,x=v.prefixCls,w=v.fixColumn,S=v.componentWidth,E=X(f,l,o,p),j=E.rowSupportExpand,N=E.expanded,M=E.rowProps,I=E.expandedRowRender,R=E.expandedRowClassName;if(j&&N){var T=I(f,o,p+1,N),A=null==R?void 0:R(f,o,p),_={};w&&(_={style:(0,O.Z)({},"--virtual-width","".concat(S,"px"))});var Z="".concat(x,"-expanded-row-cell");n=i.createElement("div",{className:P()("".concat(x,"-expanded-row"),"".concat(x,"-expanded-row-level-").concat(p+1),A)},i.createElement(D,{component:"div",prefixCls:x,className:P()(Z,(0,O.Z)({},"".concat(Z,"-fixed"),w)),additionalProps:_},T))}var L=(0,C.Z)((0,C.Z)({},c),{},{width:y});s&&(L.position="absolute",L.pointerEvents="none");var B=i.createElement("div",(0,m.Z)({},M,d,{ref:j?null:t,className:P()(a,"".concat(x,"-row"),null==M?void 0:M.className,(0,O.Z)({},"".concat(x,"-row-extra"),s)),style:(0,C.Z)((0,C.Z)({},L),null==M?void 0:M.style)}),b.map(function(e,t){return i.createElement(eZ,{key:t,rowInfo:E,column:e,colIndex:t,indent:p,index:o,renderIndex:g,record:f,inverse:s,getHeight:u})}));return j?i.createElement("div",{ref:t},B,n):B})),eB=w(i.forwardRef(function(e,t){var 
n,r=e.data,o=e.onScroll,a=h(k,["flattenColumns","onColumnResize","getRowKey","prefixCls","expandedKeys","childrenColumnName","emptyNode","scrollX"]),l=a.flattenColumns,s=a.onColumnResize,u=a.getRowKey,d=a.expandedKeys,f=a.prefixCls,p=a.childrenColumnName,g=a.emptyNode,v=a.scrollX,y=h(e_),b=y.sticky,x=y.scrollY,w=y.listItemHeight,S=i.useRef(),C=G(r,p,d,u),O=i.useMemo(function(){var e=0;return l.map(function(t){var n=t.width,r=t.key;return e+=n,[r,n,e]})},[l]),j=i.useMemo(function(){return O.map(function(e){return e[2]})},[O]);i.useEffect(function(){O.forEach(function(e){var t=(0,c.Z)(e,2);s(t[0],t[1])})},[O]),i.useImperativeHandle(t,function(){var e={scrollTo:function(e){var t;null===(t=S.current)||void 0===t||t.scrollTo(e)}};return Object.defineProperty(e,"scrollLeft",{get:function(){var e;return(null===(e=S.current)||void 0===e?void 0:e.getScrollInfo().x)||0},set:function(e){var t;null===(t=S.current)||void 0===t||t.scrollTo({left:e})}}),e});var N=function(e,t){var n=null===(o=C[t])||void 0===o?void 0:o.record,r=e.onCell;if(r){var o,i,a=r(n,t);return null!==(i=null==a?void 0:a.rowSpan)&&void 0!==i?i:1}return 1},M=i.useMemo(function(){return{columnsOffset:j}},[j]),I="".concat(f,"-tbody");if(C.length){var R={};b&&(R.position="sticky",R.bottom=0,"object"===(0,E.Z)(b)&&b.offsetScroll&&(R.bottom=b.offsetScroll)),n=i.createElement(eA.Z,{fullHeight:!1,ref:S,styles:{horizontalScrollBar:R},className:P()(I,"".concat(I,"-virtual")),height:x,itemHeight:w||24,data:C,itemKey:function(e){return u(e.record)},scrollWidth:v,onVirtualScroll:function(e){o({scrollLeft:e.x})},extraRender:function(e){var t=e.start,n=e.end,r=e.getSize,o=e.offsetY;if(n<0)return null;for(var a=l.filter(function(e){return 0===N(e,t)}),c=t,s=function(e){if(!(a=a.filter(function(t){return 0===N(t,e)})).length)return c=e,1},d=t;d>=0&&!s(d);d-=1);for(var f=l.filter(function(e){return 1!==N(e,n)}),p=n,h=function(e){if(!(f=f.filter(function(t){return 1!==N(t,e)})).length)return p=Math.max(e-1,n),1},m=n;m1})&&g.push(e)},y=c;y<=p;y+=1)if(v(y))continue;return g.map(function(e){var t=C[e],n=u(t.record,e),a=r(n);return i.createElement(ez,{key:e,data:t,rowKey:n,index:e,style:{top:-o+a.top},extra:!0,getHeight:function(t){var o=e+t-1,i=r(n,u(C[o].record,o));return i.bottom-i.top}})})}},function(e,t,n){var r=u(e.record,t);return i.createElement(ez,(0,m.Z)({data:e,rowKey:r,index:t},n))})}else n=i.createElement("div",{className:P()("".concat(f,"-placeholder"))},i.createElement(D,{component:"div",prefixCls:f},g));return i.createElement(eD.Provider,{value:M},n)})),eF=function(e,t){var n=t.ref,r=t.onScroll;return i.createElement(eB,{ref:n,data:e,onScroll:r})},eH=i.forwardRef(function(e,t){var n=e.columns,r=e.scroll,o=e.sticky,a=e.prefixCls,c=void 0===a?eP:a,s=e.className,u=e.listItemHeight,d=e.components,f=r||{},p=f.x,h=f.y;"number"!=typeof p&&(p=1),"number"!=typeof h&&(h=500);var g=i.useMemo(function(){return{sticky:o,scrollY:h,listItemHeight:u}},[o,h,u]);return i.createElement(e_.Provider,{value:g},i.createElement(eT,(0,m.Z)({},e,{className:P()(s,"".concat(c,"-virtual")),scroll:(0,C.Z)((0,C.Z)({},r),{},{x:p}),components:(0,C.Z)((0,C.Z)({},d),{},{body:eF}),columns:n,internalHooks:l,tailor:!0,ref:t})))});x(eH,void 0);var eq=n(70464),eW=n(76405),eK=n(25049),eV=n(63496),eU=n(15354),eG=n(15900),eX=i.createContext(null),e$=i.memo(function(e){for(var t,n=e.prefixCls,r=e.level,o=e.isStart,a=e.isEnd,l="".concat(n,"-indent-unit"),c=[],s=0;s1&&void 0!==arguments[1]?arguments[1]:null;return n.map(function(s,u){for(var 
d,f=eJ(r?r.pos:"0",u),p=e0(s[i],f),h=0;h1&&void 0!==arguments[1]?arguments[1]:{},f=d.initWrapper,p=d.processEntity,h=d.onProcessFinished,m=d.externalGetKey,g=d.childrenPropName,v=d.fieldNames,y=arguments.length>2?arguments[2]:void 0,b={},x={},w={posEntities:b,keyEntities:x};return f&&(w=f(w)||w),t=function(e){var t=e.node,n=e.index,r=e.pos,o=e.key,i=e.parentPos,a=e.level,l={node:t,nodes:e.nodes,index:n,key:o,pos:r,level:a},c=e0(o,r);b[r]=l,x[c]=l,l.parent=b[i],l.parent&&(l.parent.children=l.parent.children||[],l.parent.children.push(l)),p&&p(l,w)},n={externalGetKey:m||y,childrenPropName:g,fieldNames:v},i=(o=("object"===(0,E.Z)(n)?n:{externalGetKey:n})||{}).childrenPropName,a=o.externalGetKey,c=(l=e1(o.fieldNames)).key,s=l.children,u=i||s,a?"string"==typeof a?r=function(e){return e[a]}:"function"==typeof a&&(r=function(e){return a(e)}):r=function(e,t){return e0(e[c],t)},function n(o,i,a,l){var c=o?o[u]:e,s=o?eJ(a.pos,i):"0",d=o?[].concat((0,ec.Z)(l),[o]):[];if(o){var f=r(o,s);t({node:o,index:i,pos:s,key:f,parentPos:a.node?a.pos:null,level:a.level+1,nodes:d})}c&&c.forEach(function(e,t){n(e,t,{node:o,pos:s,level:a?a.level+1:-1},d)})}(null),h&&h(w),w}function e3(e,t){var n=t.expandedKeys,r=t.selectedKeys,o=t.loadedKeys,i=t.loadingKeys,a=t.checkedKeys,l=t.halfCheckedKeys,c=t.dragOverNodeKey,s=t.dropPosition,u=t.keyEntities[e];return{eventKey:e,expanded:-1!==n.indexOf(e),selected:-1!==r.indexOf(e),loaded:-1!==o.indexOf(e),loading:-1!==i.indexOf(e),checked:-1!==a.indexOf(e),halfChecked:-1!==l.indexOf(e),pos:String(u?u.pos:""),dragOver:c===e&&0===s,dragOverGapTop:c===e&&-1===s,dragOverGapBottom:c===e&&1===s}}function e5(e){var t=e.data,n=e.expanded,r=e.selected,o=e.checked,i=e.loaded,a=e.loading,l=e.halfChecked,c=e.dragOver,s=e.dragOverGapTop,u=e.dragOverGapBottom,d=e.pos,f=e.active,p=e.eventKey,h=(0,C.Z)((0,C.Z)({},t),{},{expanded:n,selected:r,checked:o,loaded:i,loading:a,halfChecked:l,dragOver:c,dragOverGapTop:s,dragOverGapBottom:u,pos:d,active:f,key:p});return"props"in h||Object.defineProperty(h,"props",{get:function(){return(0,I.ZP)(!1,"Second param return from event is node data instead of TreeNode instance. 
Please read value directly instead of reading from `props`."),e}}),h}var e8=["eventKey","className","style","dragOver","dragOverGapTop","dragOverGapBottom","isLeaf","isStart","isEnd","expanded","selected","checked","halfChecked","loading","domRef","active","data","onMouseMove","selectable"],e7="open",e9="close",te=function(e){(0,eU.Z)(n,e);var t=(0,eG.Z)(n);function n(){var e;(0,eW.Z)(this,n);for(var r=arguments.length,o=Array(r),a=0;a=0&&n.splice(r,1),n}function tr(e,t){var n=(e||[]).slice();return -1===n.indexOf(t)&&n.push(t),n}function to(e){return e.split("-")}function ti(e,t,n,r,o,i,a,l,c,s){var u,d,f=e.clientX,p=e.clientY,h=e.target.getBoundingClientRect(),m=h.top,g=h.height,v=(("rtl"===s?-1:1)*(((null==o?void 0:o.x)||0)-f)-12)/r,y=l[n.props.eventKey];if(p-1.5?i({dragNode:O,dropNode:j,dropPosition:1})?k=1:P=!1:i({dragNode:O,dropNode:j,dropPosition:0})?k=0:i({dragNode:O,dropNode:j,dropPosition:1})?k=1:P=!1:i({dragNode:O,dropNode:j,dropPosition:1})?k=1:P=!1,{dropPosition:k,dropLevelOffset:E,dropTargetKey:y.key,dropTargetPos:y.pos,dragOverNodeKey:S,dropContainerKey:0===k?null:(null===(d=y.parent)||void 0===d?void 0:d.key)||null,dropAllowed:P}}function ta(e,t){if(e)return t.multiple?e.slice():e.length?[e[0]]:e}function tl(e){var t;if(!e)return null;if(Array.isArray(e))t={checkedKeys:e,halfCheckedKeys:void 0};else{if("object"!==(0,E.Z)(e))return(0,I.ZP)(!1,"`checkedKeys` is not an array or an object"),null;t={checkedKeys:e.checked||void 0,halfCheckedKeys:e.halfChecked||void 0}}return t}function tc(e,t){var n=new Set;return(e||[]).forEach(function(e){!function e(r){if(!n.has(r)){var o=t[r];if(o){n.add(r);var i=o.parent;!o.node.disabled&&i&&e(i.key)}}}(e)}),(0,ec.Z)(n)}function ts(e,t){var n=new Set;return e.forEach(function(e){t.has(e)||n.add(e)}),n}function tu(e){var t=e||{},n=t.disabled,r=t.disableCheckbox,o=t.checkable;return!!(n||r)||!1===o}function td(e,t,n,r){var o,i=[];o=r||tu;var a=new Set(e.filter(function(e){var t=!!n[e];return t||i.push(e),t})),l=new Map,c=0;return Object.keys(n).forEach(function(e){var t=n[e],r=t.level,o=l.get(r);o||(o=new Set,l.set(r,o)),o.add(t),c=Math.max(c,r)}),(0,I.ZP)(!i.length,"Tree missing follow keys: ".concat(i.slice(0,100).map(function(e){return"'".concat(e,"'")}).join(", "))),!0===t?function(e,t,n,r){for(var o=new Set(e),i=new Set,a=0;a<=n;a+=1)(t.get(a)||new Set).forEach(function(e){var t=e.key,n=e.node,i=e.children,a=void 0===i?[]:i;o.has(t)&&!r(n)&&a.filter(function(e){return!r(e.node)}).forEach(function(e){o.add(e.key)})});for(var l=new Set,c=n;c>=0;c-=1)(t.get(c)||new Set).forEach(function(e){var t=e.parent;if(!(r(e.node)||!e.parent||l.has(e.parent.key))){if(r(e.parent.node)){l.add(t.key);return}var n=!0,a=!1;(t.children||[]).filter(function(e){return!r(e.node)}).forEach(function(e){var t=e.key,r=o.has(t);n&&!r&&(n=!1),!a&&(r||i.has(t))&&(a=!0)}),n&&o.add(t.key),a&&i.add(t.key),l.add(t.key)}});return{checkedKeys:Array.from(o),halfCheckedKeys:Array.from(ts(i,o))}}(a,l,c,o):function(e,t,n,r,o){for(var i=new Set(e),a=new Set(t),l=0;l<=r;l+=1)(n.get(l)||new Set).forEach(function(e){var t=e.key,n=e.node,r=e.children,l=void 0===r?[]:r;i.has(t)||a.has(t)||o(n)||l.filter(function(e){return!o(e.node)}).forEach(function(e){i.delete(e.key)})});a=new Set;for(var c=new Set,s=r;s>=0;s-=1)(n.get(s)||new Set).forEach(function(e){var t=e.parent;if(!(o(e.node)||!e.parent||c.has(e.parent.key))){if(o(e.parent.node)){c.add(t.key);return}var n=!0,r=!1;(t.children||[]).filter(function(e){return!o(e.node)}).forEach(function(e){var 
t=e.key,o=i.has(t);n&&!o&&(n=!1),!r&&(o||a.has(t))&&(r=!0)}),n||i.delete(t.key),r&&a.add(t.key),c.add(t.key)}});return{checkedKeys:Array.from(i),halfCheckedKeys:Array.from(ts(a,i))}}(a,t.halfCheckedKeys,l,c,o)}tt.displayName="TreeNode",tt.isTreeNode=1;var tf=n(50506),tp=n(13613),th=n(20873),tm=n(6694),tg=n(34709),tv=n(71744),ty=n(86586),tb=n(64024),tx=n(39109);let tw=i.createContext(null);var tS=n(352),tk=n(12918),tE=n(3104),tC=n(80669);let tO=e=>{let{checkboxCls:t}=e,n="".concat(t,"-wrapper");return[{["".concat(t,"-group")]:Object.assign(Object.assign({},(0,tk.Wf)(e)),{display:"inline-flex",flexWrap:"wrap",columnGap:e.marginXS,["> ".concat(e.antCls,"-row")]:{flex:1}}),[n]:Object.assign(Object.assign({},(0,tk.Wf)(e)),{display:"inline-flex",alignItems:"baseline",cursor:"pointer","&:after":{display:"inline-block",width:0,overflow:"hidden",content:"'\\a0'"},["& + ".concat(n)]:{marginInlineStart:0},["&".concat(n,"-in-form-item")]:{'input[type="checkbox"]':{width:14,height:14}}}),[t]:Object.assign(Object.assign({},(0,tk.Wf)(e)),{position:"relative",whiteSpace:"nowrap",lineHeight:1,cursor:"pointer",borderRadius:e.borderRadiusSM,alignSelf:"center",["".concat(t,"-input")]:{position:"absolute",inset:0,zIndex:1,cursor:"pointer",opacity:0,margin:0,["&:focus-visible + ".concat(t,"-inner")]:Object.assign({},(0,tk.oN)(e))},["".concat(t,"-inner")]:{boxSizing:"border-box",display:"block",width:e.checkboxSize,height:e.checkboxSize,direction:"ltr",backgroundColor:e.colorBgContainer,border:"".concat((0,tS.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder),borderRadius:e.borderRadiusSM,borderCollapse:"separate",transition:"all ".concat(e.motionDurationSlow),"&:after":{boxSizing:"border-box",position:"absolute",top:"50%",insetInlineStart:"25%",display:"table",width:e.calc(e.checkboxSize).div(14).mul(5).equal(),height:e.calc(e.checkboxSize).div(14).mul(8).equal(),border:"".concat((0,tS.bf)(e.lineWidthBold)," solid ").concat(e.colorWhite),borderTop:0,borderInlineStart:0,transform:"rotate(45deg) scale(0) translate(-50%,-50%)",opacity:0,content:'""',transition:"all ".concat(e.motionDurationFast," ").concat(e.motionEaseInBack,", opacity ").concat(e.motionDurationFast)}},"& + span":{paddingInlineStart:e.paddingXS,paddingInlineEnd:e.paddingXS}})},{["\n ".concat(n,":not(").concat(n,"-disabled),\n ").concat(t,":not(").concat(t,"-disabled)\n ")]:{["&:hover ".concat(t,"-inner")]:{borderColor:e.colorPrimary}},["".concat(n,":not(").concat(n,"-disabled)")]:{["&:hover ".concat(t,"-checked:not(").concat(t,"-disabled) ").concat(t,"-inner")]:{backgroundColor:e.colorPrimaryHover,borderColor:"transparent"},["&:hover ".concat(t,"-checked:not(").concat(t,"-disabled):after")]:{borderColor:e.colorPrimaryHover}}},{["".concat(t,"-checked")]:{["".concat(t,"-inner")]:{backgroundColor:e.colorPrimary,borderColor:e.colorPrimary,"&:after":{opacity:1,transform:"rotate(45deg) scale(1) translate(-50%,-50%)",transition:"all ".concat(e.motionDurationMid," ").concat(e.motionEaseOutBack," ").concat(e.motionDurationFast)}}},["\n ".concat(n,"-checked:not(").concat(n,"-disabled),\n ").concat(t,"-checked:not(").concat(t,"-disabled)\n ")]:{["&:hover ".concat(t,"-inner")]:{backgroundColor:e.colorPrimaryHover,borderColor:"transparent"}}},{[t]:{"&-indeterminate":{["".concat(t,"-inner")]:{backgroundColor:e.colorBgContainer,borderColor:e.colorBorder,"&:after":{top:"50%",insetInlineStart:"50%",width:e.calc(e.fontSizeLG).div(2).equal(),height:e.calc(e.fontSizeLG).div(2).equal(),backgroundColor:e.colorPrimary,border:0,transform:"translate(-50%, 
-50%) scale(1)",opacity:1,content:'""'}}}}},{["".concat(n,"-disabled")]:{cursor:"not-allowed"},["".concat(t,"-disabled")]:{["&, ".concat(t,"-input")]:{cursor:"not-allowed",pointerEvents:"none"},["".concat(t,"-inner")]:{background:e.colorBgContainerDisabled,borderColor:e.colorBorder,"&:after":{borderColor:e.colorTextDisabled}},"&:after":{display:"none"},"& + span":{color:e.colorTextDisabled},["&".concat(t,"-indeterminate ").concat(t,"-inner::after")]:{background:e.colorTextDisabled}}}]};function tj(e,t){return[tO((0,tE.TS)(t,{checkboxCls:".".concat(e),checkboxSize:t.controlInteractiveSize}))]}var tP=(0,tC.I$)("Checkbox",(e,t)=>{let{prefixCls:n}=t;return[tj(n,e)]}),tN=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let tM=i.forwardRef((e,t)=>{var n;let{prefixCls:r,className:o,rootClassName:a,children:l,indeterminate:c=!1,style:s,onMouseEnter:u,onMouseLeave:d,skipGroup:f=!1,disabled:p}=e,h=tN(e,["prefixCls","className","rootClassName","children","indeterminate","style","onMouseEnter","onMouseLeave","skipGroup","disabled"]),{getPrefixCls:m,direction:g,checkbox:v}=i.useContext(tv.E_),y=i.useContext(tw),{isFormItemInput:b}=i.useContext(tx.aM),x=i.useContext(ty.Z),w=null!==(n=(null==y?void 0:y.disabled)||p)&&void 0!==n?n:x,S=i.useRef(h.value);i.useEffect(()=>{null==y||y.registerValue(h.value)},[]),i.useEffect(()=>{if(!f)return h.value!==S.current&&(null==y||y.cancelValue(S.current),null==y||y.registerValue(h.value),S.current=h.value),()=>null==y?void 0:y.cancelValue(h.value)},[h.value]);let k=m("checkbox",r),E=(0,tb.Z)(k),[C,O,j]=tP(k,E),N=Object.assign({},h);y&&!f&&(N.onChange=function(){h.onChange&&h.onChange.apply(h,arguments),y.toggleOption&&y.toggleOption({label:l,value:h.value})},N.name=y.name,N.checked=y.value.includes(h.value));let M=P()("".concat(k,"-wrapper"),{["".concat(k,"-rtl")]:"rtl"===g,["".concat(k,"-wrapper-checked")]:N.checked,["".concat(k,"-wrapper-disabled")]:w,["".concat(k,"-wrapper-in-form-item")]:b},null==v?void 0:v.className,o,a,j,E,O),I=P()({["".concat(k,"-indeterminate")]:c},tg.A,O),R=c?"mixed":void 0;return C(i.createElement(tm.Z,{component:"Checkbox",disabled:w},i.createElement("label",{className:M,style:Object.assign(Object.assign({},null==v?void 0:v.style),s),onMouseEnter:u,onMouseLeave:d},i.createElement(th.Z,Object.assign({"aria-checked":R},N,{prefixCls:k,className:I,disabled:w,ref:t})),void 0!==l&&i.createElement("span",null,l))))});var tI=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};let tR=i.forwardRef((e,t)=>{let{defaultValue:n,children:r,options:o=[],prefixCls:a,className:l,rootClassName:c,style:s,onChange:u}=e,d=tI(e,["defaultValue","children","options","prefixCls","className","rootClassName","style","onChange"]),{getPrefixCls:f,direction:p}=i.useContext(tv.E_),[h,m]=i.useState(d.value||n||[]),[g,v]=i.useState([]);i.useEffect(()=>{"value"in d&&m(d.value||[])},[d.value]);let y=i.useMemo(()=>o.map(e=>"string"==typeof e||"number"==typeof 
e?{label:e,value:e}:e),[o]),b=f("checkbox",a),x="".concat(b,"-group"),w=(0,tb.Z)(b),[S,k,E]=tP(b,w),C=(0,eY.Z)(d,["value","disabled"]),O=o.length?y.map(e=>i.createElement(tM,{prefixCls:b,key:e.value.toString(),disabled:"disabled"in e?e.disabled:d.disabled,value:e.value,checked:h.includes(e.value),onChange:e.onChange,className:"".concat(x,"-item"),style:e.style,title:e.title,id:e.id,required:e.required},e.label)):r,j={toggleOption:e=>{let t=h.indexOf(e.value),n=(0,ec.Z)(h);-1===t?n.push(e.value):n.splice(t,1),"value"in d||m(n),null==u||u(n.filter(e=>g.includes(e)).sort((e,t)=>y.findIndex(t=>t.value===e)-y.findIndex(e=>e.value===t)))},value:h,disabled:d.disabled,name:d.name,registerValue:e=>{v(t=>[].concat((0,ec.Z)(t),[e]))},cancelValue:e=>{v(t=>t.filter(t=>t!==e))}},N=P()(x,{["".concat(x,"-rtl")]:"rtl"===p},l,c,E,w,k);return S(i.createElement("div",Object.assign({className:N,style:s},C,{ref:t}),i.createElement(tw.Provider,{value:j},O)))});tM.Group=tR,tM.__ANT_CHECKBOX=!0;var tT=n(80795),tA=n(29967);let t_={},tD="SELECT_ALL",tZ="SELECT_INVERT",tL="SELECT_NONE",tz=[],tB=(e,t)=>{let n=[];return(t||[]).forEach(t=>{n.push(t),t&&"object"==typeof t&&e in t&&(n=[].concat((0,ec.Z)(n),(0,ec.Z)(tB(e,t[e]))))}),n};var tF=(e,t)=>{let{preserveSelectedRowKeys:n,selectedRowKeys:r,defaultSelectedRowKeys:o,getCheckboxProps:a,onChange:l,onSelect:c,onSelectAll:s,onSelectInvert:u,onSelectNone:d,onSelectMultiple:f,columnWidth:p,type:h,selections:m,fixed:g,renderCell:v,hideSelectAll:y,checkStrictly:b=!0}=t||{},{prefixCls:x,data:w,pageData:S,getRecordByKey:k,getRowKey:E,expandType:C,childrenColumnName:O,locale:j,getPopupContainer:N}=e,M=(0,tp.ln)("Table"),[I,R]=function(e){let[t,n]=(0,i.useState)(null);return[(0,i.useCallback)((r,o,i)=>{let a=null!=t?t:r,l=Math.max(a||0,r),c=o.slice(Math.min(a||0,r),l+1).map(t=>e(t)),s=c.some(e=>!i.has(e)),u=[];return c.forEach(e=>{s?(i.has(e)||u.push(e),i.add(e)):(i.delete(e),u.push(e))}),n(s?l:null),u},[t]),e=>{n(e)}]}(e=>e),[T,A]=(0,tf.Z)(r||o||tz,{value:r}),_=i.useRef(new Map),D=(0,i.useCallback)(e=>{if(n){let t=new Map;e.forEach(e=>{let n=k(e);!n&&_.current.has(e)&&(n=_.current.get(e)),t.set(e,n)}),_.current=t}},[k,n]);i.useEffect(()=>{D(T)},[T]);let{keyEntities:Z}=(0,i.useMemo)(()=>{if(b)return{keyEntities:null};let e=w;if(n){let t=new Set(w.map((e,t)=>E(e,t))),n=Array.from(_.current).reduce((e,n)=>{let[r,o]=n;return t.has(r)?e:e.concat(o)},[]);e=[].concat((0,ec.Z)(e),(0,ec.Z)(n))}return e4(e,{externalGetKey:E,childrenPropName:O})},[w,E,b,O,n]),L=(0,i.useMemo)(()=>tB(O,S),[O,S]),z=(0,i.useMemo)(()=>{let e=new Map;return L.forEach((t,n)=>{let r=E(t,n),o=(a?a(t):null)||{};e.set(r,o)}),e},[L,E,a]),B=(0,i.useCallback)(e=>{var t;return!!(null===(t=z.get(E(e)))||void 0===t?void 0:t.disabled)},[z,E]),[F,H]=(0,i.useMemo)(()=>{if(b)return[T||[],[]];let{checkedKeys:e,halfCheckedKeys:t}=td(T,!0,Z,B);return[e||[],t]},[T,b,Z,B]),q=(0,i.useMemo)(()=>new Set("radio"===h?F.slice(0,1):F),[F,h]),W=(0,i.useMemo)(()=>"radio"===h?new Set:new Set(H),[H,h]);i.useEffect(()=>{t||A(tz)},[!!t]);let K=(0,i.useCallback)((e,t)=>{let r,o;D(e),n?(r=e,o=e.map(e=>_.current.get(e))):(r=[],o=[],e.forEach(e=>{let t=k(e);void 0!==t&&(r.push(e),o.push(t))})),A(r),null==l||l(r,o,{type:t})},[A,k,l,n]),V=(0,i.useCallback)((e,t,n,r)=>{if(c){let o=n.map(e=>k(e));c(k(e),t,o,r)}K(n,"single")},[c,k,K]),U=(0,i.useMemo)(()=>!m||y?null:(!0===m?[tD,tZ,tL]:m).map(e=>e===tD?{key:"all",text:j.selectionAll,onSelect(){K(w.map((e,t)=>E(e,t)).filter(e=>{let t=z.get(e);return!(null==t?void 
0:t.disabled)||q.has(e)}),"all")}}:e===tZ?{key:"invert",text:j.selectInvert,onSelect(){let e=new Set(q);S.forEach((t,n)=>{let r=E(t,n),o=z.get(r);(null==o?void 0:o.disabled)||(e.has(r)?e.delete(r):e.add(r))});let t=Array.from(e);u&&(M.deprecated(!1,"onSelectInvert","onChange"),u(t)),K(t,"invert")}}:e===tL?{key:"none",text:j.selectNone,onSelect(){null==d||d(),K(Array.from(q).filter(e=>{let t=z.get(e);return null==t?void 0:t.disabled}),"none")}}:e).map(e=>Object.assign(Object.assign({},e),{onSelect:function(){for(var t,n=arguments.length,r=Array(n),o=0;o{var n;let r,o,a;if(!t)return e.filter(e=>e!==t_);let l=(0,ec.Z)(e),c=new Set(q),u=L.map(E).filter(e=>!z.get(e).disabled),d=u.every(e=>c.has(e)),w=u.some(e=>c.has(e));if("radio"!==h){let e;if(U){let t={getPopupContainer:N,items:U.map((e,t)=>{let{key:n,text:r,onSelect:o}=e;return{key:null!=n?n:t,onClick:()=>{null==o||o(u)},label:r}})};e=i.createElement("div",{className:"".concat(x,"-selection-extra")},i.createElement(tT.Z,{menu:t,getPopupContainer:N},i.createElement("span",null,i.createElement(eq.Z,null))))}let t=L.map((e,t)=>{let n=E(e,t),r=z.get(n)||{};return Object.assign({checked:c.has(n)},r)}).filter(e=>{let{disabled:t}=e;return t}),n=!!t.length&&t.length===L.length,a=n&&t.every(e=>{let{checked:t}=e;return t}),l=n&&t.some(e=>{let{checked:t}=e;return t});o=i.createElement(tM,{checked:n?a:!!L.length&&d,indeterminate:n?!a&&l:!d&&w,onChange:()=>{let e=[];d?u.forEach(t=>{c.delete(t),e.push(t)}):u.forEach(t=>{c.has(t)||(c.add(t),e.push(t))});let t=Array.from(c);null==s||s(!d,t.map(e=>k(e)),e.map(e=>k(e))),K(t,"all"),R(null)},disabled:0===L.length||n,"aria-label":e?"Custom selection":"Select all",skipGroup:!0}),r=!y&&i.createElement("div",{className:"".concat(x,"-selection")},o,e)}if(a="radio"===h?(e,t,n)=>{let r=E(t,n),o=c.has(r);return{node:i.createElement(tA.ZP,Object.assign({},z.get(r),{checked:o,onClick:e=>e.stopPropagation(),onChange:e=>{c.has(r)||V(r,!0,[r],e.nativeEvent)}})),checked:o}}:(e,t,n)=>{var r;let o;let a=E(t,n),l=c.has(a),s=W.has(a),d=z.get(a);return o="nest"===C?s:null!==(r=null==d?void 0:d.indeterminate)&&void 0!==r?r:s,{node:i.createElement(tM,Object.assign({},d,{indeterminate:o,checked:l,skipGroup:!0,onClick:e=>e.stopPropagation(),onChange:e=>{let{nativeEvent:t}=e,{shiftKey:n}=t,r=u.findIndex(e=>e===a),o=F.some(e=>u.includes(e));if(n&&b&&o){let e=I(r,u,c),t=Array.from(c);null==f||f(!l,t.map(e=>k(e)),e.map(e=>k(e))),K(t,"multiple")}else if(b){let e=l?tn(F,a):tr(F,a);V(a,!l,e,t)}else{let{checkedKeys:e,halfCheckedKeys:n}=td([].concat((0,ec.Z)(F),[a]),!0,Z,B),r=e;if(l){let t=new Set(e);t.delete(a),r=td(Array.from(t),{checked:!1,halfCheckedKeys:n},Z,B).checkedKeys}V(a,!l,r,t)}l?R(null):R(r)}})),checked:l}},!l.includes(t_)){if(0===l.findIndex(e=>{var t;return(null===(t=e[ei])||void 0===t?void 0:t.columnType)==="EXPAND_COLUMN"})){let[e,...t]=l;l=[e,t_].concat((0,ec.Z)(t))}else l=[t_].concat((0,ec.Z)(l))}let S=l.indexOf(t_),O=(l=l.filter((e,t)=>e!==t_||t===S))[S-1],j=l[S+1],M=g;void 0===M&&((null==j?void 0:j.fixed)!==void 0?M=j.fixed:(null==O?void 0:O.fixed)!==void 0&&(M=O.fixed)),M&&O&&(null===(n=O[ei])||void 0===n?void 0:n.columnType)==="EXPAND_COLUMN"&&void 0===O.fixed&&(O.fixed=M);let T=P()("".concat(x,"-selection-col"),{["".concat(x,"-selection-col-with-dropdown")]:m&&"checkbox"===h}),A={fixed:M,width:p,className:"".concat(x,"-selection-column"),title:(null==t?void 0:t.columnTitle)?"function"==typeof t.columnTitle?t.columnTitle(o):t.columnTitle:r,render:(e,t,n)=>{let{node:r,checked:o}=a(e,t,n);return 
v?v(o,t,n,r):r},onCell:t.onCell,[ei]:{className:T}};return l.map(e=>e===t_?A:e)},[E,L,t,F,q,W,p,U,C,z,f,V,B]),q]},tH=n(53346);function tq(e){return null!=e&&e===e.window}var tW=n(91086),tK=n(33759),tV=n(51646),tU=n(6543),tG=function(){let e=!(arguments.length>0)||void 0===arguments[0]||arguments[0],t=(0,i.useRef)({}),n=(0,tV.Z)(),r=(0,tU.ZP)();return(0,u.Z)(()=>{let o=r.subscribe(r=>{t.current=r,e&&n()});return()=>r.unsubscribe(o)},[]),t.current},tX=n(13823),t$={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M272.9 512l265.4-339.1c4.1-5.2.4-12.9-6.3-12.9h-77.3c-4.9 0-9.6 2.3-12.6 6.1L186.8 492.3a31.99 31.99 0 000 39.5l255.3 326.1c3 3.9 7.7 6.1 12.6 6.1H532c6.7 0 10.4-7.7 6.3-12.9L272.9 512zm304 0l265.4-339.1c4.1-5.2.4-12.9-6.3-12.9h-77.3c-4.9 0-9.6 2.3-12.6 6.1L490.8 492.3a31.99 31.99 0 000 39.5l255.3 326.1c3 3.9 7.7 6.1 12.6 6.1H836c6.7 0 10.4-7.7 6.3-12.9L576.9 512z"}}]},name:"double-left",theme:"outlined"},tY=n(55015),tQ=i.forwardRef(function(e,t){return i.createElement(tY.Z,(0,m.Z)({},e,{ref:t,icon:t$}))}),tJ={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M533.2 492.3L277.9 166.1c-3-3.9-7.7-6.1-12.6-6.1H188c-6.7 0-10.4 7.7-6.3 12.9L447.1 512 181.7 851.1A7.98 7.98 0 00188 864h77.3c4.9 0 9.6-2.3 12.6-6.1l255.3-326.1c9.1-11.7 9.1-27.9 0-39.5zm304 0L581.9 166.1c-3-3.9-7.7-6.1-12.6-6.1H492c-6.7 0-10.4 7.7-6.3 12.9L751.1 512 485.7 851.1A7.98 7.98 0 00492 864h77.3c4.9 0 9.6-2.3 12.6-6.1l255.3-326.1c9.1-11.7 9.1-27.9 0-39.5z"}}]},name:"double-right",theme:"outlined"},t0=i.forwardRef(function(e,t){return i.createElement(tY.Z,(0,m.Z)({},e,{ref:t,icon:tJ}))}),t1=n(15327),t2=n(77565),t6=n(95814),t4={items_per_page:"条/页",jump_to:"跳至",jump_to_confirm:"确定",page:"页",prev_page:"上一页",next_page:"下一页",prev_5:"向前 5 页",next_5:"向后 5 页",prev_3:"向前 3 页",next_3:"向后 3 页",page_size:"页码"},t3=["10","20","50","100"],t5=function(e){var t=e.pageSizeOptions,n=void 0===t?t3:t,r=e.locale,o=e.changeSize,a=e.pageSize,l=e.goButton,s=e.quickGo,u=e.rootPrefixCls,d=e.selectComponentClass,f=e.selectPrefixCls,p=e.disabled,h=e.buildOptionText,m=i.useState(""),g=(0,c.Z)(m,2),v=g[0],y=g[1],b=function(){return!v||Number.isNaN(v)?void 0:Number(v)},x="function"==typeof h?h:function(e){return"".concat(e," ").concat(r.items_per_page)},w=function(e){""!==v&&(e.keyCode===t6.Z.ENTER||"click"===e.type)&&(y(""),null==s||s(b()))},S="".concat(u,"-options");if(!o&&!s)return null;var k=null,E=null,C=null;if(o&&d){var O=(n.some(function(e){return e.toString()===a.toString()})?n:n.concat([a.toString()]).sort(function(e,t){return(Number.isNaN(Number(e))?0:Number(e))-(Number.isNaN(Number(t))?0:Number(t))})).map(function(e,t){return i.createElement(d.Option,{key:t,value:e.toString()},x(e))});k=i.createElement(d,{disabled:p,prefixCls:f,showSearch:!1,className:"".concat(S,"-size-changer"),optionLabelProp:"children",popupMatchSelectWidth:!1,value:(a||n[0]).toString(),onChange:function(e){null==o||o(Number(e))},getPopupContainer:function(e){return e.parentNode},"aria-label":r.page_size,defaultOpen:!1},O)}return s&&(l&&(C="boolean"==typeof 
l?i.createElement("button",{type:"button",onClick:w,onKeyUp:w,disabled:p,className:"".concat(S,"-quick-jumper-button")},r.jump_to_confirm):i.createElement("span",{onClick:w,onKeyUp:w},l)),E=i.createElement("div",{className:"".concat(S,"-quick-jumper")},r.jump_to,i.createElement("input",{disabled:p,type:"text",value:v,onChange:function(e){y(e.target.value)},onKeyUp:w,onBlur:function(e){!l&&""!==v&&(y(""),e.relatedTarget&&(e.relatedTarget.className.indexOf("".concat(u,"-item-link"))>=0||e.relatedTarget.className.indexOf("".concat(u,"-item"))>=0)||null==s||s(b()))},"aria-label":r.page}),r.page,C)),i.createElement("li",{className:S},k,E)},t8=function(e){var t,n=e.rootPrefixCls,r=e.page,o=e.active,a=e.className,l=e.showTitle,c=e.onClick,s=e.onKeyPress,u=e.itemRender,d="".concat(n,"-item"),f=P()(d,"".concat(d,"-").concat(r),(t={},(0,O.Z)(t,"".concat(d,"-active"),o),(0,O.Z)(t,"".concat(d,"-disabled"),!r),t),a),p=u(r,"page",i.createElement("a",{rel:"nofollow"},r));return p?i.createElement("li",{title:l?String(r):null,className:f,onClick:function(){c(r)},onKeyDown:function(e){s(e,c,r)},tabIndex:0},p):null},t7=function(e,t,n){return n};function t9(){}function ne(e){var t=Number(e);return"number"==typeof t&&!Number.isNaN(t)&&isFinite(t)&&Math.floor(t)===t}function nt(e,t,n){return Math.floor((n-1)/(void 0===e?t:e))+1}var nn=function(e){var t,n,r,o,a,l=e.prefixCls,s=void 0===l?"rc-pagination":l,u=e.selectPrefixCls,d=e.className,f=e.selectComponentClass,p=e.current,h=e.defaultCurrent,g=e.total,v=void 0===g?0:g,y=e.pageSize,b=e.defaultPageSize,x=e.onChange,w=void 0===x?t9:x,S=e.hideOnSinglePage,k=e.showPrevNextJumpers,E=e.showQuickJumper,j=e.showLessItems,N=e.showTitle,M=void 0===N||N,I=e.onShowSizeChange,R=void 0===I?t9:I,T=e.locale,A=void 0===T?t4:T,_=e.style,D=e.totalBoundaryShowSizeChanger,Z=e.disabled,L=e.simple,z=e.showTotal,B=e.showSizeChanger,F=e.pageSizeOptions,H=e.itemRender,q=void 0===H?t7:H,W=e.jumpPrevIcon,K=e.jumpNextIcon,V=e.prevIcon,G=e.nextIcon,X=i.useRef(null),$=(0,tf.Z)(10,{value:y,defaultValue:void 0===b?10:b}),Y=(0,c.Z)($,2),Q=Y[0],J=Y[1],ee=(0,tf.Z)(1,{value:p,defaultValue:void 0===h?1:h,postState:function(e){return Math.max(1,Math.min(e,nt(void 0,Q,v)))}}),et=(0,c.Z)(ee,2),en=et[0],er=et[1],eo=i.useState(en),ei=(0,c.Z)(eo,2),ea=ei[0],el=ei[1];(0,i.useEffect)(function(){el(en)},[en]);var ec=Math.max(1,en-(j?3:5)),es=Math.min(nt(void 0,Q,v),en+(j?3:5));function eu(t,n){var r=t||i.createElement("button",{type:"button","aria-label":n,className:"".concat(s,"-item-link")});return"function"==typeof t&&(r=i.createElement(t,(0,C.Z)({},e))),r}function ed(e){var t=e.target.value,n=nt(void 0,Q,v);return""===t?t:Number.isNaN(Number(t))?ea:t>=n?n:Number(t)}var ef=v>Q&&E;function ep(e){var t=ed(e);switch(t!==ea&&el(t),e.keyCode){case t6.Z.ENTER:eh(t);break;case t6.Z.UP:eh(t-1);break;case t6.Z.DOWN:eh(t+1)}}function eh(e){if(ne(e)&&e!==en&&ne(v)&&v>0&&!Z){var t=nt(void 0,Q,v),n=e;return e>t?n=t:e<1&&(n=1),n!==ea&&el(n),er(n),null==w||w(n,Q),n}return en}var em=en>1,eg=en(void 0===D?50:D);function ey(){em&&eh(en-1)}function eb(){eg&&eh(en+1)}function ex(){eh(ec)}function ew(){eh(es)}function eS(e,t){if("Enter"===e.key||e.charCode===t6.Z.ENTER||e.keyCode===t6.Z.ENTER){for(var n=arguments.length,r=Array(n>2?n-2:0),o=2;ov?v:en*Q])),ej=null,eP=nt(void 0,Q,v);if(S&&v<=Q)return null;var 
eN=[],eM={rootPrefixCls:s,onClick:eh,onKeyPress:eS,showTitle:M,itemRender:q,page:-1},eI=en-1>0?en-1:0,eR=en+1=2*eD&&3!==en&&(eN[0]=i.cloneElement(eN[0],{className:P()("".concat(s,"-item-after-jump-prev"),eN[0].props.className)}),eN.unshift(eE)),eP-en>=2*eD&&en!==eP-2){var eK=eN[eN.length-1];eN[eN.length-1]=i.cloneElement(eK,{className:P()("".concat(s,"-item-before-jump-next"),eK.props.className)}),eN.push(ej)}1!==eH&&eN.unshift(i.createElement(t8,(0,m.Z)({},eM,{key:1,page:1}))),eq!==eP&&eN.push(i.createElement(t8,(0,m.Z)({},eM,{key:eP,page:eP})))}var eV=(t=q(eI,"prev",eu(V,"prev page")),i.isValidElement(t)?i.cloneElement(t,{disabled:!em}):t);if(eV){var eU=!em||!eP;eV=i.createElement("li",{title:M?A.prev_page:null,onClick:ey,tabIndex:eU?null:0,onKeyDown:function(e){eS(e,ey)},className:P()("".concat(s,"-prev"),(0,O.Z)({},"".concat(s,"-disabled"),eU)),"aria-disabled":eU},eV)}var eG=(n=q(eR,"next",eu(G,"next page")),i.isValidElement(n)?i.cloneElement(n,{disabled:!eg}):n);eG&&(L?(o=!eg,a=em?0:null):a=(o=!eg||!eP)?null:0,eG=i.createElement("li",{title:M?A.next_page:null,onClick:eb,tabIndex:a,onKeyDown:function(e){eS(e,eb)},className:P()("".concat(s,"-next"),(0,O.Z)({},"".concat(s,"-disabled"),o)),"aria-disabled":o},eG));var eX=P()(s,d,(r={},(0,O.Z)(r,"".concat(s,"-simple"),L),(0,O.Z)(r,"".concat(s,"-disabled"),Z),r));return i.createElement("ul",(0,m.Z)({className:eX,style:_,ref:X},eC),eO,eV,L?e_:eN,eG,i.createElement(t5,{locale:A,rootPrefixCls:s,disabled:Z,selectComponentClass:f,selectPrefixCls:void 0===u?"rc-select":u,changeSize:ev?function(e){var t=nt(e,Q,v),n=en>t&&0!==t?t:en;J(e),el(n),null==R||R(en,e),er(n),null==w||w(n,e)}:null,pageSize:Q,pageSizeOptions:F,quickGo:ef?eh:null,goButton:eA}))},nr=n(96257),no=n(55274),ni=n(52787);let na=e=>i.createElement(ni.default,Object.assign({},e,{showSearch:!0,size:"small"})),nl=e=>i.createElement(ni.default,Object.assign({},e,{showSearch:!0,size:"middle"}));na.Option=ni.default.Option,nl.Option=ni.default.Option;var nc=n(31282),ns=n(37433),nu=n(65265);let nd=e=>{let{componentCls:t}=e;return{["".concat(t,"-disabled")]:{"&, &:hover":{cursor:"not-allowed",["".concat(t,"-item-link")]:{color:e.colorTextDisabled,cursor:"not-allowed"}},"&:focus-visible":{cursor:"not-allowed",["".concat(t,"-item-link")]:{color:e.colorTextDisabled,cursor:"not-allowed"}}},["&".concat(t,"-disabled")]:{cursor:"not-allowed",["".concat(t,"-item")]:{cursor:"not-allowed","&:hover, &:active":{backgroundColor:"transparent"},a:{color:e.colorTextDisabled,backgroundColor:"transparent",border:"none",cursor:"not-allowed"},"&-active":{borderColor:e.colorBorder,backgroundColor:e.itemActiveBgDisabled,"&:hover, &:active":{backgroundColor:e.itemActiveBgDisabled},a:{color:e.itemActiveColorDisabled}}},["".concat(t,"-item-link")]:{color:e.colorTextDisabled,cursor:"not-allowed","&:hover, &:active":{backgroundColor:"transparent"},["".concat(t,"-simple&")]:{backgroundColor:"transparent","&:hover, &:active":{backgroundColor:"transparent"}}},["".concat(t,"-simple-pager")]:{color:e.colorTextDisabled},["".concat(t,"-jump-prev, ").concat(t,"-jump-next")]:{["".concat(t,"-item-link-icon")]:{opacity:0},["".concat(t,"-item-ellipsis")]:{opacity:1}}},["&".concat(t,"-simple")]:{["".concat(t,"-prev, ").concat(t,"-next")]:{["&".concat(t,"-disabled ").concat(t,"-item-link")]:{"&:hover, &:active":{backgroundColor:"transparent"}}}}}},nf=e=>{let{componentCls:t}=e;return{["&".concat(t,"-mini ").concat(t,"-total-text, &").concat(t,"-mini 
").concat(t,"-simple-pager")]:{height:e.itemSizeSM,lineHeight:(0,tS.bf)(e.itemSizeSM)},["&".concat(t,"-mini ").concat(t,"-item")]:{minWidth:e.itemSizeSM,height:e.itemSizeSM,margin:0,lineHeight:(0,tS.bf)(e.calc(e.itemSizeSM).sub(2).equal())},["&".concat(t,"-mini:not(").concat(t,"-disabled) ").concat(t,"-item:not(").concat(t,"-item-active)")]:{backgroundColor:"transparent",borderColor:"transparent","&:hover":{backgroundColor:e.colorBgTextHover},"&:active":{backgroundColor:e.colorBgTextActive}},["&".concat(t,"-mini ").concat(t,"-prev, &").concat(t,"-mini ").concat(t,"-next")]:{minWidth:e.itemSizeSM,height:e.itemSizeSM,margin:0,lineHeight:(0,tS.bf)(e.itemSizeSM)},["&".concat(t,"-mini:not(").concat(t,"-disabled)")]:{["".concat(t,"-prev, ").concat(t,"-next")]:{["&:hover ".concat(t,"-item-link")]:{backgroundColor:e.colorBgTextHover},["&:active ".concat(t,"-item-link")]:{backgroundColor:e.colorBgTextActive},["&".concat(t,"-disabled:hover ").concat(t,"-item-link")]:{backgroundColor:"transparent"}}},["\n &".concat(t,"-mini ").concat(t,"-prev ").concat(t,"-item-link,\n &").concat(t,"-mini ").concat(t,"-next ").concat(t,"-item-link\n ")]:{backgroundColor:"transparent",borderColor:"transparent","&::after":{height:e.itemSizeSM,lineHeight:(0,tS.bf)(e.itemSizeSM)}},["&".concat(t,"-mini ").concat(t,"-jump-prev, &").concat(t,"-mini ").concat(t,"-jump-next")]:{height:e.itemSizeSM,marginInlineEnd:0,lineHeight:(0,tS.bf)(e.itemSizeSM)},["&".concat(t,"-mini ").concat(t,"-options")]:{marginInlineStart:e.paginationMiniOptionsMarginInlineStart,"&-size-changer":{top:e.miniOptionsSizeChangerTop},"&-quick-jumper":{height:e.itemSizeSM,lineHeight:(0,tS.bf)(e.itemSizeSM),input:Object.assign(Object.assign({},(0,nc.x0)(e)),{width:e.paginationMiniQuickJumperInputWidth,height:e.controlHeightSM})}}}},np=e=>{let{componentCls:t}=e;return{["\n &".concat(t,"-simple ").concat(t,"-prev,\n &").concat(t,"-simple ").concat(t,"-next\n ")]:{height:e.itemSizeSM,lineHeight:(0,tS.bf)(e.itemSizeSM),verticalAlign:"top",["".concat(t,"-item-link")]:{height:e.itemSizeSM,backgroundColor:"transparent",border:0,"&:hover":{backgroundColor:e.colorBgTextHover},"&:active":{backgroundColor:e.colorBgTextActive},"&::after":{height:e.itemSizeSM,lineHeight:(0,tS.bf)(e.itemSizeSM)}}},["&".concat(t,"-simple ").concat(t,"-simple-pager")]:{display:"inline-block",height:e.itemSizeSM,marginInlineEnd:e.marginXS,input:{boxSizing:"border-box",height:"100%",marginInlineEnd:e.marginXS,padding:"0 ".concat((0,tS.bf)(e.paginationItemPaddingInline)),textAlign:"center",backgroundColor:e.itemInputBg,border:"".concat((0,tS.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder),borderRadius:e.borderRadius,outline:"none",transition:"border-color ".concat(e.motionDurationMid),color:"inherit","&:hover":{borderColor:e.colorPrimary},"&:focus":{borderColor:e.colorPrimaryHover,boxShadow:"".concat((0,tS.bf)(e.inputOutlineOffset)," 0 ").concat((0,tS.bf)(e.controlOutlineWidth)," ").concat(e.controlOutline)},"&[disabled]":{color:e.colorTextDisabled,backgroundColor:e.colorBgContainerDisabled,borderColor:e.colorBorder,cursor:"not-allowed"}}}}},nh=e=>{let{componentCls:t}=e;return{["".concat(t,"-jump-prev, ").concat(t,"-jump-next")]:{outline:0,["".concat(t,"-item-container")]:{position:"relative",["".concat(t,"-item-link-icon")]:{color:e.colorPrimary,fontSize:e.fontSizeSM,opacity:0,transition:"all 
".concat(e.motionDurationMid),"&-svg":{top:0,insetInlineEnd:0,bottom:0,insetInlineStart:0,margin:"auto"}},["".concat(t,"-item-ellipsis")]:{position:"absolute",top:0,insetInlineEnd:0,bottom:0,insetInlineStart:0,display:"block",margin:"auto",color:e.colorTextDisabled,fontFamily:"Arial, Helvetica, sans-serif",letterSpacing:e.paginationEllipsisLetterSpacing,textAlign:"center",textIndent:e.paginationEllipsisTextIndent,opacity:1,transition:"all ".concat(e.motionDurationMid)}},"&:hover":{["".concat(t,"-item-link-icon")]:{opacity:1},["".concat(t,"-item-ellipsis")]:{opacity:0}}},["\n ".concat(t,"-prev,\n ").concat(t,"-jump-prev,\n ").concat(t,"-jump-next\n ")]:{marginInlineEnd:e.marginXS},["\n ".concat(t,"-prev,\n ").concat(t,"-next,\n ").concat(t,"-jump-prev,\n ").concat(t,"-jump-next\n ")]:{display:"inline-block",minWidth:e.itemSize,height:e.itemSize,color:e.colorText,fontFamily:e.fontFamily,lineHeight:"".concat((0,tS.bf)(e.itemSize)),textAlign:"center",verticalAlign:"middle",listStyle:"none",borderRadius:e.borderRadius,cursor:"pointer",transition:"all ".concat(e.motionDurationMid)},["".concat(t,"-prev, ").concat(t,"-next")]:{fontFamily:"Arial, Helvetica, sans-serif",outline:0,button:{color:e.colorText,cursor:"pointer",userSelect:"none"},["".concat(t,"-item-link")]:{display:"block",width:"100%",height:"100%",padding:0,fontSize:e.fontSizeSM,textAlign:"center",backgroundColor:"transparent",border:"".concat((0,tS.bf)(e.lineWidth)," ").concat(e.lineType," transparent"),borderRadius:e.borderRadius,outline:"none",transition:"all ".concat(e.motionDurationMid)},["&:hover ".concat(t,"-item-link")]:{backgroundColor:e.colorBgTextHover},["&:active ".concat(t,"-item-link")]:{backgroundColor:e.colorBgTextActive},["&".concat(t,"-disabled:hover")]:{["".concat(t,"-item-link")]:{backgroundColor:"transparent"}}},["".concat(t,"-slash")]:{marginInlineEnd:e.paginationSlashMarginInlineEnd,marginInlineStart:e.paginationSlashMarginInlineStart},["".concat(t,"-options")]:{display:"inline-block",marginInlineStart:e.margin,verticalAlign:"middle","&-size-changer.-select":{display:"inline-block",width:"auto"},"&-quick-jumper":{display:"inline-block",height:e.controlHeight,marginInlineStart:e.marginXS,lineHeight:(0,tS.bf)(e.controlHeight),verticalAlign:"top",input:Object.assign(Object.assign(Object.assign({},(0,nc.ik)(e)),(0,nu.$U)(e,{borderColor:e.colorBorder,hoverBorderColor:e.colorPrimaryHover,activeBorderColor:e.colorPrimary,activeShadow:e.activeShadow})),{"&[disabled]":Object.assign({},(0,nu.Xy)(e)),width:e.calc(e.controlHeightLG).mul(1.25).equal(),height:e.controlHeight,boxSizing:"border-box",margin:0,marginInlineStart:e.marginXS,marginInlineEnd:e.marginXS})}}}},nm=e=>{let{componentCls:t}=e;return{["".concat(t,"-item")]:{display:"inline-block",minWidth:e.itemSize,height:e.itemSize,marginInlineEnd:e.marginXS,fontFamily:e.fontFamily,lineHeight:(0,tS.bf)(e.calc(e.itemSize).sub(2).equal()),textAlign:"center",verticalAlign:"middle",listStyle:"none",backgroundColor:"transparent",border:"".concat((0,tS.bf)(e.lineWidth)," ").concat(e.lineType," transparent"),borderRadius:e.borderRadius,outline:0,cursor:"pointer",userSelect:"none",a:{display:"block",padding:"0 ".concat((0,tS.bf)(e.paginationItemPaddingInline)),color:e.colorText,"&:hover":{textDecoration:"none"}},["&:not(".concat(t,"-item-active)")]:{"&:hover":{transition:"all 
".concat(e.motionDurationMid),backgroundColor:e.colorBgTextHover},"&:active":{backgroundColor:e.colorBgTextActive}},"&-active":{fontWeight:e.fontWeightStrong,backgroundColor:e.itemActiveBg,borderColor:e.colorPrimary,a:{color:e.colorPrimary},"&:hover":{borderColor:e.colorPrimaryHover},"&:hover a":{color:e.colorPrimaryHover}}}}},ng=e=>{let{componentCls:t}=e;return{[t]:Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({},(0,tk.Wf)(e)),{"ul, ol":{margin:0,padding:0,listStyle:"none"},"&::after":{display:"block",clear:"both",height:0,overflow:"hidden",visibility:"hidden",content:'""'},["".concat(t,"-total-text")]:{display:"inline-block",height:e.itemSize,marginInlineEnd:e.marginXS,lineHeight:(0,tS.bf)(e.calc(e.itemSize).sub(2).equal()),verticalAlign:"middle"}}),nm(e)),nh(e)),np(e)),nf(e)),nd(e)),{["@media only screen and (max-width: ".concat(e.screenLG,"px)")]:{["".concat(t,"-item")]:{"&-after-jump-prev, &-before-jump-next":{display:"none"}}},["@media only screen and (max-width: ".concat(e.screenSM,"px)")]:{["".concat(t,"-options")]:{display:"none"}}}),["&".concat(e.componentCls,"-rtl")]:{direction:"rtl"}}},nv=e=>{let{componentCls:t}=e;return{["".concat(t,":not(").concat(t,"-disabled)")]:{["".concat(t,"-item")]:Object.assign({},(0,tk.Qy)(e)),["".concat(t,"-jump-prev, ").concat(t,"-jump-next")]:{"&:focus-visible":Object.assign({["".concat(t,"-item-link-icon")]:{opacity:1},["".concat(t,"-item-ellipsis")]:{opacity:0}},(0,tk.oN)(e))},["".concat(t,"-prev, ").concat(t,"-next")]:{["&:focus-visible ".concat(t,"-item-link")]:Object.assign({},(0,tk.oN)(e))}}}},ny=e=>Object.assign({itemBg:e.colorBgContainer,itemSize:e.controlHeight,itemSizeSM:e.controlHeightSM,itemActiveBg:e.colorBgContainer,itemLinkBg:e.colorBgContainer,itemActiveColorDisabled:e.colorTextDisabled,itemActiveBgDisabled:e.controlItemBgActiveDisabled,itemInputBg:e.colorBgContainer,miniOptionsSizeChangerTop:0},(0,ns.T)(e)),nb=e=>(0,tE.TS)(e,{inputOutlineOffset:0,paginationMiniOptionsMarginInlineStart:e.calc(e.marginXXS).div(2).equal(),paginationMiniQuickJumperInputWidth:e.calc(e.controlHeightLG).mul(1.1).equal(),paginationItemPaddingInline:e.calc(e.marginXXS).mul(1.5).equal(),paginationEllipsisLetterSpacing:e.calc(e.marginXXS).div(2).equal(),paginationSlashMarginInlineStart:e.marginXXS,paginationSlashMarginInlineEnd:e.marginSM,paginationEllipsisTextIndent:"0.13em"},(0,ns.e)(e));var nx=(0,tC.I$)("Pagination",e=>{let t=nb(e);return[ng(t),nv(t)]},ny),nw=n(29961);let nS=e=>{let{componentCls:t}=e;return{["".concat(t).concat(t,"-bordered").concat(t,"-disabled:not(").concat(t,"-mini)")]:{"&, &:hover":{["".concat(t,"-item-link")]:{borderColor:e.colorBorder}},"&:focus-visible":{["".concat(t,"-item-link")]:{borderColor:e.colorBorder}},["".concat(t,"-item, ").concat(t,"-item-link")]:{backgroundColor:e.colorBgContainerDisabled,borderColor:e.colorBorder,["&:hover:not(".concat(t,"-item-active)")]:{backgroundColor:e.colorBgContainerDisabled,borderColor:e.colorBorder,a:{color:e.colorTextDisabled}},["&".concat(t,"-item-active")]:{backgroundColor:e.itemActiveBgDisabled}},["".concat(t,"-prev, ").concat(t,"-next")]:{"&:hover button":{backgroundColor:e.colorBgContainerDisabled,borderColor:e.colorBorder,color:e.colorTextDisabled},["".concat(t,"-item-link")]:{backgroundColor:e.colorBgContainerDisabled,borderColor:e.colorBorder}}},["".concat(t).concat(t,"-bordered:not(").concat(t,"-mini)")]:{["".concat(t,"-prev, ").concat(t,"-next")]:{"&:hover 
button":{borderColor:e.colorPrimaryHover,backgroundColor:e.itemBg},["".concat(t,"-item-link")]:{backgroundColor:e.itemLinkBg,borderColor:e.colorBorder},["&:hover ".concat(t,"-item-link")]:{borderColor:e.colorPrimary,backgroundColor:e.itemBg,color:e.colorPrimary},["&".concat(t,"-disabled")]:{["".concat(t,"-item-link")]:{borderColor:e.colorBorder,color:e.colorTextDisabled}}},["".concat(t,"-item")]:{backgroundColor:e.itemBg,border:"".concat((0,tS.bf)(e.lineWidth)," ").concat(e.lineType," ").concat(e.colorBorder),["&:hover:not(".concat(t,"-item-active)")]:{borderColor:e.colorPrimary,backgroundColor:e.itemBg,a:{color:e.colorPrimary}},"&-active":{borderColor:e.colorPrimary}}}}};var nk=(0,tC.bk)(["Pagination","bordered"],e=>[nS(nb(e))],ny),nE=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);ot.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n},nC=e=>{let{prefixCls:t,selectPrefixCls:n,className:r,rootClassName:o,style:a,size:l,locale:c,selectComponentClass:s,responsive:u,showSizeChanger:d}=e,f=nE(e,["prefixCls","selectPrefixCls","className","rootClassName","style","size","locale","selectComponentClass","responsive","showSizeChanger"]),{xs:p}=tG(u),[,h]=(0,nw.ZP)(),{getPrefixCls:m,direction:g,pagination:v={}}=i.useContext(tv.E_),y=m("pagination",t),[b,x,w]=nx(y),S=null!=d?d:v.showSizeChanger,k=i.useMemo(()=>{let e=i.createElement("span",{className:"".concat(y,"-item-ellipsis")},"•••"),t=i.createElement("button",{className:"".concat(y,"-item-link"),type:"button",tabIndex:-1},"rtl"===g?i.createElement(t2.Z,null):i.createElement(t1.Z,null));return{prevIcon:t,nextIcon:i.createElement("button",{className:"".concat(y,"-item-link"),type:"button",tabIndex:-1},"rtl"===g?i.createElement(t1.Z,null):i.createElement(t2.Z,null)),jumpPrevIcon:i.createElement("a",{className:"".concat(y,"-item-link")},i.createElement("div",{className:"".concat(y,"-item-container")},"rtl"===g?i.createElement(t0,{className:"".concat(y,"-item-link-icon")}):i.createElement(tQ,{className:"".concat(y,"-item-link-icon")}),e)),jumpNextIcon:i.createElement("a",{className:"".concat(y,"-item-link")},i.createElement("div",{className:"".concat(y,"-item-container")},"rtl"===g?i.createElement(tQ,{className:"".concat(y,"-item-link-icon")}):i.createElement(t0,{className:"".concat(y,"-item-link-icon")}),e))}},[g,y]),[E]=(0,no.Z)("Pagination",nr.Z),C=Object.assign(Object.assign({},E),c),O=(0,tK.Z)(l),j="small"===O||!!(p&&!O&&u),N=m("select",n),M=P()({["".concat(y,"-mini")]:j,["".concat(y,"-rtl")]:"rtl"===g,["".concat(y,"-bordered")]:h.wireframe},null==v?void 0:v.className,r,o,x,w),I=Object.assign(Object.assign({},null==v?void 0:v.style),a);return b(i.createElement(i.Fragment,null,h.wireframe&&i.createElement(nk,{prefixCls:y}),i.createElement(nn,Object.assign({},k,f,{style:I,prefixCls:y,selectPrefixCls:N,className:M,selectComponentClass:s||(j?na:nl),locale:C,showSizeChanger:S}))))},nO=n(87908);function nj(e,t){return"key"in e&&void 0!==e.key&&null!==e.key?e.key:e.dataIndex?Array.isArray(e.dataIndex)?e.dataIndex.join("."):e.dataIndex:t}function nP(e,t){return t?"".concat(t,"-").concat(e):"".concat(e)}function nN(e,t){return"function"==typeof e?e(t):e}var nM={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M349 838c0 17.7 14.2 32 31.8 32h262.4c17.6 0 31.8-14.3 
31.8-32V642H349v196zm531.1-684H143.9c-24.5 0-39.8 26.7-27.5 48l221.3 376h348.8l221.3-376c12.1-21.3-3.2-48-27.7-48z"}}]},name:"filter",theme:"filled"},nI=i.forwardRef(function(e,t){return i.createElement(tY.Z,(0,m.Z)({},e,{ref:t,icon:nM}))}),nR=n(73002),nT=n(85180),nA=n(45937),n_=n(88208);function nD(e){if(null==e)throw TypeError("Cannot destructure "+e)}var nZ=n(47970),nL=["className","style","motion","motionNodes","motionType","onMotionStart","onMotionEnd","active","treeNodeRequiredProps"],nz=function(e,t){var n,r,o,a,l,s=e.className,d=e.style,f=e.motion,p=e.motionNodes,h=e.motionType,g=e.onMotionStart,v=e.onMotionEnd,y=e.active,b=e.treeNodeRequiredProps,x=(0,z.Z)(e,nL),w=i.useState(!0),S=(0,c.Z)(w,2),k=S[0],E=S[1],C=i.useContext(eX).prefixCls,O=p&&"hide"!==h;(0,u.Z)(function(){p&&O!==k&&E(O)},[p]);var j=i.useRef(!1),N=function(){p&&!j.current&&(j.current=!0,v())};return(n=function(){p&&g()},r=i.useState(!1),a=(o=(0,c.Z)(r,2))[0],l=o[1],(0,u.Z)(function(){if(a)return n(),function(){N()}},[a]),(0,u.Z)(function(){return l(!0),function(){l(!1)}},[]),p)?i.createElement(nZ.ZP,(0,m.Z)({ref:t,visible:k},f,{motionAppear:"show"===h,onVisibleChanged:function(e){O===e&&N()}}),function(e,t){var n=e.className,r=e.style;return i.createElement("div",{ref:t,className:P()("".concat(C,"-treenode-motion"),n),style:r},p.map(function(e){var t=(0,m.Z)({},(nD(e.data),e.data)),n=e.title,r=e.key,o=e.isStart,a=e.isEnd;delete t.children;var l=e3(r,b);return i.createElement(tt,(0,m.Z)({},t,l,{title:n,active:y,data:e.data,key:r,isStart:o,isEnd:a}))}))}):i.createElement(tt,(0,m.Z)({domRef:t,className:s,style:d},x,{active:y}))};nz.displayName="MotionTreeNode";var nB=i.forwardRef(nz);function nF(e,t,n){var r=e.findIndex(function(e){return e.key===n}),o=e[r+1],i=t.findIndex(function(e){return e.key===n});if(o){var a=t.findIndex(function(e){return e.key===o.key});return t.slice(i+1,a)}return t.slice(i+1)}var nH=["prefixCls","data","selectable","checkable","expandedKeys","selectedKeys","checkedKeys","loadedKeys","loadingKeys","halfCheckedKeys","keyEntities","disabled","dragging","dragOverNodeKey","dropPosition","motion","height","itemHeight","virtual","focusable","activeItem","focused","tabIndex","onKeyDown","onFocus","onBlur","onActiveChange","onListChangeStart","onListChangeEnd"],nq={width:0,height:0,display:"flex",overflow:"hidden",opacity:0,border:0,padding:0,margin:0},nW=function(){},nK="RC_TREE_MOTION_".concat(Math.random()),nV={key:nK},nU={key:nK,level:0,index:0,pos:"0",node:nV,nodes:[nV]},nG={parent:null,children:[],pos:nU.pos,data:nV,title:null,key:nK,isStart:[],isEnd:[]};function nX(e,t,n,r){return!1!==t&&n?e.slice(0,Math.ceil(n/r)+1):e}function n$(e){return e0(e.key,e.pos)}var nY=i.forwardRef(function(e,t){var n=e.prefixCls,r=e.data,o=(e.selectable,e.checkable,e.expandedKeys),a=e.selectedKeys,l=e.checkedKeys,s=e.loadedKeys,d=e.loadingKeys,f=e.halfCheckedKeys,p=e.keyEntities,h=e.disabled,g=e.dragging,v=e.dragOverNodeKey,y=e.dropPosition,b=e.motion,x=e.height,w=e.itemHeight,S=e.virtual,k=e.focusable,E=e.activeItem,C=e.focused,O=e.tabIndex,j=e.onKeyDown,P=e.onFocus,N=e.onBlur,M=e.onActiveChange,I=e.onListChangeStart,R=e.onListChangeEnd,T=(0,z.Z)(e,nH),A=i.useRef(null),_=i.useRef(null);i.useImperativeHandle(t,function(){return{scrollTo:function(e){A.current.scrollTo(e)},getIndentWidth:function(){return _.current.offsetWidth}}});var 
D=i.useState(o),Z=(0,c.Z)(D,2),L=Z[0],B=Z[1],F=i.useState(r),H=(0,c.Z)(F,2),q=H[0],W=H[1],K=i.useState(r),V=(0,c.Z)(K,2),U=V[0],G=V[1],X=i.useState([]),$=(0,c.Z)(X,2),Y=$[0],Q=$[1],J=i.useState(null),ee=(0,c.Z)(J,2),et=ee[0],en=ee[1],er=i.useRef(r);function eo(){var e=er.current;W(e),G(e),Q([]),en(null),R()}er.current=r,(0,u.Z)(function(){B(o);var e=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[],n=e.length,r=t.length;if(1!==Math.abs(n-r))return{add:!1,key:null};function o(e,t){var n=new Map;e.forEach(function(e){n.set(e,!0)});var r=t.filter(function(e){return!n.has(e)});return 1===r.length?r[0]:null}return n ").concat(t);return t}(E)),i.createElement("div",null,i.createElement("input",{style:nq,disabled:!1===k||h,tabIndex:!1!==k?O:null,onKeyDown:j,onFocus:P,onBlur:N,value:"",onChange:nW,"aria-label":"for screen reader"})),i.createElement("div",{className:"".concat(n,"-treenode"),"aria-hidden":!0,style:{position:"absolute",pointerEvents:"none",visibility:"hidden",height:0,overflow:"hidden",border:0,padding:0}},i.createElement("div",{className:"".concat(n,"-indent")},i.createElement("div",{ref:_,className:"".concat(n,"-indent-unit")}))),i.createElement(eA.Z,(0,m.Z)({},T,{data:ei,itemKey:n$,height:x,fullHeight:!1,virtual:S,itemHeight:w,prefixCls:"".concat(n,"-list"),ref:A,onVisibleChange:function(e,t){var n=new Set(e);t.filter(function(e){return!n.has(e)}).some(function(e){return n$(e)===nK})&&eo()}}),function(e){var t=e.pos,n=(0,m.Z)({},(nD(e.data),e.data)),r=e.title,o=e.key,a=e.isStart,l=e.isEnd,c=e0(o,t);delete n.key,delete n.children;var s=e3(c,ea);return i.createElement(nB,(0,m.Z)({},n,s,{title:r,active:!!E&&o===E.key,pos:t,data:e.data,isStart:a,isEnd:l,motion:b,motionNodes:o===nK?Y:null,motionType:et,onMotionStart:I,onMotionEnd:eo,treeNodeRequiredProps:ea,onMouseMove:function(){M(null)}}))}))});nY.displayName="NodeList";var nQ=function(e){(0,eU.Z)(n,e);var t=(0,eG.Z)(n);function n(){var e;(0,eW.Z)(this,n);for(var r=arguments.length,o=Array(r),a=0;a0&&void 0!==arguments[0]?arguments[0]:[];t.forEach(function(t){var n=t.key,o=t.children;r.push(n),e(o)})}(a[c].children),r),indent:e.listRef.current.getIndentWidth()}),e.setExpandedKeys(s),window.addEventListener("dragend",e.onWindowDragEnd),null==l||l({event:t,node:e5(n.props)})},e.onNodeDragEnter=function(t,n){var r=e.state,o=r.expandedKeys,i=r.keyEntities,a=r.dragChildrenKeys,l=r.flattenNodes,c=r.indent,s=e.props,u=s.onDragEnter,d=s.onExpand,f=s.allowDrop,p=s.direction,h=n.props,m=h.pos,g=h.eventKey,v=(0,eV.Z)(e).dragNode;if(e.currentMouseOverDroppableNodeKey!==g&&(e.currentMouseOverDroppableNodeKey=g),!v){e.resetDragState();return}var y=ti(t,v,n,c,e.dragStartMousePosition,f,l,i,o,p),b=y.dropPosition,x=y.dropLevelOffset,w=y.dropTargetKey,S=y.dropContainerKey,k=y.dropTargetPos,E=y.dropAllowed,C=y.dragOverNodeKey;if(-1!==a.indexOf(w)||!E||(e.delayedDragEnterLogic||(e.delayedDragEnterLogic={}),Object.keys(e.delayedDragEnterLogic).forEach(function(t){clearTimeout(e.delayedDragEnterLogic[t])}),v.props.eventKey!==n.props.eventKey&&(t.persist(),e.delayedDragEnterLogic[m]=window.setTimeout(function(){if(null!==e.state.draggingNodeKey){var r=(0,ec.Z)(o),a=i[n.props.eventKey];a&&(a.children||[]).length&&(r=tr(o,n.props.eventKey)),"expandedKeys"in 
e.props||e.setExpandedKeys(r),null==d||d(r,{node:e5(n.props),expanded:!0,nativeEvent:t.nativeEvent})}},800)),v.props.eventKey===w&&0===x)){e.resetDragState();return}e.setState({dragOverNodeKey:C,dropPosition:b,dropLevelOffset:x,dropTargetKey:w,dropContainerKey:S,dropTargetPos:k,dropAllowed:E}),null==u||u({event:t,node:e5(n.props),expandedKeys:o})},e.onNodeDragOver=function(t,n){var r=e.state,o=r.dragChildrenKeys,i=r.flattenNodes,a=r.keyEntities,l=r.expandedKeys,c=r.indent,s=e.props,u=s.onDragOver,d=s.allowDrop,f=s.direction,p=(0,eV.Z)(e).dragNode;if(p){var h=ti(t,p,n,c,e.dragStartMousePosition,d,i,a,l,f),m=h.dropPosition,g=h.dropLevelOffset,v=h.dropTargetKey,y=h.dropContainerKey,b=h.dropAllowed,x=h.dropTargetPos,w=h.dragOverNodeKey;-1===o.indexOf(v)&&b&&(p.props.eventKey===v&&0===g?null===e.state.dropPosition&&null===e.state.dropLevelOffset&&null===e.state.dropTargetKey&&null===e.state.dropContainerKey&&null===e.state.dropTargetPos&&!1===e.state.dropAllowed&&null===e.state.dragOverNodeKey||e.resetDragState():m===e.state.dropPosition&&g===e.state.dropLevelOffset&&v===e.state.dropTargetKey&&y===e.state.dropContainerKey&&x===e.state.dropTargetPos&&b===e.state.dropAllowed&&w===e.state.dragOverNodeKey||e.setState({dropPosition:m,dropLevelOffset:g,dropTargetKey:v,dropContainerKey:y,dropTargetPos:x,dropAllowed:b,dragOverNodeKey:w}),null==u||u({event:t,node:e5(n.props)}))}},e.onNodeDragLeave=function(t,n){e.currentMouseOverDroppableNodeKey!==n.props.eventKey||t.currentTarget.contains(t.relatedTarget)||(e.resetDragState(),e.currentMouseOverDroppableNodeKey=null);var r=e.props.onDragLeave;null==r||r({event:t,node:e5(n.props)})},e.onWindowDragEnd=function(t){e.onNodeDragEnd(t,null,!0),window.removeEventListener("dragend",e.onWindowDragEnd)},e.onNodeDragEnd=function(t,n){var r=e.props.onDragEnd;e.setState({dragOverNodeKey:null}),e.cleanDragState(),null==r||r({event:t,node:e5(n.props)}),e.dragNode=null,window.removeEventListener("dragend",e.onWindowDragEnd)},e.onNodeDrop=function(t,n){var r,o=arguments.length>2&&void 0!==arguments[2]&&arguments[2],i=e.state,a=i.dragChildrenKeys,l=i.dropPosition,c=i.dropTargetKey,s=i.dropTargetPos;if(i.dropAllowed){var u=e.props.onDrop;if(e.setState({dragOverNodeKey:null}),e.cleanDragState(),null!==c){var d=(0,C.Z)((0,C.Z)({},e3(c,e.getTreeNodeRequiredProps())),{},{active:(null===(r=e.getActiveItem())||void 0===r?void 0:r.key)===c,data:e.state.keyEntities[c].node}),f=-1!==a.indexOf(c);(0,I.ZP)(!f,"Can not drop to dragNode's children node. This is a bug of rc-tree. 
Please report an issue.");var p=to(s),h={event:t,node:e5(d),dragNode:e.dragNode?e5(e.dragNode.props):null,dragNodesKeys:[e.dragNode.props.eventKey].concat(a),dropToGap:0!==l,dropPosition:l+Number(p[p.length-1])};o||null==u||u(h),e.dragNode=null}}},e.cleanDragState=function(){null!==e.state.draggingNodeKey&&e.setState({draggingNodeKey:null,dropPosition:null,dropContainerKey:null,dropTargetKey:null,dropLevelOffset:null,dropAllowed:!0,dragOverNodeKey:null}),e.dragStartMousePosition=null,e.currentMouseOverDroppableNodeKey=null},e.triggerExpandActionExpand=function(t,n){var r=e.state,o=r.expandedKeys,i=r.flattenNodes,a=n.expanded,l=n.key;if(!n.isLeaf&&!t.shiftKey&&!t.metaKey&&!t.ctrlKey){var c=i.filter(function(e){return e.key===l})[0],s=e5((0,C.Z)((0,C.Z)({},e3(l,e.getTreeNodeRequiredProps())),{},{data:c.data}));e.setExpandedKeys(a?tn(o,l):tr(o,l)),e.onNodeExpand(t,s)}},e.onNodeClick=function(t,n){var r=e.props,o=r.onClick;"click"===r.expandAction&&e.triggerExpandActionExpand(t,n),null==o||o(t,n)},e.onNodeDoubleClick=function(t,n){var r=e.props,o=r.onDoubleClick;"doubleClick"===r.expandAction&&e.triggerExpandActionExpand(t,n),null==o||o(t,n)},e.onNodeSelect=function(t,n){var r=e.state.selectedKeys,o=e.state,i=o.keyEntities,a=o.fieldNames,l=e.props,c=l.onSelect,s=l.multiple,u=n.selected,d=n[a.key],f=!u,p=(r=f?s?tr(r,d):[d]:tn(r,d)).map(function(e){var t=i[e];return t?t.node:null}).filter(function(e){return e});e.setUncontrolledState({selectedKeys:r}),null==c||c(r,{event:"select",selected:f,node:n,selectedNodes:p,nativeEvent:t.nativeEvent})},e.onNodeCheck=function(t,n,r){var o,i=e.state,a=i.keyEntities,l=i.checkedKeys,c=i.halfCheckedKeys,s=e.props,u=s.checkStrictly,d=s.onCheck,f=n.key,p={event:"check",node:n,checked:r,nativeEvent:t.nativeEvent};if(u){var h=r?tr(l,f):tn(l,f);o={checked:h,halfChecked:tn(c,f)},p.checkedNodes=h.map(function(e){return a[e]}).filter(function(e){return e}).map(function(e){return e.node}),e.setUncontrolledState({checkedKeys:h})}else{var m=td([].concat((0,ec.Z)(l),[f]),!0,a),g=m.checkedKeys,v=m.halfCheckedKeys;if(!r){var y=new Set(g);y.delete(f);var b=td(Array.from(y),{checked:!1,halfCheckedKeys:v},a);g=b.checkedKeys,v=b.halfCheckedKeys}o=g,p.checkedNodes=[],p.checkedNodesPositions=[],p.halfCheckedKeys=v,g.forEach(function(e){var t=a[e];if(t){var n=t.node,r=t.pos;p.checkedNodes.push(n),p.checkedNodesPositions.push({node:n,pos:r})}}),e.setUncontrolledState({checkedKeys:g},!1,{halfCheckedKeys:v})}null==d||d(o,p)},e.onNodeLoad=function(t){var n=t.key,r=new Promise(function(r,o){e.setState(function(i){var a=i.loadedKeys,l=i.loadingKeys,c=void 0===l?[]:l,s=e.props,u=s.loadData,d=s.onLoad;return u&&-1===(void 0===a?[]:a).indexOf(n)&&-1===c.indexOf(n)?(u(t).then(function(){var o=tr(e.state.loadedKeys,n);null==d||d(o,{event:"load",node:t}),e.setUncontrolledState({loadedKeys:o}),e.setState(function(e){return{loadingKeys:tn(e.loadingKeys,n)}}),r()}).catch(function(t){if(e.setState(function(e){return{loadingKeys:tn(e.loadingKeys,n)}}),e.loadingRetryTimes[n]=(e.loadingRetryTimes[n]||0)+1,e.loadingRetryTimes[n]>=10){var i=e.state.loadedKeys;(0,I.ZP)(!1,"Retry for `loadData` many times but still failed. 
No more retry."),e.setUncontrolledState({loadedKeys:tr(i,n)}),r()}o(t)}),{loadingKeys:tr(c,n)}):null})});return r.catch(function(){}),r},e.onNodeMouseEnter=function(t,n){var r=e.props.onMouseEnter;null==r||r({event:t,node:n})},e.onNodeMouseLeave=function(t,n){var r=e.props.onMouseLeave;null==r||r({event:t,node:n})},e.onNodeContextMenu=function(t,n){var r=e.props.onRightClick;r&&(t.preventDefault(),r({event:t,node:n}))},e.onFocus=function(){var t=e.props.onFocus;e.setState({focused:!0});for(var n=arguments.length,r=Array(n),o=0;o1&&void 0!==arguments[1]&&arguments[1],r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:null;if(!e.destroyed){var o=!1,i=!0,a={};Object.keys(t).forEach(function(n){if(n in e.props){i=!1;return}o=!0,a[n]=t[n]}),o&&(!n||i)&&e.setState((0,C.Z)((0,C.Z)({},a),r))}},e.scrollTo=function(t){e.listRef.current.scrollTo(t)},e}return(0,eK.Z)(n,[{key:"componentDidMount",value:function(){this.destroyed=!1,this.onUpdated()}},{key:"componentDidUpdate",value:function(){this.onUpdated()}},{key:"onUpdated",value:function(){var e=this.props,t=e.activeKey,n=e.itemScrollOffset;void 0!==t&&t!==this.state.activeKey&&(this.setState({activeKey:t}),null!==t&&this.scrollTo({key:t,offset:void 0===n?0:n}))}},{key:"componentWillUnmount",value:function(){window.removeEventListener("dragend",this.onWindowDragEnd),this.destroyed=!0}},{key:"resetDragState",value:function(){this.setState({dragOverNodeKey:null,dropPosition:null,dropLevelOffset:null,dropTargetKey:null,dropContainerKey:null,dropTargetPos:null,dropAllowed:!1})}},{key:"render",value:function(){var e,t,n=this.state,r=n.focused,o=n.flattenNodes,a=n.keyEntities,l=n.draggingNodeKey,c=n.activeKey,s=n.dropLevelOffset,u=n.dropContainerKey,d=n.dropTargetKey,f=n.dropPosition,p=n.dragOverNodeKey,h=n.indent,g=this.props,v=g.prefixCls,y=g.className,b=g.style,x=g.showLine,w=g.focusable,S=g.tabIndex,k=g.selectable,C=g.showIcon,j=g.icon,N=g.switcherIcon,M=g.draggable,I=g.checkable,R=g.checkStrictly,T=g.disabled,A=g.motion,_=g.loadData,D=g.filterTreeNode,Z=g.height,L=g.itemHeight,z=g.virtual,B=g.titleRender,F=g.dropIndicatorRender,H=g.onContextMenu,q=g.onScroll,W=g.direction,K=g.rootClassName,V=g.rootStyle,G=(0,U.Z)(this.props,{aria:!0,data:!0});return M&&(t="object"===(0,E.Z)(M)?M:"function"==typeof 
M?{nodeDraggable:M}:{}),i.createElement(eX.Provider,{value:{prefixCls:v,selectable:k,showIcon:C,icon:j,switcherIcon:N,draggable:t,draggingNodeKey:l,checkable:I,checkStrictly:R,disabled:T,keyEntities:a,dropLevelOffset:s,dropContainerKey:u,dropTargetKey:d,dropPosition:f,dragOverNodeKey:p,indent:h,direction:W,dropIndicatorRender:F,loadData:_,filterTreeNode:D,titleRender:B,onNodeClick:this.onNodeClick,onNodeDoubleClick:this.onNodeDoubleClick,onNodeExpand:this.onNodeExpand,onNodeSelect:this.onNodeSelect,onNodeCheck:this.onNodeCheck,onNodeLoad:this.onNodeLoad,onNodeMouseEnter:this.onNodeMouseEnter,onNodeMouseLeave:this.onNodeMouseLeave,onNodeContextMenu:this.onNodeContextMenu,onNodeDragStart:this.onNodeDragStart,onNodeDragEnter:this.onNodeDragEnter,onNodeDragOver:this.onNodeDragOver,onNodeDragLeave:this.onNodeDragLeave,onNodeDragEnd:this.onNodeDragEnd,onNodeDrop:this.onNodeDrop}},i.createElement("div",{role:"tree",className:P()(v,y,K,(e={},(0,O.Z)(e,"".concat(v,"-show-line"),x),(0,O.Z)(e,"".concat(v,"-focused"),r),(0,O.Z)(e,"".concat(v,"-active-focused"),null!==c),e)),style:V},i.createElement(nY,(0,m.Z)({ref:this.listRef,prefixCls:v,style:b,data:o,disabled:T,selectable:k,checkable:!!I,motion:A,dragging:null!==l,height:Z,itemHeight:L,virtual:z,focusable:w,focused:r,tabIndex:void 0===S?0:S,activeItem:this.getActiveItem(),onFocus:this.onFocus,onBlur:this.onBlur,onKeyDown:this.onKeyDown,onActiveChange:this.onActiveChange,onListChangeStart:this.onListChangeStart,onListChangeEnd:this.onListChangeEnd,onContextMenu:H,onScroll:q},this.getTreeNodeRequiredProps(),G))))}}],[{key:"getDerivedStateFromProps",value:function(e,t){var n,r,o=t.prevProps,i={prevProps:e};function a(t){return!o&&t in e||o&&o[t]!==e[t]}var l=t.fieldNames;if(a("fieldNames")&&(l=e1(e.fieldNames),i.fieldNames=l),a("treeData")?n=e.treeData:a("children")&&((0,I.ZP)(!1,"`children` of Tree is deprecated. 
Please use `treeData` instead."),n=e2(e.children)),n){i.treeData=n;var c=e4(n,{fieldNames:l});i.keyEntities=(0,C.Z)((0,O.Z)({},nK,nU),c.keyEntities)}var s=i.keyEntities||t.keyEntities;if(a("expandedKeys")||o&&a("autoExpandParent"))i.expandedKeys=e.autoExpandParent||!o&&e.defaultExpandParent?tc(e.expandedKeys,s):e.expandedKeys;else if(!o&&e.defaultExpandAll){var u=(0,C.Z)({},s);delete u[nK],i.expandedKeys=Object.keys(u).map(function(e){return u[e].key})}else!o&&e.defaultExpandedKeys&&(i.expandedKeys=e.autoExpandParent||e.defaultExpandParent?tc(e.defaultExpandedKeys,s):e.defaultExpandedKeys);if(i.expandedKeys||delete i.expandedKeys,n||i.expandedKeys){var d=e6(n||t.treeData,i.expandedKeys||t.expandedKeys,l);i.flattenNodes=d}if(e.selectable&&(a("selectedKeys")?i.selectedKeys=ta(e.selectedKeys,e):!o&&e.defaultSelectedKeys&&(i.selectedKeys=ta(e.defaultSelectedKeys,e))),e.checkable&&(a("checkedKeys")?r=tl(e.checkedKeys)||{}:!o&&e.defaultCheckedKeys?r=tl(e.defaultCheckedKeys)||{}:n&&(r=tl(e.checkedKeys)||{checkedKeys:t.checkedKeys,halfCheckedKeys:t.halfCheckedKeys}),r)){var f=r,p=f.checkedKeys,h=void 0===p?[]:p,m=f.halfCheckedKeys,g=void 0===m?[]:m;if(!e.checkStrictly){var v=td(h,!0,s);h=v.checkedKeys,g=v.halfCheckedKeys}i.checkedKeys=h,i.halfCheckedKeys=g}return a("loadedKeys")&&(i.loadedKeys=e.loadedKeys),i}}]),n}(i.Component);nQ.defaultProps={prefixCls:"rc-tree",showLine:!1,showIcon:!0,selectable:!0,multiple:!1,checkable:!1,disabled:!1,checkStrictly:!1,draggable:!1,defaultExpandParent:!0,autoExpandParent:!1,defaultExpandAll:!1,defaultExpandedKeys:[],defaultCheckedKeys:[],defaultSelectedKeys:[],dropIndicatorRender:function(e){var t=e.dropPosition,n=e.dropLevelOffset,r=e.indent,o={pointerEvents:"none",position:"absolute",right:0,backgroundColor:"red",height:2};switch(t){case -1:o.top=0,o.left=-n*r;break;case 1:o.bottom=0,o.left=-n*r;break;case 0:o.bottom=0,o.left=r}return i.createElement("div",{style:o})},allowDrop:function(){return!0},expandAction:!1},nQ.TreeNode=tt;var nJ={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M854.6 288.6L639.4 73.4c-6-6-14.1-9.4-22.6-9.4H192c-17.7 0-32 14.3-32 32v832c0 17.7 14.3 32 32 32h640c17.7 0 32-14.3 32-32V311.3c0-8.5-3.4-16.7-9.4-22.7zM790.2 326H602V137.8L790.2 326zm1.8 562H232V136h302v216a42 42 0 0042 42h216v494z"}}]},name:"file",theme:"outlined"},n0=i.forwardRef(function(e,t){return i.createElement(tY.Z,(0,m.Z)({},e,{ref:t,icon:nJ}))}),n1={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M928 444H820V330.4c0-17.7-14.3-32-32-32H473L355.7 186.2a8.15 8.15 0 00-5.5-2.2H96c-17.7 0-32 14.3-32 32v592c0 17.7 14.3 32 32 32h698c13 0 24.8-7.9 29.7-20l134-332c1.5-3.8 2.3-7.9 2.3-12 0-17.7-14.3-32-32-32zM136 256h188.5l119.6 114.4H748V444H238c-13 0-24.8 7.9-29.7 20L136 643.2V256zm635.3 512H159l103.3-256h612.4L771.3 768z"}}]},name:"folder-open",theme:"outlined"},n2=i.forwardRef(function(e,t){return i.createElement(tY.Z,(0,m.Z)({},e,{ref:t,icon:n1}))}),n6={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M880 298.4H521L403.7 186.2a8.15 8.15 0 00-5.5-2.2H144c-17.7 0-32 14.3-32 32v592c0 17.7 14.3 32 32 32h736c17.7 0 32-14.3 32-32V330.4c0-17.7-14.3-32-32-32zM840 768H184V256h188.5l119.6 114.4H840V768z"}}]},name:"folder",theme:"outlined"},n4=i.forwardRef(function(e,t){return i.createElement(tY.Z,(0,m.Z)({},e,{ref:t,icon:n6}))}),n3={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M300 
276.5a56 56 0 1056-97 56 56 0 00-56 97zm0 284a56 56 0 1056-97 56 56 0 00-56 97zM640 228a56 56 0 10112 0 56 56 0 00-112 0zm0 284a56 56 0 10112 0 56 56 0 00-112 0zM300 844.5a56 56 0 1056-97 56 56 0 00-56 97zM640 796a56 56 0 10112 0 56 56 0 00-112 0z"}}]},name:"holder",theme:"outlined"},n5=i.forwardRef(function(e,t){return i.createElement(tY.Z,(0,m.Z)({},e,{ref:t,icon:n3}))}),n8=n(68710),n7=n(63074);let n9=new tS.E4("ant-tree-node-fx-do-not-use",{"0%":{opacity:0},"100%":{opacity:1}}),re=(e,t)=>({[".".concat(e,"-switcher-icon")]:{display:"inline-block",fontSize:10,verticalAlign:"baseline",svg:{transition:"transform ".concat(t.motionDurationSlow)}}}),rt=(e,t)=>({[".".concat(e,"-drop-indicator")]:{position:"absolute",zIndex:1,height:2,backgroundColor:t.colorPrimary,borderRadius:1,pointerEvents:"none","&:after":{position:"absolute",top:-3,insetInlineStart:-6,width:8,height:8,backgroundColor:"transparent",border:"".concat((0,tS.bf)(t.lineWidthBold)," solid ").concat(t.colorPrimary),borderRadius:"50%",content:'""'}}}),rn=(e,t)=>{let{treeCls:n,treeNodeCls:r,treeNodePadding:o,titleHeight:i,nodeSelectedBg:a,nodeHoverBg:l}=t,c=t.paddingXS;return{[n]:Object.assign(Object.assign({},(0,tk.Wf)(t)),{background:t.colorBgContainer,borderRadius:t.borderRadius,transition:"background-color ".concat(t.motionDurationSlow),["&".concat(n,"-rtl")]:{["".concat(n,"-switcher")]:{"&_close":{["".concat(n,"-switcher-icon")]:{svg:{transform:"rotate(90deg)"}}}}},["&-focused:not(:hover):not(".concat(n,"-active-focused)")]:Object.assign({},(0,tk.oN)(t)),["".concat(n,"-list-holder-inner")]:{alignItems:"flex-start"},["&".concat(n,"-block-node")]:{["".concat(n,"-list-holder-inner")]:{alignItems:"stretch",["".concat(n,"-node-content-wrapper")]:{flex:"auto"},["".concat(r,".dragging")]:{position:"relative","&:after":{position:"absolute",top:0,insetInlineEnd:0,bottom:o,insetInlineStart:0,border:"1px solid ".concat(t.colorPrimary),opacity:0,animationName:n9,animationDuration:t.motionDurationSlow,animationPlayState:"running",animationFillMode:"forwards",content:'""',pointerEvents:"none"}}}},["".concat(r)]:{display:"flex",alignItems:"flex-start",padding:"0 0 ".concat((0,tS.bf)(o)," 0"),outline:"none","&-rtl":{direction:"rtl"},"&-disabled":{["".concat(n,"-node-content-wrapper")]:{color:t.colorTextDisabled,cursor:"not-allowed","&:hover":{background:"transparent"}}},["&-active ".concat(n,"-node-content-wrapper")]:{background:t.controlItemBgHover},["&:not(".concat(r,"-disabled).filter-node ").concat(n,"-title")]:{color:"inherit",fontWeight:500},"&-draggable":{cursor:"grab",["".concat(n,"-draggable-icon")]:{flexShrink:0,width:i,lineHeight:"".concat((0,tS.bf)(i)),textAlign:"center",visibility:"visible",opacity:.2,transition:"opacity ".concat(t.motionDurationSlow),["".concat(r,":hover &")]:{opacity:.45}},["&".concat(r,"-disabled")]:{["".concat(n,"-draggable-icon")]:{visibility:"hidden"}}}},["".concat(n,"-indent")]:{alignSelf:"stretch",whiteSpace:"nowrap",userSelect:"none","&-unit":{display:"inline-block",width:i}},["".concat(n,"-draggable-icon")]:{visibility:"hidden"},["".concat(n,"-switcher")]:Object.assign(Object.assign({},re(e,t)),{position:"relative",flex:"none",alignSelf:"stretch",width:i,margin:0,lineHeight:"".concat((0,tS.bf)(i)),textAlign:"center",cursor:"pointer",userSelect:"none",transition:"all 
".concat(t.motionDurationSlow),borderRadius:t.borderRadius,"&-noop":{cursor:"unset"},["&:not(".concat(n,"-switcher-noop):hover")]:{backgroundColor:t.colorBgTextHover},"&_close":{["".concat(n,"-switcher-icon")]:{svg:{transform:"rotate(-90deg)"}}},"&-loading-icon":{color:t.colorPrimary},"&-leaf-line":{position:"relative",zIndex:1,display:"inline-block",width:"100%",height:"100%","&:before":{position:"absolute",top:0,insetInlineEnd:t.calc(i).div(2).equal(),bottom:t.calc(o).mul(-1).equal(),marginInlineStart:-1,borderInlineEnd:"1px solid ".concat(t.colorBorder),content:'""'},"&:after":{position:"absolute",width:t.calc(t.calc(i).div(2).equal()).mul(.8).equal(),height:t.calc(i).div(2).equal(),borderBottom:"1px solid ".concat(t.colorBorder),content:'""'}}}),["".concat(n,"-checkbox")]:{top:"initial",marginInlineEnd:c,alignSelf:"flex-start",marginTop:t.marginXXS},["".concat(n,"-node-content-wrapper, ").concat(n,"-checkbox + span")]:{position:"relative",zIndex:"auto",minHeight:i,margin:0,padding:"0 ".concat((0,tS.bf)(t.calc(t.paddingXS).div(2).equal())),color:"inherit",lineHeight:"".concat((0,tS.bf)(i)),background:"transparent",borderRadius:t.borderRadius,cursor:"pointer",transition:"all ".concat(t.motionDurationMid,", border 0s, line-height 0s, box-shadow 0s"),"&:hover":{backgroundColor:l},["&".concat(n,"-node-selected")]:{backgroundColor:a},["".concat(n,"-iconEle")]:{display:"inline-block",width:i,height:i,lineHeight:"".concat((0,tS.bf)(i)),textAlign:"center",verticalAlign:"top","&:empty":{display:"none"}}},["".concat(n,"-unselectable ").concat(n,"-node-content-wrapper:hover")]:{backgroundColor:"transparent"},["".concat(n,"-node-content-wrapper")]:Object.assign({lineHeight:"".concat((0,tS.bf)(i)),userSelect:"none"},rt(e,t)),["".concat(r,".drop-container")]:{"> [draggable]":{boxShadow:"0 0 0 2px ".concat(t.colorPrimary)}},"&-show-line":{["".concat(n,"-indent")]:{"&-unit":{position:"relative",height:"100%","&:before":{position:"absolute",top:0,insetInlineEnd:t.calc(i).div(2).equal(),bottom:t.calc(o).mul(-1).equal(),borderInlineEnd:"1px solid ".concat(t.colorBorder),content:'""'},"&-end":{"&:before":{display:"none"}}}},["".concat(n,"-switcher")]:{background:"transparent","&-line-icon":{verticalAlign:"-0.15em"}}},["".concat(r,"-leaf-last")]:{["".concat(n,"-switcher")]:{"&-leaf-line":{"&:before":{top:"auto !important",bottom:"auto !important",height:"".concat((0,tS.bf)(t.calc(i).div(2).equal())," !important")}}}}})}},rr=e=>{let{treeCls:t,treeNodeCls:n,treeNodePadding:r,directoryNodeSelectedBg:o,directoryNodeSelectedColor:i}=e;return{["".concat(t).concat(t,"-directory")]:{[n]:{position:"relative","&:before":{position:"absolute",top:0,insetInlineEnd:0,bottom:r,insetInlineStart:0,transition:"background-color ".concat(e.motionDurationMid),content:'""',pointerEvents:"none"},"&:hover":{"&:before":{background:e.controlItemBgHover}},"> *":{zIndex:1},["".concat(t,"-switcher")]:{transition:"color ".concat(e.motionDurationMid)},["".concat(t,"-node-content-wrapper")]:{borderRadius:0,userSelect:"none","&:hover":{background:"transparent"},["&".concat(t,"-node-selected")]:{color:i,background:"transparent"}},"&-selected":{"\n &:hover::before,\n &::before\n ":{background:o},["".concat(t,"-switcher")]:{color:i},["".concat(t,"-node-content-wrapper")]:{color:i,background:"transparent"}}}}}},ro=(e,t)=>{let 
n=".".concat(e),r=t.calc(t.paddingXS).div(2).equal(),o=(0,tE.TS)(t,{treeCls:n,treeNodeCls:"".concat(n,"-treenode"),treeNodePadding:r});return[rn(e,o),rr(o)]},ri=e=>{let{controlHeightSM:t}=e;return{titleHeight:t,nodeHoverBg:e.controlItemBgHover,nodeSelectedBg:e.controlItemBgActive}};var ra=(0,tC.I$)("Tree",(e,t)=>{let{prefixCls:n}=t;return[{[e.componentCls]:tj("".concat(n,"-checkbox"),e)},ro(n,e),(0,n7.Z)(e)]},e=>{let{colorTextLightSolid:t,colorPrimary:n}=e;return Object.assign(Object.assign({},ri(e)),{directoryNodeSelectedColor:t,directoryNodeSelectedBg:n})});function rl(e){let{dropPosition:t,dropLevelOffset:n,prefixCls:r,indent:o,direction:a="ltr"}=e,l="ltr"===a?"left":"right",c={[l]:-n*o+4,["ltr"===a?"right":"left"]:0};switch(t){case -1:c.top=-3;break;case 1:c.bottom=-3;break;default:c.bottom=-3,c[l]=o+4}return i.createElement("div",{style:c,className:"".concat(r,"-drop-indicator")})}var rc={icon:{tag:"svg",attrs:{viewBox:"0 0 1024 1024",focusable:"false"},children:[{tag:"path",attrs:{d:"M840.4 300H183.6c-19.7 0-30.7 20.8-18.5 35l328.4 380.8c9.4 10.9 27.5 10.9 37 0L858.9 335c12.2-14.2 1.2-35-18.5-35z"}}]},name:"caret-down",theme:"filled"},rs=i.forwardRef(function(e,t){return i.createElement(tY.Z,(0,m.Z)({},e,{ref:t,icon:rc}))}),ru=n(61935),rd={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M328 544h368c4.4 0 8-3.6 8-8v-48c0-4.4-3.6-8-8-8H328c-4.4 0-8 3.6-8 8v48c0 4.4 3.6 8 8 8z"}},{tag:"path",attrs:{d:"M880 112H144c-17.7 0-32 14.3-32 32v736c0 17.7 14.3 32 32 32h736c17.7 0 32-14.3 32-32V144c0-17.7-14.3-32-32-32zm-40 728H184V184h656v656z"}}]},name:"minus-square",theme:"outlined"},rf=i.forwardRef(function(e,t){return i.createElement(tY.Z,(0,m.Z)({},e,{ref:t,icon:rd}))}),rp={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M328 544h152v152c0 4.4 3.6 8 8 8h48c4.4 0 8-3.6 8-8V544h152c4.4 0 8-3.6 8-8v-48c0-4.4-3.6-8-8-8H544V328c0-4.4-3.6-8-8-8h-48c-4.4 0-8 3.6-8 8v152H328c-4.4 0-8 3.6-8 8v48c0 4.4 3.6 8 8 8z"}},{tag:"path",attrs:{d:"M880 112H144c-17.7 0-32 14.3-32 32v736c0 17.7 14.3 32 32 32h736c17.7 0 32-14.3 32-32V144c0-17.7-14.3-32-32-32zm-40 728H184V184h656v656z"}}]},name:"plus-square",theme:"outlined"},rh=i.forwardRef(function(e,t){return i.createElement(tY.Z,(0,m.Z)({},e,{ref:t,icon:rp}))}),rm=n(19722),rg=e=>{let t;let{prefixCls:n,switcherIcon:r,treeNodeProps:o,showLine:a}=e,{isLeaf:l,expanded:c,loading:s}=o;if(s)return i.createElement(ru.Z,{className:"".concat(n,"-switcher-loading-icon")});if(a&&"object"==typeof a&&(t=a.showLeafIcon),l){if(!a)return null;if("boolean"!=typeof t&&t){let e="function"==typeof t?t(o):t;return(0,rm.l$)(e)?(0,rm.Tm)(e,{className:P()(e.props.className||"","".concat(n,"-switcher-line-custom-icon"))}):e}return t?i.createElement(n0,{className:"".concat(n,"-switcher-line-icon")}):i.createElement("span",{className:"".concat(n,"-switcher-leaf-line")})}let u="".concat(n,"-switcher-icon"),d="function"==typeof r?r(o):r;return(0,rm.l$)(d)?(0,rm.Tm)(d,{className:P()(d.props.className||"",u)}):void 0!==d?d:a?c?i.createElement(rf,{className:"".concat(n,"-switcher-line-icon")}):i.createElement(rh,{className:"".concat(n,"-switcher-line-icon")}):i.createElement(rs,{className:u})};let rv=i.forwardRef((e,t)=>{var 
n;let{getPrefixCls:r,direction:o,virtual:a,tree:l}=i.useContext(tv.E_),{prefixCls:c,className:s,showIcon:u=!1,showLine:d,switcherIcon:f,blockNode:p=!1,children:h,checkable:m=!1,selectable:g=!0,draggable:v,motion:y,style:b}=e,x=r("tree",c),w=r(),S=null!=y?y:Object.assign(Object.assign({},(0,n8.Z)(w)),{motionAppear:!1}),k=Object.assign(Object.assign({},e),{checkable:m,selectable:g,showIcon:u,motion:S,blockNode:p,showLine:!!d,dropIndicatorRender:rl}),[E,C,O]=ra(x),[,j]=(0,nw.ZP)(),N=j.paddingXS/2+((null===(n=j.Tree)||void 0===n?void 0:n.titleHeight)||j.controlHeightSM),M=i.useMemo(()=>{if(!v)return!1;let e={};switch(typeof v){case"function":e.nodeDraggable=v;break;case"object":e=Object.assign({},v)}return!1!==e.icon&&(e.icon=e.icon||i.createElement(n5,null)),e},[v]);return E(i.createElement(nQ,Object.assign({itemHeight:N,ref:t,virtual:a},k,{style:Object.assign(Object.assign({},null==l?void 0:l.style),b),prefixCls:x,className:P()({["".concat(x,"-icon-hide")]:!u,["".concat(x,"-block-node")]:p,["".concat(x,"-unselectable")]:!g,["".concat(x,"-rtl")]:"rtl"===o},null==l?void 0:l.className,s,C,O),direction:o,checkable:m?i.createElement("span",{className:"".concat(x,"-checkbox-inner")}):m,selectable:g,switcherIcon:e=>i.createElement(rg,{prefixCls:x,switcherIcon:f,treeNodeProps:e,showLine:d}),draggable:M}),h))});function ry(e,t,n){let{key:r,children:o}=n;e.forEach(function(e){let i=e[r],a=e[o];!1!==t(i,e)&&ry(a||[],t,n)})}(r=o||(o={}))[r.None=0]="None",r[r.Start=1]="Start",r[r.End=2]="End";var rb=function(e,t){var n={};for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&0>t.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var o=0,r=Object.getOwnPropertySymbols(e);o<r.length;o++)0>t.indexOf(r[o])&&Object.prototype.propertyIsEnumerable.call(e,r[o])&&(n[r[o]]=e[r[o]]);return n};function rx(e){let{isLeaf:t,expanded:n}=e;return t?i.createElement(n0,null):n?i.createElement(n2,null):i.createElement(n4,null)}function rw(e){let{treeData:t,children:n}=e;return t||e2(n)}let rS=i.forwardRef((e,t)=>{var{defaultExpandAll:n,defaultExpandParent:r,defaultExpandedKeys:a}=e,l=rb(e,["defaultExpandAll","defaultExpandParent","defaultExpandedKeys"]);let c=i.useRef(),s=i.useRef(),u=()=>{let{keyEntities:e}=e4(rw(l));return n?Object.keys(e):r?tc(l.expandedKeys||a||[],e):l.expandedKeys||a},[d,f]=i.useState(l.selectedKeys||l.defaultSelectedKeys||[]),[p,h]=i.useState(()=>u());i.useEffect(()=>{"selectedKeys"in l&&f(l.selectedKeys)},[l.selectedKeys]),i.useEffect(()=>{"expandedKeys"in l&&h(l.expandedKeys)},[l.expandedKeys]);let{getPrefixCls:m,direction:g}=i.useContext(tv.E_),{prefixCls:v,className:y,showIcon:b=!0,expandAction:x="click"}=l,w=rb(l,["prefixCls","className","showIcon","expandAction"]),S=m("tree",v),k=P()("".concat(S,"-directory"),{["".concat(S,"-directory-rtl")]:"rtl"===g},y);return i.createElement(rv,Object.assign({icon:rx,ref:t,blockNode:!0},w,{showIcon:b,expandAction:x,prefixCls:S,className:k,expandedKeys:p,selectedKeys:d,onSelect:(e,t)=>{var n;let r;let{multiple:i,fieldNames:a}=l,{node:u,nativeEvent:d}=t,{key:h=""}=u,m=rw(l),g=Object.assign(Object.assign({},t),{selected:!0}),v=(null==d?void 0:d.ctrlKey)||(null==d?void 0:d.metaKey),y=null==d?void 0:d.shiftKey;i&&v?(r=e,c.current=h,s.current=r):i&&y?r=Array.from(new Set([].concat((0,ec.Z)(s.current||[]),(0,ec.Z)(function(e){let{treeData:t,expandedKeys:n,startKey:r,endKey:i,fieldNames:a}=e,l=[],c=o.None;return r&&r===i?[r]:r&&i?(ry(t,e=>{if(c===o.End)return!1;if(e===r||e===i){if(l.push(e),c===o.None)c=o.Start;else if(c===o.Start)return 
c=o.End,!1}else c===o.Start&&l.push(e);return n.includes(e)},e1(a)),l):[]}({treeData:m,expandedKeys:p,startKey:h,endKey:c.current,fieldNames:a}))))):(r=[h],c.current=h,s.current=r),g.selectedNodes=function(e,t,n){let r=(0,ec.Z)(t),o=[];return ry(e,(e,t)=>{let n=r.indexOf(e);return -1!==n&&(o.push(t),r.splice(n,1)),!!r.length},e1(n)),o}(m,r,a),null===(n=l.onSelect)||void 0===n||n.call(l,r,g),"selectedKeys"in l||f(r)},onExpand:(e,t)=>{var n;return"expandedKeys"in l||h(e),null===(n=l.onExpand)||void 0===n?void 0:n.call(l,e,t)}}))});rv.DirectoryTree=rS,rv.TreeNode=tt;var rk=n(29436),rE=n(56632),rC=function(e){let{value:t,onChange:n,filterSearch:r,tablePrefixCls:o,locale:a}=e;return r?i.createElement("div",{className:"".concat(o,"-filter-dropdown-search")},i.createElement(rE.Z,{prefix:i.createElement(rk.Z,null),placeholder:a.filterSearchPlaceholder,onChange:n,value:t,htmlSize:1,className:"".concat(o,"-filter-dropdown-search-input")})):null};let rO=e=>{let{keyCode:t}=e;t===t6.Z.ENTER&&e.stopPropagation()},rj=i.forwardRef((e,t)=>i.createElement("div",{className:e.className,onClick:e=>e.stopPropagation(),onKeyDown:rO,ref:t},e.children));function rP(e){let t=[];return(e||[]).forEach(e=>{let{value:n,children:r}=e;t.push(n),r&&(t=[].concat((0,ec.Z)(t),(0,ec.Z)(rP(r))))}),t}function rN(e,t){return("string"==typeof t||"number"==typeof t)&&(null==t?void 0:t.toString().toLowerCase().includes(e.trim().toLowerCase()))}var rM=function(e){var t,n;let r,o;let{tablePrefixCls:a,prefixCls:l,column:c,dropdownPrefixCls:s,columnKey:u,filterMultiple:f,filterMode:p="menu",filterSearch:h=!1,filterState:m,triggerFilter:g,locale:v,children:y,getPopupContainer:b,rootClassName:x}=e,{filterDropdownOpen:w,onFilterDropdownOpenChange:S,filterResetToDefaultFilteredValue:k,defaultFilteredValue:E,filterDropdownVisible:C,onFilterDropdownVisibleChange:O}=c,[j,N]=i.useState(!1),M=!!(m&&((null===(t=m.filteredKeys)||void 0===t?void 0:t.length)||m.forceFiltered)),I=e=>{N(e),null==S||S(e),null==O||O(e)},R=null!==(n=null!=w?w:C)&&void 0!==n?n:j,T=null==m?void 0:m.filteredKeys,[A,_]=function(e){let t=i.useRef(e),n=(0,tV.Z)();return[()=>t.current,e=>{t.current=e,n()}]}(T||[]),D=e=>{let{selectedKeys:t}=e;_(t)};i.useEffect(()=>{j&&D({selectedKeys:T||[]})},[T]);let[Z,L]=i.useState([]),[z,B]=i.useState(""),F=e=>{let{value:t}=e.target;B(t)};i.useEffect(()=>{j||B("")},[j]);let H=e=>{let t=e&&e.length?e:null;if(null===t&&(!m||!m.filteredKeys)||(0,d.Z)(t,null==m?void 0:m.filteredKeys,!0))return null;g({column:c,key:u,filteredKeys:t})},q=()=>{I(!1),H(A())},W=function(){let{confirm:e,closeDropdown:t}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{confirm:!1,closeDropdown:!1};e&&H([]),t&&I(!1),B(""),k?_((E||[]).map(e=>String(e))):_([])},K=P()({["".concat(s,"-menu-without-submenu")]:!(c.filters||[]).some(e=>{let{children:t}=e;return t})}),V=e=>{let{filters:t}=e;return(t||[]).map((e,t)=>{let n=String(e.value),r={title:e.text,key:void 0!==e.value?n:String(t)};return e.children&&(r.children=V({filters:e.children})),r})},U=e=>{var t;return Object.assign(Object.assign({},e),{text:e.title,value:e.key,children:(null===(t=e.children)||void 0===t?void 0:t.map(e=>U(e)))||[]})};if("function"==typeof c.filterDropdown)r=c.filterDropdown({prefixCls:"".concat(s,"-custom"),setSelectedKeys:e=>D({selectedKeys:e}),selectedKeys:A(),confirm:function(){let{closeDropdown:e}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{closeDropdown:!0};e&&I(!1),H(A())},clearFilters:W,filters:c.filters,visible:R,close:()=>{I(!1)}});else 
if(c.filterDropdown)r=c.filterDropdown;else{let e=A()||[];r=i.createElement(i.Fragment,null,0===(c.filters||[]).length?i.createElement(nT.Z,{image:nT.Z.PRESENTED_IMAGE_SIMPLE,description:v.filterEmptyText,imageStyle:{height:24},style:{margin:0,padding:"16px 0"}}):"tree"===p?i.createElement(i.Fragment,null,i.createElement(rC,{filterSearch:h,value:z,onChange:F,tablePrefixCls:a,locale:v}),i.createElement("div",{className:"".concat(a,"-filter-dropdown-tree")},f?i.createElement(tM,{checked:e.length===rP(c.filters).length,indeterminate:e.length>0&&e.length