From 318b4813f2b45ecf98bd46104d560393768b832c Mon Sep 17 00:00:00 2001
From: mogith-pn <143642606+mogith-pn@users.noreply.github.com>
Date: Tue, 30 Apr 2024 22:38:33 +0530
Subject: [PATCH 001/184] Clarifai-LiteLLM integration (#1)

* intg v1 clarifai-litellm

* Added more community models and testcase

* Clarifai-updated markdown docs
---
 cookbook/liteLLM_clarifai_Demo.ipynb | 151 ++++++++++++++
 docs/my-website/docs/providers/clarifai.md | 177 +++++++++++++++++
 litellm/__init__.py | 70 +++++++
 litellm/llms/clarifai.py | 216 +++++++++++++++++++++
 litellm/llms/prompt_templates/factory.py | 3 +
 litellm/main.py | 50 +++++
 litellm/tests/test_clarifai_completion.py | 67 +++++++
 7 files changed, 734 insertions(+)
 create mode 100644 cookbook/liteLLM_clarifai_Demo.ipynb
 create mode 100644 docs/my-website/docs/providers/clarifai.md
 create mode 100644 litellm/llms/clarifai.py
 create mode 100644 litellm/tests/test_clarifai_completion.py

diff --git a/cookbook/liteLLM_clarifai_Demo.ipynb b/cookbook/liteLLM_clarifai_Demo.ipynb
new file mode 100644
index 000000000..4e3b4dbb0
--- /dev/null
+++ b/cookbook/liteLLM_clarifai_Demo.ipynb
@@ -0,0 +1,151 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# LiteLLM Clarifai \n",
+ "This notebook walks you through using the liteLLM integration for Clarifai to call Clarifai-hosted LLMs and receive responses in the OpenAI output format."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Pre-Requisites"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# install necessary packages\n",
+ "!pip install litellm\n",
+ "!pip install clarifai"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To obtain a Clarifai Personal Access Token (PAT), follow the steps in this [link](https://docs.clarifai.com/clarifai-basics/authentication/personal-access-tokens/)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## Set Clarifai Credentials\n",
+ "import os\n",
+ "os.environ[\"CLARIFAI_API_KEY\"] = \"YOUR_CLARIFAI_PAT\" # Clarifai PAT"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Mistral-large"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import litellm\n",
+ "\n",
+ "litellm.set_verbose=False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Mistral large response : ModelResponse(id='chatcmpl-6eed494d-7ae2-4870-b9c2-6a64d50a6151', choices=[Choices(finish_reason='stop', index=1, message=Message(content=\"In the grand tapestry of time, where tales unfold,\\nLies the chronicle of ages, a sight to behold.\\nA tale of empires rising, and kings of old,\\nOf civilizations lost, and stories untold.\\n\\nOnce upon a yesterday, in a time so vast,\\nHumans took their first steps, casting shadows in the past.\\nFrom the cradle of mankind, a journey they embarked,\\nThrough stone and bronze and iron, their skills they sharpened and marked.\\n\\nEgyptians built pyramids, reaching for the skies,\\nWhile Greeks sought wisdom, truth, in philosophies that lie.\\nRoman legions marched, their empire to expand,\\nAnd in the East, the Silk Road joined the world, hand in hand.\\n\\nThe Middle Ages came, with knights in shining armor,\\nFeudal lords and serfs, a time of both clamor and calm 
order.\\nThen Renaissance bloomed, like a flower in the sun,\\nA rebirth of art and science, a new age had begun.\\n\\nAcross the vast oceans, explorers sailed with courage bold,\\nDiscovering new lands, stories of adventure, untold.\\nIndustrial Revolution churned, progress in its wake,\\nMachines and factories, a whole new world to make.\\n\\nTwo World Wars raged, a testament to man's strife,\\nYet from the ashes rose hope, a renewed will for life.\\nInto the modern era, technology took flight,\\nConnecting every corner, bathed in digital light.\\n\\nHistory, a symphony, a melody of time,\\nA testament to human will, resilience so sublime.\\nIn every page, a lesson, in every tale, a guide,\\nFor understanding our past, shapes our future's tide.\", role='assistant'))], created=1713896412, model='https://api.clarifai.com/v2/users/mistralai/apps/completion/models/mistral-large/outputs', object='chat.completion', system_fingerprint=None, usage=Usage(prompt_tokens=13, completion_tokens=338, total_tokens=351))\n" + ] + } + ], + "source": [ + "from litellm import completion\n", + "\n", + "messages = [{\"role\": \"user\",\"content\": \"\"\"Write a poem about history?\"\"\"}]\n", + "response=completion(\n", + " model=\"clarifai/mistralai.completion.mistral-large\",\n", + " messages=messages,\n", + " )\n", + "\n", + "print(f\"Mistral large response : {response}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Claude-2.1 " + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Claude-2.1 response : ModelResponse(id='chatcmpl-d126c919-4db4-4aa3-ac8f-7edea41e0b93', choices=[Choices(finish_reason='stop', index=1, message=Message(content=\" Here's a poem I wrote about history:\\n\\nThe Tides of Time\\n\\nThe tides of time ebb and flow,\\nCarrying stories of long ago.\\nFigures and events come into light,\\nShaping the future with all their might.\\n\\nKingdoms rise, empires fall, \\nLeaving traces that echo down every hall.\\nRevolutions bring change with a fiery glow,\\nToppling structures from long ago.\\n\\nExplorers traverse each ocean and land,\\nSeeking treasures they don't understand.\\nWhile artists and writers try to make their mark,\\nHoping their works shine bright in the dark.\\n\\nThe cycle repeats again and again,\\nAs humanity struggles to learn from its pain.\\nThough the players may change on history's stage,\\nThe themes stay the same from age to age.\\n\\nWar and peace, life and death,\\nLove and strife with every breath.\\nThe tides of time continue their dance,\\nAs we join in, by luck or by chance.\\n\\nSo we study the past to light the way forward, \\nHeeding warnings from stories told and heard.\\nThe future unfolds from this unending flow -\\nWhere the tides of time ultimately go.\", role='assistant'))], created=1713896579, model='https://api.clarifai.com/v2/users/anthropic/apps/completion/models/claude-2_1/outputs', object='chat.completion', system_fingerprint=None, usage=Usage(prompt_tokens=12, completion_tokens=232, total_tokens=244))\n" + ] + } + ], + "source": [ + "from litellm import completion\n", + "\n", + "messages = [{\"role\": \"user\",\"content\": \"\"\"Write a poem about history?\"\"\"}]\n", + "response=completion(\n", + " model=\"clarifai/anthropic.completion.claude-2_1\",\n", + " messages=messages,\n", + " )\n", + "\n", + "print(f\"Claude-2.1 response : {response}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 
null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.10"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/my-website/docs/providers/clarifai.md b/docs/my-website/docs/providers/clarifai.md
new file mode 100644
index 000000000..acc8c54be
--- /dev/null
+++ b/docs/my-website/docs/providers/clarifai.md
@@ -0,0 +1,177 @@
+
+# Clarifai
+Anthropic, OpenAI, Mistral, Llama and Gemini LLMs are supported on Clarifai.
+
+## Pre-Requisites
+
+`pip install clarifai`
+
+`pip install litellm`
+
+## Required Environment Variables
+To obtain your Clarifai Personal Access Token (PAT), follow this [link](https://docs.clarifai.com/clarifai-basics/authentication/personal-access-tokens/). The PAT can also be passed directly to the `completion` function.
+
+```python
+os.environ["CLARIFAI_API_KEY"] = "YOUR_CLARIFAI_PAT" # CLARIFAI_PAT
+```
+
+## Usage
+
+```python
+import os
+from litellm import completion
+
+os.environ["CLARIFAI_API_KEY"] = ""
+
+response = completion(
+ model="clarifai/mistralai.completion.mistral-large",
+ messages=[{ "content": "Tell me a joke about physics?","role": "user"}]
+)
+```
+
+**Output**
+```json
+{
+ "id": "chatcmpl-572701ee-9ab2-411c-ac75-46c1ba18e781",
+ "choices": [
+ {
+ "finish_reason": "stop",
+ "index": 1,
+ "message": {
+ "content": "Sure, here's a physics joke for you:\n\nWhy can't you trust an atom?\n\nBecause they make up everything!",
+ "role": "assistant"
+ }
+ }
+ ],
+ "created": 1714410197,
+ "model": "https://api.clarifai.com/v2/users/mistralai/apps/completion/models/mistral-large/outputs",
+ "object": "chat.completion",
+ "system_fingerprint": null,
+ "usage": {
+ "prompt_tokens": 14,
+ "completion_tokens": 24,
+ "total_tokens": 38
+ }
+ }
+```
+
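+## Passing the PAT directly
+
+The PAT can also be supplied per call instead of via the environment. A minimal sketch — this relies on liteLLM's generic `api_key` parameter, which this integration checks before falling back to the `CLARIFAI_API_KEY` environment variable:
+
+```python
+from litellm import completion
+
+response = completion(
+    model="clarifai/mistralai.completion.mistral-large",
+    messages=[{"content": "Tell me a joke about physics?", "role": "user"}],
+    api_key="YOUR_CLARIFAI_PAT",  # takes precedence over CLARIFAI_API_KEY if both are set
+)
+```
+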
+## Clarifai models
+liteLLM supports non-streaming requests to all models on [Clarifai community](https://clarifai.com/explore/models?filterData=%5B%7B%22field%22%3A%22use_cases%22%2C%22value%22%3A%5B%22llm%22%5D%7D%5D&page=1&perPage=24)
+
+liteLLM supports every model deployed on Clarifai. Model names follow the `clarifai/user_id.app_id.model_id` convention; example calls for popular models are listed below.
+
+## Llama LLMs
+| Model Name | Function Call |
+|---------------------------|---------------------------------|
+| clarifai/meta.Llama-2.llama2-7b-chat | `completion('clarifai/meta.Llama-2.llama2-7b-chat', messages)` |
+| clarifai/meta.Llama-2.llama2-13b-chat | `completion('clarifai/meta.Llama-2.llama2-13b-chat', messages)` |
+| clarifai/meta.Llama-2.llama2-70b-chat | `completion('clarifai/meta.Llama-2.llama2-70b-chat', messages)` |
+| clarifai/meta.Llama-2.codeLlama-70b-Python | `completion('clarifai/meta.Llama-2.codeLlama-70b-Python', messages)` |
+| clarifai/meta.Llama-2.codeLlama-70b-Instruct | `completion('clarifai/meta.Llama-2.codeLlama-70b-Instruct', messages)` |
+
+## Mistral LLMs
+| Model Name | Function Call |
+|---------------------------------------------|------------------------------------------------------------------------|
+| clarifai/mistralai.completion.mixtral-8x22B | `completion('clarifai/mistralai.completion.mixtral-8x22B', messages)` |
+| clarifai/mistralai.completion.mistral-large | `completion('clarifai/mistralai.completion.mistral-large', messages)` |
+| clarifai/mistralai.completion.mistral-medium | `completion('clarifai/mistralai.completion.mistral-medium', messages)` |
+| clarifai/mistralai.completion.mistral-small | `completion('clarifai/mistralai.completion.mistral-small', messages)` |
+| clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1 | `completion('clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1', messages)` |
+| clarifai/mistralai.completion.mistral-7B-OpenOrca | `completion('clarifai/mistralai.completion.mistral-7B-OpenOrca', messages)` |
+| clarifai/mistralai.completion.openHermes-2-mistral-7B | `completion('clarifai/mistralai.completion.openHermes-2-mistral-7B', messages)` |
+
+
+## Jurassic LLMs
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/ai21.complete.Jurassic2-Grande | `completion('clarifai/ai21.complete.Jurassic2-Grande', messages)` |
+| clarifai/ai21.complete.Jurassic2-Grande-Instruct | `completion('clarifai/ai21.complete.Jurassic2-Grande-Instruct', messages)` |
+| clarifai/ai21.complete.Jurassic2-Jumbo-Instruct | `completion('clarifai/ai21.complete.Jurassic2-Jumbo-Instruct', messages)` |
+| clarifai/ai21.complete.Jurassic2-Jumbo | `completion('clarifai/ai21.complete.Jurassic2-Jumbo', messages)` |
+| clarifai/ai21.complete.Jurassic2-Large | `completion('clarifai/ai21.complete.Jurassic2-Large', messages)` |
+
+## Wizard LLMs
+
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/wizardlm.generate.wizardCoder-Python-34B | `completion('clarifai/wizardlm.generate.wizardCoder-Python-34B', messages)` |
+| clarifai/wizardlm.generate.wizardLM-70B | `completion('clarifai/wizardlm.generate.wizardLM-70B', messages)` |
+| clarifai/wizardlm.generate.wizardLM-13B | `completion('clarifai/wizardlm.generate.wizardLM-13B', messages)` |
+| clarifai/wizardlm.generate.wizardCoder-15B | `completion('clarifai/wizardlm.generate.wizardCoder-15B', messages)` |
+
+## Anthropic models
+
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/anthropic.completion.claude-v1 | `completion('clarifai/anthropic.completion.claude-v1', messages)` |
+| clarifai/anthropic.completion.claude-instant-1_2 | `completion('clarifai/anthropic.completion.claude-instant-1_2', messages)` |
+| clarifai/anthropic.completion.claude-instant | `completion('clarifai/anthropic.completion.claude-instant', messages)` |
+| clarifai/anthropic.completion.claude-v2 | `completion('clarifai/anthropic.completion.claude-v2', messages)` |
+| clarifai/anthropic.completion.claude-2_1 | `completion('clarifai/anthropic.completion.claude-2_1', messages)` |
+| clarifai/anthropic.completion.claude-3-opus | `completion('clarifai/anthropic.completion.claude-3-opus', messages)` |
+| clarifai/anthropic.completion.claude-3-sonnet | `completion('clarifai/anthropic.completion.claude-3-sonnet', messages)` |
+
+## OpenAI GPT LLMs
+
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/openai.chat-completion.GPT-4 | `completion('clarifai/openai.chat-completion.GPT-4', messages)` |
+| clarifai/openai.chat-completion.GPT-3_5-turbo | `completion('clarifai/openai.chat-completion.GPT-3_5-turbo', messages)` |
+| clarifai/openai.chat-completion.gpt-4-turbo | `completion('clarifai/openai.chat-completion.gpt-4-turbo', messages)` |
+| clarifai/openai.completion.gpt-3_5-turbo-instruct | `completion('clarifai/openai.completion.gpt-3_5-turbo-instruct', messages)` |
+
+## GCP LLMs
+
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/gcp.generate.gemini-1_5-pro | `completion('clarifai/gcp.generate.gemini-1_5-pro', messages)` |
+| clarifai/gcp.generate.imagen-2 | `completion('clarifai/gcp.generate.imagen-2', messages)` |
+| clarifai/gcp.generate.code-gecko | `completion('clarifai/gcp.generate.code-gecko', messages)` |
+| clarifai/gcp.generate.code-bison | `completion('clarifai/gcp.generate.code-bison', messages)` |
+| clarifai/gcp.generate.text-bison | `completion('clarifai/gcp.generate.text-bison', messages)` |
+| clarifai/gcp.generate.gemma-2b-it | `completion('clarifai/gcp.generate.gemma-2b-it', messages)` |
+| clarifai/gcp.generate.gemma-7b-it | `completion('clarifai/gcp.generate.gemma-7b-it', messages)` |
+| clarifai/gcp.generate.gemini-pro | `completion('clarifai/gcp.generate.gemini-pro', messages)` |
+| clarifai/gcp.generate.gemma-1_1-7b-it | `completion('clarifai/gcp.generate.gemma-1_1-7b-it', messages)` |
+
+## Cohere LLMs
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/cohere.generate.cohere-generate-command | `completion('clarifai/cohere.generate.cohere-generate-command', messages)` |
+| clarifai/cohere.generate.command-r-plus | `completion('clarifai/cohere.generate.command-r-plus', messages)` |
+
+## Databricks LLMs
+
+| Model Name | Function Call |
+|---------------------------------------------------|---------------------------------------------------------------------|
+| clarifai/databricks.drbx.dbrx-instruct | `completion('clarifai/databricks.drbx.dbrx-instruct', messages)` |
+| clarifai/databricks.Dolly-v2.dolly-v2-12b | `completion('clarifai/databricks.Dolly-v2.dolly-v2-12b', messages)` |
+
+## Microsoft LLMs
+
+| Model Name | Function Call |
+|---------------------------------------------------|---------------------------------------------------------------------|
+| clarifai/microsoft.text-generation.phi-2 | `completion('clarifai/microsoft.text-generation.phi-2', messages)` |
+| clarifai/microsoft.text-generation.phi-1_5 | `completion('clarifai/microsoft.text-generation.phi-1_5', messages)` |
+
+## Salesforce models
+
+| Model Name | Function Call |
+|-----------------------------------------------------------|-------------------------------------------------------------------------------|
+| clarifai/salesforce.blip.general-english-image-caption-blip-2 | `completion('clarifai/salesforce.blip.general-english-image-caption-blip-2', messages)` |
+| clarifai/salesforce.xgen.xgen-7b-8k-instruct | `completion('clarifai/salesforce.xgen.xgen-7b-8k-instruct', messages)` |
+
+
+## Other top-performing LLMs
+
+| Model Name | Function Call |
+|---------------------------------------------------|---------------------------------------------------------------------|
+| clarifai/deci.decilm.deciLM-7B-instruct | `completion('clarifai/deci.decilm.deciLM-7B-instruct', messages)` |
+| clarifai/upstage.solar.solar-10_7b-instruct | `completion('clarifai/upstage.solar.solar-10_7b-instruct', messages)` |
+| clarifai/openchat.openchat.openchat-3_5-1210 | `completion('clarifai/openchat.openchat.openchat-3_5-1210', 
messages)` | +| clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B | `completion('clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B', messages)` | +| clarifai/fblgit.una-cybertron.una-cybertron-7b-v2 | `completion('clarifai/fblgit.una-cybertron.una-cybertron-7b-v2', messages)` | +| clarifai/tiiuae.falcon.falcon-40b-instruct | `completion('clarifai/tiiuae.falcon.falcon-40b-instruct', messages)` | +| clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat | `completion('clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat', messages)` | +| clarifai/bigcode.code.StarCoder | `completion('clarifai/bigcode.code.StarCoder', messages)` | +| clarifai/mosaicml.mpt.mpt-7b-instruct | `completion('clarifai/mosaicml.mpt.mpt-7b-instruct', messages)` | diff --git a/litellm/__init__.py b/litellm/__init__.py index 5ef78dce4..8523ee577 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -49,6 +49,7 @@ azure_key: Optional[str] = None anthropic_key: Optional[str] = None replicate_key: Optional[str] = None cohere_key: Optional[str] = None +clarifai_key: Optional[str] = None maritalk_key: Optional[str] = None ai21_key: Optional[str] = None openrouter_key: Optional[str] = None @@ -366,6 +367,73 @@ replicate_models: List = [ "replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad", ] +clarifai_models: List = [ + 'clarifai/meta.Llama-3.Llama-3-8B-Instruct', + 'clarifai/gcp.generate.gemma-1_1-7b-it', + 'clarifai/mistralai.completion.mixtral-8x22B', + 'clarifai/cohere.generate.command-r-plus', + 'clarifai/databricks.drbx.dbrx-instruct', + 'clarifai/mistralai.completion.mistral-large', + 'clarifai/mistralai.completion.mistral-medium', + 'clarifai/mistralai.completion.mistral-small', + 'clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1', + 'clarifai/gcp.generate.gemma-2b-it', + 'clarifai/gcp.generate.gemma-7b-it', + 'clarifai/deci.decilm.deciLM-7B-instruct', + 'clarifai/mistralai.completion.mistral-7B-Instruct', + 'clarifai/gcp.generate.gemini-pro', + 'clarifai/anthropic.completion.claude-v1', + 'clarifai/anthropic.completion.claude-instant-1_2', + 'clarifai/anthropic.completion.claude-instant', + 'clarifai/anthropic.completion.claude-v2', + 'clarifai/anthropic.completion.claude-2_1', + 'clarifai/meta.Llama-2.codeLlama-70b-Python', + 'clarifai/meta.Llama-2.codeLlama-70b-Instruct', + 'clarifai/openai.completion.gpt-3_5-turbo-instruct', + 'clarifai/meta.Llama-2.llama2-7b-chat', + 'clarifai/meta.Llama-2.llama2-13b-chat', + 'clarifai/meta.Llama-2.llama2-70b-chat', + 'clarifai/openai.chat-completion.gpt-4-turbo', + 'clarifai/microsoft.text-generation.phi-2', + 'clarifai/meta.Llama-2.llama2-7b-chat-vllm', + 'clarifai/upstage.solar.solar-10_7b-instruct', + 'clarifai/openchat.openchat.openchat-3_5-1210', + 'clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B', + 'clarifai/gcp.generate.text-bison', + 'clarifai/meta.Llama-2.llamaGuard-7b', + 'clarifai/fblgit.una-cybertron.una-cybertron-7b-v2', + 'clarifai/openai.chat-completion.GPT-4', + 'clarifai/openai.chat-completion.GPT-3_5-turbo', + 'clarifai/ai21.complete.Jurassic2-Grande', + 'clarifai/ai21.complete.Jurassic2-Grande-Instruct', + 'clarifai/ai21.complete.Jurassic2-Jumbo-Instruct', + 'clarifai/ai21.complete.Jurassic2-Jumbo', + 'clarifai/ai21.complete.Jurassic2-Large', + 'clarifai/cohere.generate.cohere-generate-command', + 'clarifai/wizardlm.generate.wizardCoder-Python-34B', + 'clarifai/wizardlm.generate.wizardLM-70B', + 'clarifai/tiiuae.falcon.falcon-40b-instruct', + 
'clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat', + 'clarifai/gcp.generate.code-gecko', + 'clarifai/gcp.generate.code-bison', + 'clarifai/mistralai.completion.mistral-7B-OpenOrca', + 'clarifai/mistralai.completion.openHermes-2-mistral-7B', + 'clarifai/wizardlm.generate.wizardLM-13B', + 'clarifai/huggingface-research.zephyr.zephyr-7B-alpha', + 'clarifai/wizardlm.generate.wizardCoder-15B', + 'clarifai/microsoft.text-generation.phi-1_5', + 'clarifai/databricks.Dolly-v2.dolly-v2-12b', + 'clarifai/bigcode.code.StarCoder', + 'clarifai/salesforce.xgen.xgen-7b-8k-instruct', + 'clarifai/mosaicml.mpt.mpt-7b-instruct', + 'clarifai/anthropic.completion.claude-3-opus', + 'clarifai/anthropic.completion.claude-3-sonnet', + 'clarifai/gcp.generate.gemini-1_5-pro', + 'clarifai/gcp.generate.imagen-2', + 'clarifai/salesforce.blip.general-english-image-caption-blip-2', +] + + huggingface_models: List = [ "meta-llama/Llama-2-7b-hf", "meta-llama/Llama-2-7b-chat-hf", @@ -470,6 +538,7 @@ provider_list: List = [ "text-completion-openai", "cohere", "cohere_chat", + "clarifai", "anthropic", "replicate", "huggingface", @@ -608,6 +677,7 @@ from .llms.anthropic import AnthropicConfig from .llms.anthropic_text import AnthropicTextConfig from .llms.replicate import ReplicateConfig from .llms.cohere import CohereConfig +from .llms.clarifai import ClarifaiConfig from .llms.ai21 import AI21Config from .llms.together_ai import TogetherAIConfig from .llms.cloudflare import CloudflareConfig diff --git a/litellm/llms/clarifai.py b/litellm/llms/clarifai.py new file mode 100644 index 000000000..2a7d77c61 --- /dev/null +++ b/litellm/llms/clarifai.py @@ -0,0 +1,216 @@ +import os, types, traceback +import json +import requests +import time +from typing import Callable, Optional +from litellm.utils import ModelResponse, Usage, Choices, Message +import litellm +import httpx +from .prompt_templates.factory import prompt_factory, custom_prompt + + +class ClarifaiError(Exception): + def __init__(self, status_code, message, url): + self.status_code = status_code + self.message = message + self.request = httpx.Request( + method="POST", url=url + ) + self.response = httpx.Response(status_code=status_code, request=self.request) + super().__init__( + self.message + ) + +class ClarifaiConfig: + """ + Reference: https://clarifai.com/meta/Llama-2/models/llama2-70b-chat + TODO fill in the details + """ + max_tokens: Optional[int] = None + temperature: Optional[int] = None + top_k: Optional[int] = None + + def __init__( + self, + max_tokens: Optional[int] = None, + temperature: Optional[int] = None, + top_k: Optional[int] = None, + ) -> None: + locals_ = locals() + for key, value in locals_.items(): + if key != "self" and value is not None: + setattr(self.__class__, key, value) + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + +def validate_environment(api_key): + headers = { + "accept": "application/json", + "content-type": "application/json", + } + if api_key: + headers["Authorization"] = f"Bearer {api_key}" + return headers + +def completions_to_model(payload): + # if payload["n"] != 1: + # raise HTTPException( + # status_code=422, + # detail="Only one generation is supported. 
Please set candidate_count to 1.", + # ) + + params = {} + if temperature := payload.get("temperature"): + params["temperature"] = temperature + if max_tokens := payload.get("max_tokens"): + params["max_tokens"] = max_tokens + return { + "inputs": [{"data": {"text": {"raw": payload["prompt"]}}}], + "model": {"output_info": {"params": params}}, +} + +def convert_model_to_url(model: str, api_base: str): + user_id, app_id, model_id = model.split(".") + return f"{api_base}/users/{user_id}/apps/{app_id}/models/{model_id}/outputs" + +def get_prompt_model_name(url: str): + clarifai_model_name = url.split("/")[-2] + if "claude" in clarifai_model_name: + return "anthropic", clarifai_model_name.replace("_", ".") + if ("llama" in clarifai_model_name)or ("mistral" in clarifai_model_name): + return "", "meta-llama/llama-2-chat" + else: + return "", clarifai_model_name + +def completion( + model: str, + messages: list, + api_base: str, + model_response: ModelResponse, + print_verbose: Callable, + encoding, + api_key, + logging_obj, + custom_prompt_dict={}, + optional_params=None, + litellm_params=None, + logger_fn=None, +): + headers = validate_environment(api_key) + model = convert_model_to_url(model, api_base) + prompt = " ".join(message["content"] for message in messages) # TODO + + ## Load Config + config = litellm.ClarifaiConfig.get_config() + for k, v in config.items(): + if ( + k not in optional_params + ): + optional_params[k] = v + + custom_llm_provider, orig_model_name = get_prompt_model_name(model) + if custom_llm_provider == "anthropic": + prompt = prompt_factory( + model=orig_model_name, + messages=messages, + api_key=api_key, + custom_llm_provider="clarifai" + ) + else: + prompt = prompt_factory( + model=orig_model_name, + messages=messages, + api_key=api_key, + custom_llm_provider=custom_llm_provider + ) + # print(prompt); exit(0) + + data = { + "prompt": prompt, + **optional_params, + } + data = completions_to_model(data) + + + ## LOGGING + logging_obj.pre_call( + input=prompt, + api_key=api_key, + additional_args={ + "complete_input_dict": data, + "headers": headers, + "api_base": api_base, + }, + ) + + ## COMPLETION CALL + response = requests.post( + model, + headers=headers, + data=json.dumps(data), + ) + # print(response.content); exit() + """ + {"status":{"code":10000,"description":"Ok","req_id":"d914cf7e097487997910650cde954a37"},"outputs":[{"id":"c2baa668174b4547bd4d2e9f8996198d","status":{"code":10000,"description":"Ok"},"created_at":"2024-02-07T10:57:52.917990493Z","model":{"id":"GPT-4","name":"GPT-4","created_at":"2023-06-08T17:40:07.964967Z","modified_at":"2023-12-04T11:39:54.587604Z","app_id":"chat-completion","model_version":{"id":"5d7a50b44aec4a01a9c492c5a5fcf387","created_at":"2023-11-09T19:57:56.961259Z","status":{"code":21100,"description":"Model is trained and 
ready"},"completed_at":"2023-11-09T20:00:48.933172Z","visibility":{"gettable":50},"app_id":"chat-completion","user_id":"openai","metadata":{}},"user_id":"openai","model_type_id":"text-to-text","visibility":{"gettable":50},"toolkits":[],"use_cases":[],"languages":[],"languages_full":[],"check_consents":[],"workflow_recommended":false,"image":{"url":"https://data.clarifai.com/small/users/openai/apps/chat-completion/inputs/image/34326a9914d361bb93ae8e5381689755","hosted":{"prefix":"https://data.clarifai.com","suffix":"users/openai/apps/chat-completion/inputs/image/34326a9914d361bb93ae8e5381689755","sizes":["small"],"crossorigin":"use-credentials"}}},"input":{"id":"fba1f22a332743f083ddae0a7eb443ae","data":{"text":{"raw":"what\'s the weather in SF","url":"https://samples.clarifai.com/placeholder.gif"}}},"data":{"text":{"raw":"As an AI, I\'m unable to provide real-time information or updates. Please check a reliable weather website or app for the current weather in San Francisco.","text_info":{"encoding":"UnknownTextEnc"}}}}]} + """ + if response.status_code != 200: + raise ClarifaiError(status_code=response.status_code, message=response.text, url=model) + if "stream" in optional_params and optional_params["stream"] == True: + return response.iter_lines() + else: + logging_obj.post_call( + input=prompt, + api_key=api_key, + original_response=response.text, + additional_args={"complete_input_dict": data}, + ) + ## RESPONSE OBJECT + completion_response = response.json() + # print(completion_response) + try: + choices_list = [] + for idx, item in enumerate(completion_response["outputs"]): + if len(item["data"]["text"]["raw"]) > 0: + message_obj = Message(content=item["data"]["text"]["raw"]) + else: + message_obj = Message(content=None) + choice_obj = Choices( + finish_reason="stop", + index=idx + 1, #check + message=message_obj, + ) + choices_list.append(choice_obj) + model_response["choices"] = choices_list + except Exception as e: + raise ClarifaiError( + message=traceback.format_exc(), status_code=response.status_code, url=model + ) + + # Calculate Usage + prompt_tokens = len(encoding.encode(prompt)) + completion_tokens = len( + encoding.encode(model_response["choices"][0]["message"].get("content")) + ) + model_response["model"] = model + model_response["usage"] = Usage( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens, + ) + return model_response \ No newline at end of file diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index 8afda252a..14f1018b9 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -1306,6 +1306,9 @@ def prompt_factory( return anthropic_pt(messages=messages) elif "mistral." 
in model: return mistral_instruct_pt(messages=messages) + elif custom_llm_provider == "clarifai": + if "claude" in model: + return anthropic_pt(messages=messages) elif custom_llm_provider == "perplexity": for message in messages: message.pop("name", None) diff --git a/litellm/main.py b/litellm/main.py index 593fc7eae..334fe7b6e 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -53,6 +53,7 @@ from .llms import ( ollama, ollama_chat, cloudflare, + clarifai, cohere, cohere_chat, petals, @@ -1150,6 +1151,55 @@ def completion( ) response = model_response + elif ("clarifai" in model + or custom_llm_provider == "clarifai" + or model in litellm.clarifai_models + ): + clarifai_key = None + clarifai_key = ( + api_key + or litellm.clarifai_key + or litellm.api_key + or get_secret("CLARIFAI_API_KEY") + or get_secret("CLARIFAI_API_TOKEN") + ) + + api_base = ( + api_base + or litellm.api_base + or get_secret("CLARIFAI_API_BASE") + or "https://api.clarifai.com/v2" + ) + + custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict + model_response = clarifai.completion( + model=model, + messages=messages, + api_base=api_base, + model_response=model_response, + print_verbose=print_verbose, + optional_params=optional_params, + litellm_params=litellm_params, + logger_fn=logger_fn, + encoding=encoding, # for calculating input/output tokens + api_key=clarifai_key, + logging_obj=logging, + custom_prompt_dict=custom_prompt_dict, + ) + + if "stream" in optional_params and optional_params["stream"] == True: + # don't try to access stream object, + + model_response = CustomStreamWrapper(model_response, model, logging_obj=logging, custom_llm_provider="replicate") + + if optional_params.get("stream", False) or acompletion == True: + ## LOGGING + logging.post_call( + input=messages, + api_key=clarifai_key, + original_response=model_response, + ) + response = model_response elif custom_llm_provider == "anthropic": api_key = ( diff --git a/litellm/tests/test_clarifai_completion.py b/litellm/tests/test_clarifai_completion.py new file mode 100644 index 000000000..2c2626398 --- /dev/null +++ b/litellm/tests/test_clarifai_completion.py @@ -0,0 +1,67 @@ +import sys, os +import traceback +from dotenv import load_dotenv + +load_dotenv() +import os, io + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import pytest +import litellm +from litellm import embedding, completion, completion_cost, Timeout, ModelResponse +from litellm import RateLimitError + +# litellm.num_retries = 3 +litellm.cache = None +litellm.success_callback = [] +user_message = "Write a short poem about the sky" +messages = [{"content": user_message, "role": "user"}] + +@pytest.fixture(autouse=True) +def reset_callbacks(): + print("\npytest fixture - resetting callbacks") + litellm.success_callback = [] + litellm._async_success_callback = [] + litellm.failure_callback = [] + litellm.callbacks = [] + +def test_completion_clarifai_claude_2_1(): + print("calling clarifai claude completion") + import os + + clarifai_pat = os.environ["CLARIFAI_API_KEY"] + + try: + response = completion( + model="clarifai/anthropic.completion.claude-2_1", + messages=messages, + max_tokens=10, + temperature=0.1, + ) + print(response) + + except RateLimitError: + pass + + except Exception as e: + pytest.fail(f"Error occured: {e}") + + +def test_completion_clarifai_mistral_large(): + try: + litellm.set_verbose = True + response: ModelResponse = completion( + model="clarifai/mistralai.completion.mistral-small", + 
messages=messages, + max_tokens=10, + temperature=0.78, + ) + # Add any assertions here to check the response + assert len(response.choices) > 0 + assert len(response.choices[0].message.content) > 0 + except RateLimitError: + pass + except Exception as e: + pytest.fail(f"Error occurred: {e}") From ff8d1bc68cad51e323558a9ca5033ec0cb1c91ce Mon Sep 17 00:00:00 2001 From: jinno Date: Thu, 2 May 2024 21:39:51 +0900 Subject: [PATCH 002/184] fix(exceptions.py): import openai Exceptions --- litellm/exceptions.py | 47 +++++++++++++++++-------------------------- 1 file changed, 18 insertions(+), 29 deletions(-) diff --git a/litellm/exceptions.py b/litellm/exceptions.py index d8b0a7c55..7c3471acf 100644 --- a/litellm/exceptions.py +++ b/litellm/exceptions.py @@ -9,25 +9,12 @@ ## LiteLLM versions of the OpenAI Exception Types -from openai import ( - AuthenticationError, - BadRequestError, - NotFoundError, - RateLimitError, - APIStatusError, - OpenAIError, - APIError, - APITimeoutError, - APIConnectionError, - APIResponseValidationError, - UnprocessableEntityError, - PermissionDeniedError, -) +import openai import httpx from typing import Optional -class AuthenticationError(AuthenticationError): # type: ignore +class AuthenticationError(openai.AuthenticationError): # type: ignore def __init__(self, message, llm_provider, model, response: httpx.Response): self.status_code = 401 self.message = message @@ -39,7 +26,7 @@ class AuthenticationError(AuthenticationError): # type: ignore # raise when invalid models passed, example gpt-8 -class NotFoundError(NotFoundError): # type: ignore +class NotFoundError(openai.NotFoundError): # type: ignore def __init__(self, message, model, llm_provider, response: httpx.Response): self.status_code = 404 self.message = message @@ -50,7 +37,7 @@ class NotFoundError(NotFoundError): # type: ignore ) # Call the base class constructor with the parameters it needs -class BadRequestError(BadRequestError): # type: ignore +class BadRequestError(openai.BadRequestError): # type: ignore def __init__( self, message, model, llm_provider, response: Optional[httpx.Response] = None ): @@ -69,7 +56,7 @@ class BadRequestError(BadRequestError): # type: ignore ) # Call the base class constructor with the parameters it needs -class UnprocessableEntityError(UnprocessableEntityError): # type: ignore +class UnprocessableEntityError(openai.UnprocessableEntityError): # type: ignore def __init__(self, message, model, llm_provider, response: httpx.Response): self.status_code = 422 self.message = message @@ -80,7 +67,7 @@ class UnprocessableEntityError(UnprocessableEntityError): # type: ignore ) # Call the base class constructor with the parameters it needs -class Timeout(APITimeoutError): # type: ignore +class Timeout(openai.APITimeoutError): # type: ignore def __init__(self, message, model, llm_provider): request = httpx.Request(method="POST", url="https://api.openai.com/v1") super().__init__( @@ -96,7 +83,7 @@ class Timeout(APITimeoutError): # type: ignore return str(self.message) -class PermissionDeniedError(PermissionDeniedError): # type:ignore +class PermissionDeniedError(openai.PermissionDeniedError): # type:ignore def __init__(self, message, llm_provider, model, response: httpx.Response): self.status_code = 403 self.message = message @@ -107,7 +94,7 @@ class PermissionDeniedError(PermissionDeniedError): # type:ignore ) # Call the base class constructor with the parameters it needs -class RateLimitError(RateLimitError): # type: ignore +class RateLimitError(openai.RateLimitError): # type: ignore 
def __init__(self, message, llm_provider, model, response: httpx.Response): self.status_code = 429 self.message = message @@ -148,7 +135,7 @@ class ContentPolicyViolationError(BadRequestError): # type: ignore ) # Call the base class constructor with the parameters it needs -class ServiceUnavailableError(APIStatusError): # type: ignore +class ServiceUnavailableError(openai.APIStatusError): # type: ignore def __init__(self, message, llm_provider, model, response: httpx.Response): self.status_code = 503 self.message = message @@ -160,7 +147,7 @@ class ServiceUnavailableError(APIStatusError): # type: ignore # raise this when the API returns an invalid response object - https://github.com/openai/openai-python/blob/1be14ee34a0f8e42d3f9aa5451aa4cb161f1781f/openai/api_requestor.py#L401 -class APIError(APIError): # type: ignore +class APIError(openai.APIError): # type: ignore def __init__( self, status_code, message, llm_provider, model, request: httpx.Request ): @@ -172,7 +159,7 @@ class APIError(APIError): # type: ignore # raised if an invalid request (not get, delete, put, post) is made -class APIConnectionError(APIConnectionError): # type: ignore +class APIConnectionError(openai.APIConnectionError): # type: ignore def __init__(self, message, llm_provider, model, request: httpx.Request): self.message = message self.llm_provider = llm_provider @@ -182,7 +169,7 @@ class APIConnectionError(APIConnectionError): # type: ignore # raised if an invalid request (not get, delete, put, post) is made -class APIResponseValidationError(APIResponseValidationError): # type: ignore +class APIResponseValidationError(openai.APIResponseValidationError): # type: ignore def __init__(self, message, llm_provider, model): self.message = message self.llm_provider = llm_provider @@ -192,7 +179,7 @@ class APIResponseValidationError(APIResponseValidationError): # type: ignore super().__init__(response=response, body=None, message=message) -class OpenAIError(OpenAIError): # type: ignore +class OpenAIError(openai.OpenAIError): # type: ignore def __init__(self, original_exception): self.status_code = original_exception.http_status super().__init__( @@ -214,12 +201,14 @@ class BudgetExceededError(Exception): ## DEPRECATED ## -class InvalidRequestError(BadRequestError): # type: ignore - def __init__(self, message, model, llm_provider): +class InvalidRequestError(openai.BadRequestError): # type: ignore + def __init__( + self, message, model, llm_provider, response: Optional[httpx.Response] = None + ): self.status_code = 400 self.message = message self.model = model self.llm_provider = llm_provider super().__init__( - self.message, f"{self.model}" + self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs From 6cec252b076012fd3ea5a8b06d603e05c38dd789 Mon Sep 17 00:00:00 2001 From: Lunik Date: Thu, 2 May 2024 23:12:48 +0200 Subject: [PATCH 003/184] =?UTF-8?q?=E2=9C=A8=20feat:=20Add=20Azure=20Conte?= =?UTF-8?q?nt-Safety=20Proxy=20hooks?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lunik --- litellm/proxy/hooks/azure_content_safety.py | 157 ++++++++++++++++++++ litellm/proxy/proxy_server.py | 17 +++ 2 files changed, 174 insertions(+) create mode 100644 litellm/proxy/hooks/azure_content_safety.py diff --git a/litellm/proxy/hooks/azure_content_safety.py b/litellm/proxy/hooks/azure_content_safety.py new file mode 100644 index 000000000..161e35cde --- /dev/null +++ b/litellm/proxy/hooks/azure_content_safety.py @@ -0,0 +1,157 @@ 
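+# Proxy hook that screens prompts and responses with Azure AI Content-Safety:
+# each text category (hate, self-harm, sexual, violence) is scored, and any
+# severity at or above its configured threshold rejects the call with HTTP 400.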
+from litellm.integrations.custom_logger import CustomLogger +from litellm.caching import DualCache +from litellm.proxy._types import UserAPIKeyAuth +import litellm, traceback, sys, uuid +from fastapi import HTTPException +from litellm._logging import verbose_proxy_logger + + +class _PROXY_AzureContentSafety( + CustomLogger +): # https://docs.litellm.ai/docs/observability/custom_callback#callback-class + # Class variables or attributes + + def __init__(self, endpoint, api_key, thresholds=None): + try: + from azure.ai.contentsafety.aio import ContentSafetyClient + from azure.core.credentials import AzureKeyCredential + from azure.ai.contentsafety.models import TextCategory + from azure.ai.contentsafety.models import AnalyzeTextOptions + from azure.core.exceptions import HttpResponseError + except Exception as e: + raise Exception( + f"\033[91mAzure Content-Safety not installed, try running 'pip install azure-ai-contentsafety' to fix this error: {e}\n{traceback.format_exc()}\033[0m" + ) + self.endpoint = endpoint + self.api_key = api_key + self.text_category = TextCategory + self.analyze_text_options = AnalyzeTextOptions + self.azure_http_error = HttpResponseError + + self.thresholds = self._configure_thresholds(thresholds) + + self.client = ContentSafetyClient( + self.endpoint, AzureKeyCredential(self.api_key) + ) + + def _configure_thresholds(self, thresholds=None): + default_thresholds = { + self.text_category.HATE: 6, + self.text_category.SELF_HARM: 6, + self.text_category.SEXUAL: 6, + self.text_category.VIOLENCE: 6, + } + + if thresholds is None: + return default_thresholds + + for key, default in default_thresholds.items(): + if key not in thresholds: + thresholds[key] = default + + return thresholds + + def print_verbose(self, print_statement): + try: + verbose_proxy_logger.debug(print_statement) + if litellm.set_verbose: + print(print_statement) # noqa + except: + pass + + def _severity(self, severity): + if severity >= 6: + return "high" + elif severity >= 4: + return "medium" + elif severity >= 2: + return "low" + else: + return "safe" + + def _compute_result(self, response): + result = {} + + category_severity = { + item.category: item.severity for item in response.categories_analysis + } + for category in self.text_category: + severity = category_severity.get(category) + if severity is not None: + result[category] = { + "filtered": severity >= self.thresholds[category], + "severity": self._severity(severity), + } + + return result + + async def test_violation(self, content: str, source: str = None): + self.print_verbose(f"Testing Azure Content-Safety for: {content}") + + # Construct a request + request = self.analyze_text_options(text=content) + + # Analyze text + try: + response = await self.client.analyze_text(request) + except self.azure_http_error as e: + self.print_verbose( + f"Error in Azure Content-Safety: {traceback.format_exc()}" + ) + traceback.print_exc() + raise + + result = self._compute_result(response) + self.print_verbose(f"Azure Content-Safety Result: {result}") + + for key, value in result.items(): + if value["filtered"]: + raise HTTPException( + status_code=400, + detail={ + "error": "Violated content safety policy", + "source": source, + "category": key, + "severity": value["severity"], + }, + ) + + async def async_pre_call_hook( + self, + user_api_key_dict: UserAPIKeyAuth, + cache: DualCache, + data: dict, + call_type: str, # "completion", "embeddings", "image_generation", "moderation" + ): + self.print_verbose(f"Inside Azure Content-Safety Pre-Call Hook") + 
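# Only completion payloads carry chat messages; a policy-violation HTTPException from test_violation propagates and blocks the request, while other errors are logged and ignored. +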
try: + if call_type == "completion" and "messages" in data: + for m in data["messages"]: + if "content" in m and isinstance(m["content"], str): + await self.test_violation(content=m["content"], source="input") + + except HTTPException as e: + raise e + except Exception as e: + traceback.print_exc() + + async def async_post_call_success_hook( + self, + user_api_key_dict: UserAPIKeyAuth, + response, + ): + self.print_verbose(f"Inside Azure Content-Safety Post-Call Hook") + if isinstance(response, litellm.ModelResponse) and isinstance( + response.choices[0], litellm.utils.Choices + ): + await self.test_violation( + content=response.choices[0].message.content, source="output" + ) + + async def async_post_call_streaming_hook( + self, + user_api_key_dict: UserAPIKeyAuth, + response: str, + ): + self.print_verbose(f"Inside Azure Content-Safety Call-Stream Hook") + await self.test_violation(content=response, source="output") diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 9cc871966..9c74659cc 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -2235,6 +2235,23 @@ class ProxyConfig: batch_redis_obj = _PROXY_BatchRedisRequests() imported_list.append(batch_redis_obj) + elif ( + isinstance(callback, str) + and callback == "azure_content_safety" + ): + from litellm.proxy.hooks.azure_content_safety import ( + _PROXY_AzureContentSafety, + ) + + azure_content_safety_params = litellm_settings["azure_content_safety_params"] + for k, v in azure_content_safety_params.items(): + if v is not None and isinstance(v, str) and v.startswith("os.environ/"): + azure_content_safety_params[k] = litellm.get_secret(v) + + azure_content_safety_obj = _PROXY_AzureContentSafety( + **azure_content_safety_params, + ) + imported_list.append(azure_content_safety_obj) else: imported_list.append( get_instance_fn( From 406c9820d1950cf60256a50989212bd6d470cd3e Mon Sep 17 00:00:00 2001 From: Lunik Date: Thu, 2 May 2024 23:28:21 +0200 Subject: [PATCH 004/184] =?UTF-8?q?=E2=9E=95=20feat:=20Add=20python=20requ?= =?UTF-8?q?irements=20for=20Azure=20Content-Safety=20callback?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lunik --- requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/requirements.txt b/requirements.txt index fbf7a28c7..7e2fa3c18 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,6 +26,8 @@ fastapi-sso==0.10.0 # admin UI, SSO pyjwt[crypto]==2.8.0 python-multipart==0.0.9 # admin UI Pillow==10.3.0 +azure-ai-contentsafety==1.0.0 # for azure content safety +azure-identity==1.15.0 # for azure content safety ### LITELLM PACKAGE DEPENDENCIES python-dotenv==1.0.0 # for env From 723ef9963e9c9468d98a25ab5aa8ed3f2499ccac Mon Sep 17 00:00:00 2001 From: mogith-pn <143642606+mogith-pn@users.noreply.github.com> Date: Fri, 3 May 2024 14:03:38 +0000 Subject: [PATCH 005/184] Clarifai - Added streaming and async completion support --- cookbook/liteLLM_clarifai_Demo.ipynb | 38 +++- litellm/llms/clarifai.py | 206 +++++++++++++++++----- litellm/main.py | 9 +- litellm/tests/test_clarifai_completion.py | 28 ++- litellm/tests/test_streaming.py | 3 +- litellm/utils.py | 28 +++ 6 files changed, 259 insertions(+), 53 deletions(-) diff --git a/cookbook/liteLLM_clarifai_Demo.ipynb b/cookbook/liteLLM_clarifai_Demo.ipynb index 4e3b4dbb0..40ef2fcf9 100644 --- a/cookbook/liteLLM_clarifai_Demo.ipynb +++ b/cookbook/liteLLM_clarifai_Demo.ipynb @@ -119,6 +119,42 @@ "print(f\"Claude-2.1 response : {response}\")" ] }, + 
{
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### OpenAI GPT-4 (Streaming)\n",
+ "Clarifai does not stream responses natively, but you can still pass `stream=True` and liteLLM will return the response in its standard streaming (StreamResponse) format."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "ModelResponse(id='chatcmpl-40ae19af-3bf0-4eb4-99f2-33aec3ba84af', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=\"In the quiet corners of time's grand hall,\\nLies the tale of rise and fall.\\nFrom ancient ruins to modern sprawl,\\nHistory, the greatest story of them all.\\n\\nEmpires have risen, empires have decayed,\\nThrough the eons, memories have stayed.\\nIn the book of time, history is laid,\\nA tapestry of events, meticulously displayed.\\n\\nThe pyramids of Egypt, standing tall,\\nThe Roman Empire's mighty sprawl.\\nFrom Alexander's conquest, to the Berlin Wall,\\nHistory, a silent witness to it all.\\n\\nIn the shadow of the past we tread,\\nWhere once kings and prophets led.\\nTheir stories in our hearts are spread,\\nEchoes of their words, in our minds are read.\\n\\nBattles fought and victories won,\\nActs of courage under the sun.\\nTales of love, of deeds done,\\nIn history's grand book, they all run.\\n\\nHeroes born, legends made,\\nIn the annals of time, they'll never fade.\\nTheir triumphs and failures all displayed,\\nIn the eternal march of history's parade.\\n\\nThe ink of the past is forever dry,\\nBut its lessons, we cannot deny.\\nIn its stories, truths lie,\\nIn its wisdom, we rely.\\n\\nHistory, a mirror to our past,\\nA guide for the future vast.\\nThrough its lens, we're ever cast,\\nIn the drama of life, forever vast.\", role='assistant', function_call=None, tool_calls=None), logprobs=None)], created=1714744515, model='https://api.clarifai.com/v2/users/openai/apps/chat-completion/models/GPT-4/outputs', object='chat.completion.chunk', system_fingerprint=None)\n",
+ "ModelResponse(id='chatcmpl-40ae19af-3bf0-4eb4-99f2-33aec3ba84af', choices=[StreamingChoices(finish_reason='stop', index=0, delta=Delta(content=None, role=None, function_call=None, tool_calls=None), logprobs=None)], created=1714744515, model='https://api.clarifai.com/v2/users/openai/apps/chat-completion/models/GPT-4/outputs', object='chat.completion.chunk', system_fingerprint=None)\n"
+ ]
+ }
+ ],
+ "source": [
+ "from litellm import completion\n",
+ "\n",
+ "messages = [{\"role\": \"user\",\"content\": \"\"\"Write a poem about history?\"\"\"}]\n",
+ "response = completion(\n",
+ " model=\"clarifai/openai.chat-completion.GPT-4\",\n",
+ " messages=messages,\n",
+ " stream=True,\n",
+ " api_key = \"YOUR_CLARIFAI_PAT\")\n",
+ "\n",
+ "for chunk in response:\n",
+ " print(chunk)"
+ ]
+ },
 {
 "cell_type": "code",
 "execution_count": null,
@@ -143,7 +179,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
- "version": "3.9.10"
+ "version": "3.10.13"
 }
 },
 "nbformat": 4,
diff --git a/litellm/llms/clarifai.py b/litellm/llms/clarifai.py
index 2a7d77c61..e07a8d9e8 100644
--- a/litellm/llms/clarifai.py
+++ b/litellm/llms/clarifai.py
@@ -3,9 +3,10 @@ import json
 import requests
 import time
 from typing import Callable, Optional
-from litellm.utils import ModelResponse, Usage, Choices, Message
+from litellm.utils import ModelResponse, Usage, Choices, Message, CustomStreamWrapper
 import litellm
 import httpx
+from litellm.llms.custom_httpx.http_handler 
import AsyncHTTPHandler from .prompt_templates.factory import prompt_factory, custom_prompt @@ -84,6 +85,63 @@ def completions_to_model(payload): "inputs": [{"data": {"text": {"raw": payload["prompt"]}}}], "model": {"output_info": {"params": params}}, } + +def process_response( + model, + prompt, + response, + model_response, + api_key, + data, + encoding, + logging_obj + ): + logging_obj.post_call( + input=prompt, + api_key=api_key, + original_response=response.text, + additional_args={"complete_input_dict": data}, + ) + ## RESPONSE OBJECT + try: + completion_response = response.json() + except Exception: + raise ClarifaiError( + message=response.text, status_code=response.status_code, url=model + ) + # print(completion_response) + try: + choices_list = [] + for idx, item in enumerate(completion_response["outputs"]): + if len(item["data"]["text"]["raw"]) > 0: + message_obj = Message(content=item["data"]["text"]["raw"]) + else: + message_obj = Message(content=None) + choice_obj = Choices( + finish_reason="stop", + index=idx + 1, #check + message=message_obj, + ) + choices_list.append(choice_obj) + model_response["choices"] = choices_list + + except Exception as e: + raise ClarifaiError( + message=traceback.format_exc(), status_code=response.status_code, url=model + ) + + # Calculate Usage + prompt_tokens = len(encoding.encode(prompt)) + completion_tokens = len( + encoding.encode(model_response["choices"][0]["message"].get("content")) + ) + model_response["model"] = model + model_response["usage"] = Usage( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens, + ) + return model_response def convert_model_to_url(model: str, api_base: str): user_id, app_id, model_id = model.split(".") @@ -98,6 +156,40 @@ def get_prompt_model_name(url: str): else: return "", clarifai_model_name +async def async_completion( + model: str, + prompt: str, + api_base: str, + custom_prompt_dict: dict, + model_response: ModelResponse, + print_verbose: Callable, + encoding, + api_key, + logging_obj, + data=None, + optional_params=None, + litellm_params=None, + logger_fn=None, + headers={}): + + async_handler = AsyncHTTPHandler( + timeout=httpx.Timeout(timeout=600.0, connect=5.0) + ) + response = await async_handler.post( + api_base, headers=headers, data=json.dumps(data) + ) + + return process_response( + model=model, + prompt=prompt, + response=response, + model_response=model_response, + api_key=api_key, + data=data, + encoding=encoding, + logging_obj=logging_obj, + ) + def completion( model: str, messages: list, @@ -108,6 +200,7 @@ def completion( api_key, logging_obj, custom_prompt_dict={}, + acompletion=False, optional_params=None, litellm_params=None, logger_fn=None, @@ -158,59 +251,78 @@ def completion( "api_base": api_base, }, ) - - ## COMPLETION CALL - response = requests.post( + if acompletion==True: + return async_completion( + model=model, + prompt=prompt, + api_base=api_base, + custom_prompt_dict=custom_prompt_dict, + model_response=model_response, + print_verbose=print_verbose, + encoding=encoding, + api_key=api_key, + logging_obj=logging_obj, + data=data, + optional_params=optional_params, + litellm_params=litellm_params, + logger_fn=logger_fn, + headers=headers, + ) + else: + ## COMPLETION CALL + response = requests.post( model, headers=headers, data=json.dumps(data), ) # print(response.content); exit() - """ - 
{"status":{"code":10000,"description":"Ok","req_id":"d914cf7e097487997910650cde954a37"},"outputs":[{"id":"c2baa668174b4547bd4d2e9f8996198d","status":{"code":10000,"description":"Ok"},"created_at":"2024-02-07T10:57:52.917990493Z","model":{"id":"GPT-4","name":"GPT-4","created_at":"2023-06-08T17:40:07.964967Z","modified_at":"2023-12-04T11:39:54.587604Z","app_id":"chat-completion","model_version":{"id":"5d7a50b44aec4a01a9c492c5a5fcf387","created_at":"2023-11-09T19:57:56.961259Z","status":{"code":21100,"description":"Model is trained and ready"},"completed_at":"2023-11-09T20:00:48.933172Z","visibility":{"gettable":50},"app_id":"chat-completion","user_id":"openai","metadata":{}},"user_id":"openai","model_type_id":"text-to-text","visibility":{"gettable":50},"toolkits":[],"use_cases":[],"languages":[],"languages_full":[],"check_consents":[],"workflow_recommended":false,"image":{"url":"https://data.clarifai.com/small/users/openai/apps/chat-completion/inputs/image/34326a9914d361bb93ae8e5381689755","hosted":{"prefix":"https://data.clarifai.com","suffix":"users/openai/apps/chat-completion/inputs/image/34326a9914d361bb93ae8e5381689755","sizes":["small"],"crossorigin":"use-credentials"}}},"input":{"id":"fba1f22a332743f083ddae0a7eb443ae","data":{"text":{"raw":"what\'s the weather in SF","url":"https://samples.clarifai.com/placeholder.gif"}}},"data":{"text":{"raw":"As an AI, I\'m unable to provide real-time information or updates. Please check a reliable weather website or app for the current weather in San Francisco.","text_info":{"encoding":"UnknownTextEnc"}}}}]} - """ + if response.status_code != 200: raise ClarifaiError(status_code=response.status_code, message=response.text, url=model) + if "stream" in optional_params and optional_params["stream"] == True: - return response.iter_lines() - else: - logging_obj.post_call( - input=prompt, - api_key=api_key, - original_response=response.text, - additional_args={"complete_input_dict": data}, - ) - ## RESPONSE OBJECT - completion_response = response.json() - # print(completion_response) - try: - choices_list = [] - for idx, item in enumerate(completion_response["outputs"]): - if len(item["data"]["text"]["raw"]) > 0: - message_obj = Message(content=item["data"]["text"]["raw"]) - else: - message_obj = Message(content=None) - choice_obj = Choices( - finish_reason="stop", - index=idx + 1, #check - message=message_obj, - ) - choices_list.append(choice_obj) - model_response["choices"] = choices_list - except Exception as e: - raise ClarifaiError( - message=traceback.format_exc(), status_code=response.status_code, url=model + completion_stream = response.iter_lines() + stream_response = CustomStreamWrapper( + completion_stream=completion_stream, + model=model, + custom_llm_provider="clarifai", + logging_obj=logging_obj, ) + return stream_response + + else: + return process_response( + model=model, + prompt=prompt, + response=response, + model_response=model_response, + api_key=api_key, + data=data, + encoding=encoding, + logging_obj=logging_obj) + - # Calculate Usage - prompt_tokens = len(encoding.encode(prompt)) - completion_tokens = len( - encoding.encode(model_response["choices"][0]["message"].get("content")) - ) - model_response["model"] = model - model_response["usage"] = Usage( - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - total_tokens=prompt_tokens + completion_tokens, - ) - return model_response \ No newline at end of file +class ModelResponseIterator: + def __init__(self, model_response): + self.model_response = model_response + 
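# One-shot iterator: the wrapped ModelResponse is yielded exactly once, for both sync and async consumers. +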
self.is_done = False + + # Sync iterator + def __iter__(self): + return self + + def __next__(self): + if self.is_done: + raise StopIteration + self.is_done = True + return self.model_response + + # Async iterator + def __aiter__(self): + return self + + async def __anext__(self): + if self.is_done: + raise StopAsyncIteration + self.is_done = True + return self.model_response \ No newline at end of file diff --git a/litellm/main.py b/litellm/main.py index 0bc802a9c..396ac1779 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -1185,6 +1185,7 @@ def completion( print_verbose=print_verbose, optional_params=optional_params, litellm_params=litellm_params, + acompletion=acompletion, logger_fn=logger_fn, encoding=encoding, # for calculating input/output tokens api_key=clarifai_key, @@ -1194,8 +1195,12 @@ def completion( if "stream" in optional_params and optional_params["stream"] == True: # don't try to access stream object, - - model_response = CustomStreamWrapper(model_response, model, logging_obj=logging, custom_llm_provider="replicate") + ## LOGGING + logging.post_call( + input=messages, + api_key=api_key, + original_response=model_response, + ) if optional_params.get("stream", False) or acompletion == True: ## LOGGING diff --git a/litellm/tests/test_clarifai_completion.py b/litellm/tests/test_clarifai_completion.py index 2c2626398..347e513bc 100644 --- a/litellm/tests/test_clarifai_completion.py +++ b/litellm/tests/test_clarifai_completion.py @@ -1,6 +1,7 @@ import sys, os import traceback from dotenv import load_dotenv +import asyncio, logging load_dotenv() import os, io @@ -10,7 +11,7 @@ sys.path.insert( ) # Adds the parent directory to the system path import pytest import litellm -from litellm import embedding, completion, completion_cost, Timeout, ModelResponse +from litellm import embedding, completion, acompletion, acreate, completion_cost, Timeout, ModelResponse from litellm import RateLimitError # litellm.num_retries = 3 @@ -65,3 +66,28 @@ def test_completion_clarifai_mistral_large(): pass except Exception as e: pytest.fail(f"Error occurred: {e}") + +@pytest.mark.asyncio +def test_async_completion_clarifai(): + import asyncio + + litellm.set_verbose = True + + async def test_get_response(): + user_message = "Hello, how are you?" 
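+        # Wrap the raw prompt in an OpenAI-style chat message list.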
+ messages = [{"content": user_message, "role": "user"}] + try: + response = await acompletion( + model="clarifai/openai.chat-completion.GPT-4", + messages=messages, + timeout=10, + api_key=os.getenv("CLARIFAI_API_KEY"), + ) + print(f"response: {response}") + except litellm.Timeout as e: + pass + except Exception as e: + pytest.fail(f"An exception occurred: {e}") + + + asyncio.run(test_get_response()) diff --git a/litellm/tests/test_streaming.py b/litellm/tests/test_streaming.py index d0d8a720a..bb9e0e16b 100644 --- a/litellm/tests/test_streaming.py +++ b/litellm/tests/test_streaming.py @@ -391,8 +391,7 @@ def test_completion_claude_stream(): print(f"completion_response: {complete_response}") except Exception as e: pytest.fail(f"Error occurred: {e}") - - + # test_completion_claude_stream() def test_completion_claude_2_stream(): litellm.set_verbose = True diff --git a/litellm/utils.py b/litellm/utils.py index e5f7f9d11..56518f9f9 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -2807,6 +2807,7 @@ def client(original_function): ) else: return result + return result # Prints Exactly what was passed to litellm function - don't execute any logic here - it should just print @@ -2910,6 +2911,7 @@ def client(original_function): model_response_object=ModelResponse(), stream=kwargs.get("stream", False), ) + if kwargs.get("stream", False) == True: cached_result = CustomStreamWrapper( completion_stream=cached_result, @@ -9905,6 +9907,27 @@ class CustomStreamWrapper: return {"text": "", "is_finished": False} except Exception as e: raise e + + def handle_clarifai_completion_chunk(self, chunk): + try: + if isinstance(chunk, dict): + parsed_response = chunk + if isinstance(chunk, (str, bytes)): + if isinstance(chunk, bytes): + parsed_response = chunk.decode("utf-8") + else: + parsed_response = chunk + data_json = json.loads(parsed_response) + text = data_json.get("outputs", "")[0].get("data", "").get("text", "").get("raw","") + prompt_tokens = len(encoding.encode(data_json.get("outputs", "")[0].get("input","").get("data", "").get("text", "").get("raw",""))) + completion_tokens = len(encoding.encode(text)) + return { + "text": text, + "is_finished": True, + } + except: + traceback.print_exc() + return "" def model_response_creator(self): model_response = ModelResponse(stream=True, model=self.model) @@ -9949,6 +9972,11 @@ class CustomStreamWrapper: completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] + elif ( + self.custom_llm_provider and self.custom_llm_provider == "clarifai" + ): + response_obj = self.handle_clarifai_completion_chunk(chunk) + completion_obj["content"] = response_obj["text"] elif self.model == "replicate" or self.custom_llm_provider == "replicate": response_obj = self.handle_replicate_chunk(chunk) completion_obj["content"] = response_obj["text"] From e7405f105c886bad2ad6ed7a1eed0b47af29dcd0 Mon Sep 17 00:00:00 2001 From: Lunik Date: Fri, 3 May 2024 20:50:37 +0200 Subject: [PATCH 006/184] =?UTF-8?q?=E2=9C=85=20ci:=20Add=20tests?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lunik --- litellm/proxy/hooks/azure_content_safety.py | 8 +- litellm/tests/test_azure_content_safety.py | 267 ++++++++++++++++++++ 2 files changed, 271 insertions(+), 4 deletions(-) create mode 100644 litellm/tests/test_azure_content_safety.py diff --git a/litellm/proxy/hooks/azure_content_safety.py b/litellm/proxy/hooks/azure_content_safety.py index 
161e35cde..d0d23363b 100644 --- a/litellm/proxy/hooks/azure_content_safety.py +++ b/litellm/proxy/hooks/azure_content_safety.py @@ -36,10 +36,10 @@ class _PROXY_AzureContentSafety( def _configure_thresholds(self, thresholds=None): default_thresholds = { - self.text_category.HATE: 6, - self.text_category.SELF_HARM: 6, - self.text_category.SEXUAL: 6, - self.text_category.VIOLENCE: 6, + self.text_category.HATE: 4, + self.text_category.SELF_HARM: 4, + self.text_category.SEXUAL: 4, + self.text_category.VIOLENCE: 4, } if thresholds is None: diff --git a/litellm/tests/test_azure_content_safety.py b/litellm/tests/test_azure_content_safety.py new file mode 100644 index 000000000..f7d9d8268 --- /dev/null +++ b/litellm/tests/test_azure_content_safety.py @@ -0,0 +1,267 @@ +# What is this? +## Unit test for azure content safety +import sys, os, asyncio, time, random +from datetime import datetime +import traceback +from dotenv import load_dotenv +from fastapi import HTTPException + +load_dotenv() +import os + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import pytest +import litellm +from litellm.proxy.hooks.azure_content_safety import _PROXY_AzureContentSafety +from litellm import Router, mock_completion +from litellm.proxy.utils import ProxyLogging +from litellm.proxy._types import UserAPIKeyAuth +from litellm.caching import DualCache + + +@pytest.mark.asyncio +async def test_strict_input_filtering_01(): + """ + - have a response with a filtered input + - call the pre call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 2}, + ) + + data = { + "messages": [ + {"role": "system", "content": "You are an helpfull assistant"}, + {"role": "user", "content": "Fuck yourself you stupid bitch"}, + ] + } + + with pytest.raises(HTTPException) as exc_info: + await azure_content_safety.async_pre_call_hook( + user_api_key_dict=UserAPIKeyAuth(), + cache=DualCache(), + data=data, + call_type="completion", + ) + + assert exc_info.value.detail["source"] == "input" + assert exc_info.value.detail["category"] == "Hate" + assert exc_info.value.detail["severity"] == "low" + + +@pytest.mark.asyncio +async def test_strict_input_filtering_02(): + """ + - have a response with a filtered input + - call the pre call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 2}, + ) + + data = { + "messages": [ + {"role": "system", "content": "You are an helpfull assistant"}, + {"role": "user", "content": "Hello how are you ?"}, + ] + } + + await azure_content_safety.async_pre_call_hook( + user_api_key_dict=UserAPIKeyAuth(), + cache=DualCache(), + data=data, + call_type="completion", + ) + + +@pytest.mark.asyncio +async def test_loose_input_filtering_01(): + """ + - have a response with a filtered input + - call the pre call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 8}, + ) + + data = { + "messages": [ + {"role": "system", "content": "You are an helpfull assistant"}, + {"role": "user", "content": "Fuck yourself you stupid bitch"}, + ] + } + + await azure_content_safety.async_pre_call_hook( + user_api_key_dict=UserAPIKeyAuth(), + cache=DualCache(), + 
data=data, + call_type="completion", + ) + + +@pytest.mark.asyncio +async def test_loose_input_filtering_02(): + """ + - have a response with a filtered input + - call the pre call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 8}, + ) + + data = { + "messages": [ + {"role": "system", "content": "You are an helpfull assistant"}, + {"role": "user", "content": "Hello how are you ?"}, + ] + } + + await azure_content_safety.async_pre_call_hook( + user_api_key_dict=UserAPIKeyAuth(), + cache=DualCache(), + data=data, + call_type="completion", + ) + + +@pytest.mark.asyncio +async def test_strict_output_filtering_01(): + """ + - have a response with a filtered output + - call the post call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 2}, + ) + + response = mock_completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": "You are a song writer expert. You help users to write songs about any topic in any genre.", + }, + { + "role": "user", + "content": "Help me write a rap text song. Add some insults to make it more credible.", + }, + ], + mock_response="I'm the king of the mic, you're just a fucking dick. Don't fuck with me your stupid bitch.", + ) + + with pytest.raises(HTTPException) as exc_info: + await azure_content_safety.async_post_call_success_hook( + user_api_key_dict=UserAPIKeyAuth(), response=response + ) + + assert exc_info.value.detail["source"] == "output" + assert exc_info.value.detail["category"] == "Hate" + assert exc_info.value.detail["severity"] == "low" + + +@pytest.mark.asyncio +async def test_strict_output_filtering_02(): + """ + - have a response with a filtered output + - call the post call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 2}, + ) + + response = mock_completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": "You are a song writer expert. You help users to write songs about any topic in any genre.", + }, + { + "role": "user", + "content": "Help me write a rap text song. Add some insults to make it more credible.", + }, + ], + mock_response="I'm unable to help with you with hate speech", + ) + + await azure_content_safety.async_post_call_success_hook( + user_api_key_dict=UserAPIKeyAuth(), response=response + ) + + +@pytest.mark.asyncio +async def test_loose_output_filtering_01(): + """ + - have a response with a filtered output + - call the post call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 8}, + ) + + response = mock_completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": "You are a song writer expert. You help users to write songs about any topic in any genre.", + }, + { + "role": "user", + "content": "Help me write a rap text song. Add some insults to make it more credible.", + }, + ], + mock_response="I'm the king of the mic, you're just a fucking dick. 
Don't fuck with me your stupid bitch.", + ) + + await azure_content_safety.async_post_call_success_hook( + user_api_key_dict=UserAPIKeyAuth(), response=response + ) + + +@pytest.mark.asyncio +async def test_loose_output_filtering_02(): + """ + - have a response with a filtered output + - call the post call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 8}, + ) + + response = mock_completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": "You are a song writer expert. You help users to write songs about any topic in any genre.", + }, + { + "role": "user", + "content": "Help me write a rap text song. Add some insults to make it more credible.", + }, + ], + mock_response="I'm unable to help with you with hate speech", + ) + + await azure_content_safety.async_post_call_success_hook( + user_api_key_dict=UserAPIKeyAuth(), response=response + ) From 9ba9b3891fdba07d040808eaf25d7cf732027565 Mon Sep 17 00:00:00 2001 From: Lunik Date: Fri, 3 May 2024 20:51:40 +0200 Subject: [PATCH 007/184] =?UTF-8?q?=E2=9A=A1=EF=B8=8F=20perf:=20Remove=20t?= =?UTF-8?q?est=20violation=20on=20each=20stream=20chunk?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lunik --- litellm/proxy/hooks/azure_content_safety.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/litellm/proxy/hooks/azure_content_safety.py b/litellm/proxy/hooks/azure_content_safety.py index d0d23363b..2cea05c69 100644 --- a/litellm/proxy/hooks/azure_content_safety.py +++ b/litellm/proxy/hooks/azure_content_safety.py @@ -148,10 +148,10 @@ class _PROXY_AzureContentSafety( content=response.choices[0].message.content, source="output" ) - async def async_post_call_streaming_hook( - self, - user_api_key_dict: UserAPIKeyAuth, - response: str, - ): - self.print_verbose(f"Inside Azure Content-Safety Call-Stream Hook") - await self.test_violation(content=response, source="output") + #async def async_post_call_streaming_hook( + # self, + # user_api_key_dict: UserAPIKeyAuth, + # response: str, + #): + # self.print_verbose(f"Inside Azure Content-Safety Call-Stream Hook") + # await self.test_violation(content=response, source="output") From cb178723ca1df4c85596b1c4d1c062f2fc4b1cc0 Mon Sep 17 00:00:00 2001 From: Lunik Date: Sat, 4 May 2024 10:39:43 +0200 Subject: [PATCH 008/184] =?UTF-8?q?=F0=9F=93=9D=20doc:=20Azure=20content?= =?UTF-8?q?=20safety=20Proxy=20usage?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lunik --- docs/my-website/docs/proxy/logging.md | 86 ++++++++++++++++++++- litellm/proxy/hooks/azure_content_safety.py | 16 +--- litellm/tests/test_azure_content_safety.py | 4 +- 3 files changed, 90 insertions(+), 16 deletions(-) diff --git a/docs/my-website/docs/proxy/logging.md b/docs/my-website/docs/proxy/logging.md index 1c3b4f81c..f583765bb 100644 --- a/docs/my-website/docs/proxy/logging.md +++ b/docs/my-website/docs/proxy/logging.md @@ -17,6 +17,7 @@ Log Proxy Input, Output, Exceptions using Custom Callbacks, Langfuse, OpenTeleme - [Logging to Sentry](#logging-proxy-inputoutput---sentry) - [Logging to Traceloop (OpenTelemetry)](#logging-proxy-inputoutput-traceloop-opentelemetry) - [Logging to Athina](#logging-proxy-inputoutput-athina) +- [Moderation with Azure Content-Safety](#moderation-with-azure-content-safety) ## Custom Callback 
Class [Async]

Use this when you want to run custom callbacks in `python`

@@ -1003,4 +1004,87 @@ curl --location 'http://0.0.0.0:4000/chat/completions' \
     }
   ]
 }'
-```
\ No newline at end of file
+```
+
+## Moderation with Azure Content Safety
+
+[Azure Content-Safety](https://azure.microsoft.com/en-us/products/ai-services/ai-content-safety) is a Microsoft Azure service that provides content moderation APIs to detect potentially offensive, harmful, or risky content in text.
+
+We will use the `--config` to set `litellm.success_callback = ["azure_content_safety"]`; this will moderate all LLM calls using Azure Content Safety.
+
+**Step 0** Deploy Azure Content Safety
+
+Deploy an Azure Content-Safety instance from the Azure Portal and get the `endpoint` and `key`.
+
+**Step 1** Set the Azure Content Safety API key
+
+```shell
+AZURE_CONTENT_SAFETY_KEU = ""
+```
+
+**Step 2**: Create a `config.yaml` file and set `callbacks` under `litellm_settings`
+```yaml
+model_list:
+  - model_name: gpt-3.5-turbo
+    litellm_params:
+      model: gpt-3.5-turbo
+litellm_settings:
+  callbacks: ["azure_content_safety"]
+  azure_content_safety_params:
+    endpoint: ""
+    key: "os.environ/AZURE_CONTENT_SAFETY_KEY"
+```
+
+**Step 3**: Start the proxy, make a test request
+
+Start proxy
+```shell
+litellm --config config.yaml --debug
+```
+
+Test Request
+```
+curl --location 'http://0.0.0.0:4000/chat/completions' \
+    --header 'Content-Type: application/json' \
+    --data ' {
+    "model": "gpt-3.5-turbo",
+    "messages": [
+        {
+        "role": "user",
+        "content": "Hi, how are you?"
+        }
+    ]
+    }'
+```
+
+An HTTP 400 error will be returned if the content is detected with a severity greater than the threshold set in the `config.yaml`.
+The details of the response will describe:
+- The `source`: input text or LLM-generated text
+- The `category`: the category of the content that triggered the moderation
+- The `severity`: the severity, from 0 to 10
+
+(An illustrative example of this error payload is shown after Step 4.)
+
+**Step 4**: Customizing Azure Content Safety Thresholds
+
+You can customize the thresholds for each category by setting `thresholds` in the `config.yaml`
+
+```yaml
+model_list:
+  - model_name: gpt-3.5-turbo
+    litellm_params:
+      model: gpt-3.5-turbo
+litellm_settings:
+  callbacks: ["azure_content_safety"]
+  azure_content_safety_params:
+    endpoint: ""
+    key: "os.environ/AZURE_CONTENT_SAFETY_KEY"
+    thresholds:
+      Hate: 6
+      SelfHarm: 8
+      Sexual: 6
+      Violence: 4
+```
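+
+For illustration only (the exact payload shape is an assumption; the `source`, `category`, and `severity` fields are the ones set by the hook), a blocked request may return an error detail such as:
+
+```json
+{
+  "source": "input",
+  "category": "Hate",
+  "severity": 2
+}
+```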
+
+:::info
+`thresholds` are not required by default, but you can tune the values to your needs.
+The default value is `4` for all categories.
+:::
\ No newline at end of file
diff --git a/litellm/proxy/hooks/azure_content_safety.py b/litellm/proxy/hooks/azure_content_safety.py
index 2cea05c69..2735a8839 100644
--- a/litellm/proxy/hooks/azure_content_safety.py
+++ b/litellm/proxy/hooks/azure_content_safety.py
@@ -59,16 +59,6 @@ class _PROXY_AzureContentSafety(
         except:
             pass

-    def _severity(self, severity):
-        if severity >= 6:
-            return "high"
-        elif severity >= 4:
-            return "medium"
-        elif severity >= 2:
-            return "low"
-        else:
-            return "safe"
-
     def _compute_result(self, response):
         result = {}

@@ -80,7 +70,7 @@ class _PROXY_AzureContentSafety(
             if severity is not None:
                 result[category] = {
                     "filtered": severity >= self.thresholds[category],
-                    "severity": self._severity(severity),
+                    "severity": severity,
                 }

         return result
@@ -148,10 +138,10 @@ class _PROXY_AzureContentSafety(
             content=response.choices[0].message.content, source="output"
         )

-    #async def async_post_call_streaming_hook(
+    # async def async_post_call_streaming_hook(
     #    self,
     #    user_api_key_dict: UserAPIKeyAuth,
     #    response: str,
-    #):
+    # ):
     #    self.print_verbose(f"Inside Azure Content-Safety Call-Stream Hook")
     #    await self.test_violation(content=response, source="output")
diff --git a/litellm/tests/test_azure_content_safety.py b/litellm/tests/test_azure_content_safety.py
index f7d9d8268..3cc31003a 100644
--- a/litellm/tests/test_azure_content_safety.py
+++ b/litellm/tests/test_azure_content_safety.py
@@ -50,7 +50,7 @@ async def test_strict_input_filtering_01():

     assert exc_info.value.detail["source"] == "input"
     assert exc_info.value.detail["category"] == "Hate"
-    assert exc_info.value.detail["severity"] == "low"
+    assert exc_info.value.detail["severity"] == 2


@@ -168,7 +168,7 @@ async def test_strict_output_filtering_01():

     assert exc_info.value.detail["source"] == "output"
     assert exc_info.value.detail["category"] == "Hate"
-    assert exc_info.value.detail["severity"] == "low"
+    assert exc_info.value.detail["severity"] == 2


From ebbeb333c6820c100e9e6cf5dfd577adceda2714 Mon Sep 17 00:00:00 2001
From: Lunik
Date: Sat, 4 May 2024 10:45:15 +0200
Subject: [PATCH 009/184] =?UTF-8?q?=E2=9C=8F=EF=B8=8F=20doc:=20typo=20in?=
 =?UTF-8?q?=20azure=20content=20safety?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Lunik

---
 docs/my-website/docs/proxy/logging.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/my-website/docs/proxy/logging.md b/docs/my-website/docs/proxy/logging.md
index f583765bb..eaa65f09c 100644
--- a/docs/my-website/docs/proxy/logging.md
+++ b/docs/my-website/docs/proxy/logging.md
@@ -3,7 +3,7 @@ import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';


-# 🔎 Logging - Custom Callbacks, DataDog, Langfuse, s3 Bucket, Sentry, OpenTelemetry, Athina
+# 🔎 Logging - Custom Callbacks, DataDog, Langfuse, s3 Bucket, Sentry, OpenTelemetry, Athina, Azure Content-Safety

 Log Proxy Input, Output, Exceptions using Custom Callbacks, Langfuse, OpenTelemetry, LangFuse, DynamoDB, s3 Bucket

@@ -1019,7 +1019,7 @@ Deploy an Azure Content-Safety instance from the Azure Portal and get the `endpo

 **Step 1** Set the Azure Content Safety API key

 ```shell
-AZURE_CONTENT_SAFETY_KEU = ""
+AZURE_CONTENT_SAFETY_KEY = ""
 ```

From 8783fd4895107cd2aa9ac5e7d9feec27c3b79ed5 Mon Sep 17 00:00:00 2001
From: Lunik
Date: Sat, 4 May 2024 10:45:39 +0200
Subject: [PATCH 010/184]
=?UTF-8?q?=E2=9C=A8=20feat:=20Use=208=20severity?= =?UTF-8?q?=20levels=20for=20azure=20content=20safety?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lunik --- litellm/proxy/hooks/azure_content_safety.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/litellm/proxy/hooks/azure_content_safety.py b/litellm/proxy/hooks/azure_content_safety.py index 2735a8839..fb9fc80ae 100644 --- a/litellm/proxy/hooks/azure_content_safety.py +++ b/litellm/proxy/hooks/azure_content_safety.py @@ -15,8 +15,11 @@ class _PROXY_AzureContentSafety( try: from azure.ai.contentsafety.aio import ContentSafetyClient from azure.core.credentials import AzureKeyCredential - from azure.ai.contentsafety.models import TextCategory - from azure.ai.contentsafety.models import AnalyzeTextOptions + from azure.ai.contentsafety.models import ( + TextCategory, + AnalyzeTextOptions, + AnalyzeTextOutputType, + ) from azure.core.exceptions import HttpResponseError except Exception as e: raise Exception( @@ -26,6 +29,7 @@ class _PROXY_AzureContentSafety( self.api_key = api_key self.text_category = TextCategory self.analyze_text_options = AnalyzeTextOptions + self.analyze_text_output_type = AnalyzeTextOutputType self.azure_http_error = HttpResponseError self.thresholds = self._configure_thresholds(thresholds) @@ -79,7 +83,10 @@ class _PROXY_AzureContentSafety( self.print_verbose(f"Testing Azure Content-Safety for: {content}") # Construct a request - request = self.analyze_text_options(text=content) + request = self.analyze_text_options( + text=content, + output_type=self.analyze_text_output_type.EIGHT_SEVERITY_LEVELS, + ) # Analyze text try: From 1639a51f241d906d2e1ce0b4343bd581206f7751 Mon Sep 17 00:00:00 2001 From: Lunik Date: Sat, 4 May 2024 11:04:23 +0200 Subject: [PATCH 011/184] =?UTF-8?q?=F0=9F=94=8A=20fix:=20Correctly=20use?= =?UTF-8?q?=20verbose=20logging?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lunik --- litellm/proxy/hooks/azure_content_safety.py | 22 +++++++-------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/litellm/proxy/hooks/azure_content_safety.py b/litellm/proxy/hooks/azure_content_safety.py index fb9fc80ae..433571c15 100644 --- a/litellm/proxy/hooks/azure_content_safety.py +++ b/litellm/proxy/hooks/azure_content_safety.py @@ -55,14 +55,6 @@ class _PROXY_AzureContentSafety( return thresholds - def print_verbose(self, print_statement): - try: - verbose_proxy_logger.debug(print_statement) - if litellm.set_verbose: - print(print_statement) # noqa - except: - pass - def _compute_result(self, response): result = {} @@ -80,7 +72,7 @@ class _PROXY_AzureContentSafety( return result async def test_violation(self, content: str, source: str = None): - self.print_verbose(f"Testing Azure Content-Safety for: {content}") + verbose_proxy_logger.debug("Testing Azure Content-Safety for: %s", content) # Construct a request request = self.analyze_text_options( @@ -92,14 +84,14 @@ class _PROXY_AzureContentSafety( try: response = await self.client.analyze_text(request) except self.azure_http_error as e: - self.print_verbose( - f"Error in Azure Content-Safety: {traceback.format_exc()}" + verbose_proxy_logger.debug( + "Error in Azure Content-Safety: %s", traceback.format_exc() ) traceback.print_exc() raise result = self._compute_result(response) - self.print_verbose(f"Azure Content-Safety Result: {result}") + verbose_proxy_logger.debug("Azure Content-Safety Result: 
%s", result) for key, value in result.items(): if value["filtered"]: @@ -120,7 +112,7 @@ class _PROXY_AzureContentSafety( data: dict, call_type: str, # "completion", "embeddings", "image_generation", "moderation" ): - self.print_verbose(f"Inside Azure Content-Safety Pre-Call Hook") + verbose_proxy_logger.debug("Inside Azure Content-Safety Pre-Call Hook") try: if call_type == "completion" and "messages" in data: for m in data["messages"]: @@ -137,7 +129,7 @@ class _PROXY_AzureContentSafety( user_api_key_dict: UserAPIKeyAuth, response, ): - self.print_verbose(f"Inside Azure Content-Safety Post-Call Hook") + verbose_proxy_logger.debug("Inside Azure Content-Safety Post-Call Hook") if isinstance(response, litellm.ModelResponse) and isinstance( response.choices[0], litellm.utils.Choices ): @@ -150,5 +142,5 @@ class _PROXY_AzureContentSafety( # user_api_key_dict: UserAPIKeyAuth, # response: str, # ): - # self.print_verbose(f"Inside Azure Content-Safety Call-Stream Hook") + # verbose_proxy_logger.debug("Inside Azure Content-Safety Call-Stream Hook") # await self.test_violation(content=response, source="output") From 9bcd93178f56379492bc4befdea4f4305dc28e91 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 May 2024 22:42:27 +0000 Subject: [PATCH 012/184] build(deps): bump next from 14.1.0 to 14.1.1 in /ui/litellm-dashboard Bumps [next](https://github.com/vercel/next.js) from 14.1.0 to 14.1.1. - [Release notes](https://github.com/vercel/next.js/releases) - [Changelog](https://github.com/vercel/next.js/blob/canary/release.js) - [Commits](https://github.com/vercel/next.js/compare/v14.1.0...v14.1.1) --- updated-dependencies: - dependency-name: next dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- ui/litellm-dashboard/package-lock.json | 88 +++++++++++++------------- ui/litellm-dashboard/package.json | 2 +- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/ui/litellm-dashboard/package-lock.json b/ui/litellm-dashboard/package-lock.json index 846189b27..80efdd2cf 100644 --- a/ui/litellm-dashboard/package-lock.json +++ b/ui/litellm-dashboard/package-lock.json @@ -17,7 +17,7 @@ "fs": "^0.0.1-security", "jsonwebtoken": "^9.0.2", "jwt-decode": "^4.0.0", - "next": "14.1.0", + "next": "14.1.1", "openai": "^4.28.0", "react": "^18", "react-copy-to-clipboard": "^5.1.0", @@ -412,9 +412,9 @@ } }, "node_modules/@next/env": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.1.0.tgz", - "integrity": "sha512-Py8zIo+02ht82brwwhTg36iogzFqGLPXlRGKQw5s+qP/kMNc4MAyDeEwBKDijk6zTIbegEgu8Qy7C1LboslQAw==" + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.1.1.tgz", + "integrity": "sha512-7CnQyD5G8shHxQIIg3c7/pSeYFeMhsNbpU/bmvH7ZnDql7mNRgg8O2JZrhrc/soFnfBnKP4/xXNiiSIPn2w8gA==" }, "node_modules/@next/eslint-plugin-next": { "version": "14.1.0", @@ -426,9 +426,9 @@ } }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.1.0.tgz", - "integrity": "sha512-nUDn7TOGcIeyQni6lZHfzNoo9S0euXnu0jhsbMOmMJUBfgsnESdjN97kM7cBqQxZa8L/bM9om/S5/1dzCrW6wQ==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.1.1.tgz", + "integrity": "sha512-yDjSFKQKTIjyT7cFv+DqQfW5jsD+tVxXTckSe1KIouKk75t1qZmj/mV3wzdmFb0XHVGtyRjDMulfVG8uCKemOQ==", "cpu": [ "arm64" ], @@ -441,9 +441,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": 
"14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.1.0.tgz", - "integrity": "sha512-1jgudN5haWxiAl3O1ljUS2GfupPmcftu2RYJqZiMJmmbBT5M1XDffjUtRUzP4W3cBHsrvkfOFdQ71hAreNQP6g==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.1.1.tgz", + "integrity": "sha512-KCQmBL0CmFmN8D64FHIZVD9I4ugQsDBBEJKiblXGgwn7wBCSe8N4Dx47sdzl4JAg39IkSN5NNrr8AniXLMb3aw==", "cpu": [ "x64" ], @@ -456,9 +456,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.1.0.tgz", - "integrity": "sha512-RHo7Tcj+jllXUbK7xk2NyIDod3YcCPDZxj1WLIYxd709BQ7WuRYl3OWUNG+WUfqeQBds6kvZYlc42NJJTNi4tQ==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.1.1.tgz", + "integrity": "sha512-YDQfbWyW0JMKhJf/T4eyFr4b3tceTorQ5w2n7I0mNVTFOvu6CGEzfwT3RSAQGTi/FFMTFcuspPec/7dFHuP7Eg==", "cpu": [ "arm64" ], @@ -471,9 +471,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.1.0.tgz", - "integrity": "sha512-v6kP8sHYxjO8RwHmWMJSq7VZP2nYCkRVQ0qolh2l6xroe9QjbgV8siTbduED4u0hlk0+tjS6/Tuy4n5XCp+l6g==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.1.1.tgz", + "integrity": "sha512-fiuN/OG6sNGRN/bRFxRvV5LyzLB8gaL8cbDH5o3mEiVwfcMzyE5T//ilMmaTrnA8HLMS6hoz4cHOu6Qcp9vxgQ==", "cpu": [ "arm64" ], @@ -486,9 +486,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.1.0.tgz", - "integrity": "sha512-zJ2pnoFYB1F4vmEVlb/eSe+VH679zT1VdXlZKX+pE66grOgjmKJHKacf82g/sWE4MQ4Rk2FMBCRnX+l6/TVYzQ==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.1.1.tgz", + "integrity": "sha512-rv6AAdEXoezjbdfp3ouMuVqeLjE1Bin0AuE6qxE6V9g3Giz5/R3xpocHoAi7CufRR+lnkuUjRBn05SYJ83oKNQ==", "cpu": [ "x64" ], @@ -501,9 +501,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.1.0.tgz", - "integrity": "sha512-rbaIYFt2X9YZBSbH/CwGAjbBG2/MrACCVu2X0+kSykHzHnYH5FjHxwXLkcoJ10cX0aWCEynpu+rP76x0914atg==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.1.1.tgz", + "integrity": "sha512-YAZLGsaNeChSrpz/G7MxO3TIBLaMN8QWMr3X8bt6rCvKovwU7GqQlDu99WdvF33kI8ZahvcdbFsy4jAFzFX7og==", "cpu": [ "x64" ], @@ -516,9 +516,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.1.0.tgz", - "integrity": "sha512-o1N5TsYc8f/HpGt39OUQpQ9AKIGApd3QLueu7hXk//2xq5Z9OxmV6sQfNp8C7qYmiOlHYODOGqNNa0e9jvchGQ==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.1.1.tgz", + "integrity": "sha512-1L4mUYPBMvVDMZg1inUYyPvFSduot0g73hgfD9CODgbr4xiTYe0VOMTZzaRqYJYBA9mana0x4eaAaypmWo1r5A==", "cpu": [ "arm64" ], @@ -531,9 +531,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.1.0.tgz", - "integrity": 
"sha512-XXIuB1DBRCFwNO6EEzCTMHT5pauwaSj4SWs7CYnME57eaReAKBXCnkUE80p/pAZcewm7hs+vGvNqDPacEXHVkw==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.1.1.tgz", + "integrity": "sha512-jvIE9tsuj9vpbbXlR5YxrghRfMuG0Qm/nZ/1KDHc+y6FpnZ/apsgh+G6t15vefU0zp3WSpTMIdXRUsNl/7RSuw==", "cpu": [ "ia32" ], @@ -546,9 +546,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.1.0.tgz", - "integrity": "sha512-9WEbVRRAqJ3YFVqEZIxUqkiO8l1nool1LmNxygr5HWF8AcSYsEpneUDhmjUVJEzO2A04+oPtZdombzzPPkTtgg==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.1.1.tgz", + "integrity": "sha512-S6K6EHDU5+1KrBDLko7/c1MNy/Ya73pIAmvKeFwsF4RmBFJSO7/7YeD4FnZ4iBdzE69PpQ4sOMU9ORKeNuxe8A==", "cpu": [ "x64" ], @@ -4907,11 +4907,11 @@ "dev": true }, "node_modules/next": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/next/-/next-14.1.0.tgz", - "integrity": "sha512-wlzrsbfeSU48YQBjZhDzOwhWhGsy+uQycR8bHAOt1LY1bn3zZEcDyHQOEoN3aWzQ8LHCAJ1nqrWCc9XF2+O45Q==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/next/-/next-14.1.1.tgz", + "integrity": "sha512-McrGJqlGSHeaz2yTRPkEucxQKe5Zq7uPwyeHNmJaZNY4wx9E9QdxmTp310agFRoMuIYgQrCrT3petg13fSVOww==", "dependencies": { - "@next/env": "14.1.0", + "@next/env": "14.1.1", "@swc/helpers": "0.5.2", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", @@ -4926,15 +4926,15 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.1.0", - "@next/swc-darwin-x64": "14.1.0", - "@next/swc-linux-arm64-gnu": "14.1.0", - "@next/swc-linux-arm64-musl": "14.1.0", - "@next/swc-linux-x64-gnu": "14.1.0", - "@next/swc-linux-x64-musl": "14.1.0", - "@next/swc-win32-arm64-msvc": "14.1.0", - "@next/swc-win32-ia32-msvc": "14.1.0", - "@next/swc-win32-x64-msvc": "14.1.0" + "@next/swc-darwin-arm64": "14.1.1", + "@next/swc-darwin-x64": "14.1.1", + "@next/swc-linux-arm64-gnu": "14.1.1", + "@next/swc-linux-arm64-musl": "14.1.1", + "@next/swc-linux-x64-gnu": "14.1.1", + "@next/swc-linux-x64-musl": "14.1.1", + "@next/swc-win32-arm64-msvc": "14.1.1", + "@next/swc-win32-ia32-msvc": "14.1.1", + "@next/swc-win32-x64-msvc": "14.1.1" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", diff --git a/ui/litellm-dashboard/package.json b/ui/litellm-dashboard/package.json index 108d70d98..0fb9f1a53 100644 --- a/ui/litellm-dashboard/package.json +++ b/ui/litellm-dashboard/package.json @@ -18,7 +18,7 @@ "fs": "^0.0.1-security", "jsonwebtoken": "^9.0.2", "jwt-decode": "^4.0.0", - "next": "14.1.0", + "next": "14.1.1", "openai": "^4.28.0", "react": "^18", "react-copy-to-clipboard": "^5.1.0", From c45085b728ab2994db7cf0412a9aa076d1c11b77 Mon Sep 17 00:00:00 2001 From: Rajan Paneru Date: Fri, 10 May 2024 10:06:50 +0930 Subject: [PATCH 013/184] Based on the data-type using json The value of response_obj["choices"][0]["message"] is Message object and dict Added a conditional to use .json only iff it is Message Object --- litellm/integrations/langfuse.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index caf5437b2..140dced9d 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -135,7 +135,9 @@ class LangFuseLogger: response_obj, litellm.ModelResponse ): input = prompt - output = response_obj["choices"][0]["message"].json() + 
output = response_obj["choices"][0]["message"]
+            if isinstance(output, litellm.Message):
+                output = output.json()
         elif response_obj is not None and isinstance(
             response_obj, litellm.TextCompletionResponse
         ):

From 170fd11c8208f57739da1410d546bd6a9cb996ff Mon Sep 17 00:00:00 2001
From: Simon Sanchez Viloria
Date: Fri, 10 May 2024 11:53:33 +0200
Subject: [PATCH 014/184] (fix) watsonx.py: Fixed linting errors and made sure
 stream chunks always return usage

---
 litellm/llms/watsonx.py | 365 ++++++++++++++++++++++++++++++----------
 litellm/utils.py        |   6 +-
 2 files changed, 279 insertions(+), 92 deletions(-)

diff --git a/litellm/llms/watsonx.py b/litellm/llms/watsonx.py
index 082cdb325..ad4aff4b6 100644
--- a/litellm/llms/watsonx.py
+++ b/litellm/llms/watsonx.py
@@ -1,12 +1,25 @@
 from enum import Enum
 import json, types, time  # noqa: E401
 from contextlib import asynccontextmanager, contextmanager
-from typing import AsyncGenerator, Callable, Dict, Generator, Optional, Any, Union, List
+from typing import (
+    Callable,
+    Dict,
+    Generator,
+    AsyncGenerator,
+    Iterator,
+    AsyncIterator,
+    Optional,
+    Any,
+    Union,
+    List,
+    ContextManager,
+    AsyncContextManager,
+)

 import httpx  # type: ignore
 import requests  # type: ignore

 import litellm
-from litellm.utils import Logging, ModelResponse, Usage, get_secret
+from litellm.utils import ModelResponse, Usage, get_secret
 from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler

 from .base import BaseLLM
@@ -189,6 +202,7 @@ class WatsonXAIEndpoint(str, Enum):
     )
     EMBEDDINGS = "/ml/v1/text/embeddings"
     PROMPTS = "/ml/v1/prompts"
+    AVAILABLE_MODELS = "/ml/v1/foundation_model_specs"


 class IBMWatsonXAI(BaseLLM):
@@ -378,12 +392,12 @@ class IBMWatsonXAI(BaseLLM):
         model_response: ModelResponse,
         print_verbose: Callable,
         encoding,
-        logging_obj: Logging,
-        optional_params: Optional[dict] = None,
-        acompletion: bool = None,
-        litellm_params: Optional[dict] = None,
+        logging_obj,
+        optional_params=None,
+        acompletion=None,
+        litellm_params=None,
         logger_fn=None,
-        timeout: Optional[float] = None,
+        timeout=None,
     ):
         """
         Send a text generation request to the IBM Watsonx.ai API.
@@ -403,8 +417,6 @@ class IBMWatsonXAI(BaseLLM): prompt = convert_messages_to_prompt( model, messages, provider, custom_prompt_dict ) - - manage_response = self._make_response_manager(async_=(acompletion is True), logging_obj=logging_obj) def process_text_gen_response(json_resp: dict) -> ModelResponse: if "results" not in json_resp: @@ -419,62 +431,72 @@ class IBMWatsonXAI(BaseLLM): model_response["finish_reason"] = json_resp["results"][0]["stop_reason"] model_response["created"] = int(time.time()) model_response["model"] = model - setattr( - model_response, - "usage", - Usage( - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - total_tokens=prompt_tokens + completion_tokens, - ), + usage = Usage( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens, ) + setattr(model_response, "usage", usage) return model_response + def process_stream_response( + stream_resp: Union[Iterator[str], AsyncIterator], + ) -> litellm.CustomStreamWrapper: + streamwrapper = litellm.CustomStreamWrapper( + stream_resp, + model=model, + custom_llm_provider="watsonx", + logging_obj=logging_obj, + ) + return streamwrapper + + # create the function to manage the request to watsonx.ai + # manage_request = self._make_request_manager( + # async_=(acompletion is True), logging_obj=logging_obj + # ) + self.request_manager = RequestManager(logging_obj) + def handle_text_request(request_params: dict) -> ModelResponse: - with manage_response( - request_params, input=prompt, timeout=timeout, + with self.request_manager.request( + request_params, + input=prompt, + timeout=timeout, ) as resp: json_resp = resp.json() return process_text_gen_response(json_resp) async def handle_text_request_async(request_params: dict) -> ModelResponse: - async with manage_response( - request_params, input=prompt, timeout=timeout, + async with self.request_manager.async_request( + request_params, + input=prompt, + timeout=timeout, ) as resp: json_resp = resp.json() return process_text_gen_response(json_resp) - def handle_stream_request( - request_params: dict, - ) -> litellm.CustomStreamWrapper: + def handle_stream_request(request_params: dict) -> litellm.CustomStreamWrapper: # stream the response - generated chunks will be handled # by litellm.utils.CustomStreamWrapper.handle_watsonx_stream - with manage_response( - request_params, stream=True, input=prompt, timeout=timeout, + with self.request_manager.request( + request_params, + stream=True, + input=prompt, + timeout=timeout, ) as resp: - streamwrapper = litellm.CustomStreamWrapper( - resp.iter_lines(), - model=model, - custom_llm_provider="watsonx", - logging_obj=logging_obj, - ) + streamwrapper = process_stream_response(resp.iter_lines()) return streamwrapper - async def handle_stream_request_async( - request_params: dict, - ) -> litellm.CustomStreamWrapper: + async def handle_stream_request_async(request_params: dict) -> litellm.CustomStreamWrapper: # stream the response - generated chunks will be handled # by litellm.utils.CustomStreamWrapper.handle_watsonx_stream - async with manage_response( - request_params, stream=True, input=prompt, timeout=timeout, + async with self.request_manager.async_request( + request_params, + stream=True, + input=prompt, + timeout=timeout, ) as resp: - streamwrapper = litellm.CustomStreamWrapper( - resp.aiter_lines(), - model=model, - custom_llm_provider="watsonx", - logging_obj=logging_obj, - ) + streamwrapper = process_stream_response(resp.aiter_lines()) return streamwrapper 
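+        # Build the request parameters, then dispatch to one of the four
+        # handlers defined above based on the stream/acompletion flags.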
try: @@ -486,13 +508,13 @@ class IBMWatsonXAI(BaseLLM): optional_params=optional_params, print_verbose=print_verbose, ) - if stream and acompletion: + if stream and (acompletion is True): # stream and async text generation return handle_stream_request_async(req_params) elif stream: # streaming text generation return handle_stream_request(req_params) - elif acompletion: + elif (acompletion is True): # async text generation return handle_text_request_async(req_params) else: @@ -554,43 +576,48 @@ class IBMWatsonXAI(BaseLLM): "json": payload, "params": request_params, } - manage_response = self._make_response_manager(async_=(aembedding is True), logging_obj=logging_obj) - + # manage_request = self._make_request_manager( + # async_=(aembedding is True), logging_obj=logging_obj + # ) + request_manager = RequestManager(logging_obj) + def process_embedding_response(json_resp: dict) -> ModelResponse: results = json_resp.get("results", []) embedding_response = [] for idx, result in enumerate(results): embedding_response.append( - {"object": "embedding", "index": idx, "embedding": result["embedding"]} + { + "object": "embedding", + "index": idx, + "embedding": result["embedding"], + } ) model_response["object"] = "list" model_response["data"] = embedding_response model_response["model"] = model input_tokens = json_resp.get("input_token_count", 0) model_response.usage = Usage( - prompt_tokens=input_tokens, completion_tokens=0, total_tokens=input_tokens + prompt_tokens=input_tokens, + completion_tokens=0, + total_tokens=input_tokens, ) return model_response - - def handle_embedding_request(request_params: dict) -> ModelResponse: - with manage_response( - request_params, input=input - ) as resp: + + def handle_embedding(request_params: dict) -> ModelResponse: + with request_manager.request(request_params, input=input) as resp: json_resp = resp.json() return process_embedding_response(json_resp) - - async def handle_embedding_request_async(request_params: dict) -> ModelResponse: - async with manage_response( - request_params, input=input - ) as resp: + + async def handle_aembedding(request_params: dict) -> ModelResponse: + async with request_manager.async_request(request_params, input=input) as resp: json_resp = resp.json() return process_embedding_response(json_resp) - + try: - if aembedding: - return handle_embedding_request_async(req_params) + if aembedding is True: + return handle_embedding(req_params) else: - return handle_embedding_request(req_params) + return handle_aembedding(req_params) except WatsonXAIError as e: raise e except Exception as e: @@ -616,64 +643,88 @@ class IBMWatsonXAI(BaseLLM): iam_access_token = json_data["access_token"] self.token = iam_access_token return iam_access_token - - def _make_response_manager( - self, - async_: bool, - logging_obj: Logging - ) -> Callable[..., Generator[Union[requests.Response, httpx.Response], None, None]]: + + def get_available_models(self, *, ids_only: bool = True, **params): + api_params = self._get_api_params(params) + headers = { + "Authorization": f"Bearer {api_params['token']}", + "Content-Type": "application/json", + "Accept": "application/json", + } + request_params = dict(version=api_params["api_version"]) + url = api_params["url"].rstrip("/") + WatsonXAIEndpoint.AVAILABLE_MODELS + req_params = dict(method="GET", url=url, headers=headers, params=request_params) + # manage_request = self._make_request_manager(async_=False, logging_obj=None) + with RequestManager(logging_obj=None).request(req_params) as resp: + json_resp = resp.json() + if 
not ids_only: + return json_resp + return [res["model_id"] for res in json_resp["resources"]] + + def _make_request_manager( + self, async_: bool, logging_obj=None + ) -> Callable[ + ..., + Union[ContextManager[requests.Response], AsyncContextManager[httpx.Response]], + ]: """ Returns a context manager that manages the response from the request. if async_ is True, returns an async context manager, otherwise returns a regular context manager. Usage: ```python - manage_response = self._make_response_manager(async_=True, logging_obj=logging_obj) - async with manage_response(request_params) as resp: + manage_request = self._make_request_manager(async_=True, logging_obj=logging_obj) + async with manage_request(request_params) as resp: ... # or - manage_response = self._make_response_manager(async_=False, logging_obj=logging_obj) - with manage_response(request_params) as resp: + manage_request = self._make_request_manager(async_=False, logging_obj=logging_obj) + with manage_request(request_params) as resp: ... ``` """ def pre_call( request_params: dict, - input:Optional[Any]=None, + input: Optional[Any] = None, ): + if logging_obj is None: + return request_str = ( f"response = {'await ' if async_ else ''}{request_params['method']}(\n" f"\turl={request_params['url']},\n" - f"\tjson={request_params['json']},\n" + f"\tjson={request_params.get('json')},\n" f")" ) logging_obj.pre_call( input=input, api_key=request_params["headers"].get("Authorization"), additional_args={ - "complete_input_dict": request_params["json"], + "complete_input_dict": request_params.get("json"), "request_str": request_str, }, ) def post_call(resp, request_params): + if logging_obj is None: + return logging_obj.post_call( input=input, api_key=request_params["headers"].get("Authorization"), original_response=json.dumps(resp.json()), additional_args={ "status_code": resp.status_code, - "complete_input_dict": request_params.get("data", request_params.get("json")), + "complete_input_dict": request_params.get( + "data", request_params.get("json") + ), }, ) - + @contextmanager - def _manage_response( + def _manage_request( request_params: dict, stream: bool = False, input: Optional[Any] = None, - timeout: float = None, + timeout=None, ) -> Generator[requests.Response, None, None]: """ Returns a context manager that yields the response from the request. 
@@ -685,20 +736,23 @@ class IBMWatsonXAI(BaseLLM):
                 request_params["stream"] = stream
             try:
                 resp = requests.request(**request_params)
-                resp.raise_for_status()
+                if not resp.ok:
+                    raise WatsonXAIError(
+                        status_code=resp.status_code,
+                        message=f"Error {resp.status_code} ({resp.reason}): {resp.text}",
+                    )
                 yield resp
             except Exception as e:
                 raise WatsonXAIError(status_code=500, message=str(e))
             if not stream:
                 post_call(resp, request_params)
-
-
+
         @asynccontextmanager
-        async def _manage_response_async(
+        async def _manage_request_async(
             request_params: dict,
             stream: bool = False,
             input: Optional[Any] = None,
-            timeout: float = None,
+            timeout=None,
         ) -> AsyncGenerator[httpx.Response, None]:
             pre_call(request_params, input)
             if timeout:
@@ -708,16 +762,23 @@ class IBMWatsonXAI(BaseLLM):
             try:
                 # async with AsyncHTTPHandler(timeout=timeout) as client:
                 self.async_handler = AsyncHTTPHandler(
-                    timeout=httpx.Timeout(timeout=request_params.pop("timeout", 600.0), connect=5.0),
+                    timeout=httpx.Timeout(
+                        timeout=request_params.pop("timeout", 600.0), connect=5.0
+                    ),
                 )
                 # async_handler.client.verify = False
                 if "json" in request_params:
-                    request_params['data'] = json.dumps(request_params.pop("json", {}))
+                    request_params["data"] = json.dumps(request_params.pop("json", {}))
                 method = request_params.pop("method")
                 if method.upper() == "POST":
                     resp = await self.async_handler.post(**request_params)
                 else:
                     resp = await self.async_handler.get(**request_params)
+                if resp.status_code not in [200, 201]:
+                    raise WatsonXAIError(
+                        status_code=resp.status_code,
+                        message=f"Error {resp.status_code} ({resp.reason}): {resp.text}",
+                    )
                 yield resp
                 # await async_handler.close()
             except Exception as e:
@@ -726,6 +787,132 @@ class IBMWatsonXAI(BaseLLM):
                 post_call(resp, request_params)

         if async_:
-            return _manage_response_async
+            return _manage_request_async
         else:
-            return _manage_response
+            return _manage_request
+
+class RequestManager:
+    """
+    Manages the request/response lifecycle, with pre- and post-call logging.
+    `request` returns a regular context manager; `async_request` returns an async one.
+
+    Usage:
+    ```python
+    request_params = dict(method="POST", url="https://api.example.com", headers={"Authorization" : "Bearer token"}, json={"key": "value"})
+    request_manager = RequestManager(logging_obj=logging_obj)
+    with request_manager.request(request_params) as resp:
+        ...
+    # or
+    async with request_manager.async_request(request_params) as resp:
+        ...
+ ``` + """ + + def __init__(self, logging_obj=None): + self.logging_obj = logging_obj + + def pre_call( + self, + request_params: dict, + input: Optional[Any] = None, + ): + if self.logging_obj is None: + return + request_str = ( + f"response = {request_params['method']}(\n" + f"\turl={request_params['url']},\n" + f"\tjson={request_params.get('json')},\n" + f")" + ) + self.logging_obj.pre_call( + input=input, + api_key=request_params["headers"].get("Authorization"), + additional_args={ + "complete_input_dict": request_params.get("json"), + "request_str": request_str, + }, + ) + + def post_call(self, resp, request_params): + if self.logging_obj is None: + return + self.logging_obj.post_call( + input=input, + api_key=request_params["headers"].get("Authorization"), + original_response=json.dumps(resp.json()), + additional_args={ + "status_code": resp.status_code, + "complete_input_dict": request_params.get( + "data", request_params.get("json") + ), + }, + ) + + @contextmanager + def request( + self, + request_params: dict, + stream: bool = False, + input: Optional[Any] = None, + timeout=None, + ) -> Generator[requests.Response, None, None]: + """ + Returns a context manager that yields the response from the request. + """ + self.pre_call(request_params, input) + if timeout: + request_params["timeout"] = timeout + if stream: + request_params["stream"] = stream + try: + resp = requests.request(**request_params) + if not resp.ok: + raise WatsonXAIError( + status_code=resp.status_code, + message=f"Error {resp.status_code} ({resp.reason}): {resp.text}", + ) + yield resp + except Exception as e: + raise WatsonXAIError(status_code=500, message=str(e)) + if not stream: + self.post_call(resp, request_params) + + @asynccontextmanager + async def async_request( + self, + request_params: dict, + stream: bool = False, + input: Optional[Any] = None, + timeout=None, + ) -> AsyncGenerator[httpx.Response, None]: + self.pre_call(request_params, input) + if timeout: + request_params["timeout"] = timeout + if stream: + request_params["stream"] = stream + try: + # async with AsyncHTTPHandler(timeout=timeout) as client: + self.async_handler = AsyncHTTPHandler( + timeout=httpx.Timeout( + timeout=request_params.pop("timeout", 600.0), connect=5.0 + ), + ) + # async_handler.client.verify = False + if "json" in request_params: + request_params["data"] = json.dumps(request_params.pop("json", {})) + method = request_params.pop("method") + if method.upper() == "POST": + resp = await self.async_handler.post(**request_params) + else: + resp = await self.async_handler.get(**request_params) + if resp.status_code not in [200, 201]: + raise WatsonXAIError( + status_code=resp.status_code, + message=f"Error {resp.status_code} ({resp.reason}): {resp.text}", + ) + yield resp + # await async_handler.close() + except Exception as e: + raise WatsonXAIError(status_code=500, message=str(e)) + if not stream: + self.post_call(resp, request_params) \ No newline at end of file diff --git a/litellm/utils.py b/litellm/utils.py index d1af1b44a..3d1b0c1a7 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -10285,7 +10285,7 @@ class CustomStreamWrapper: response = chunk.replace("data: ", "").strip() parsed_response = json.loads(response) else: - return {"text": "", "is_finished": False} + return {"text": "", "is_finished": False, "prompt_tokens": 0, "completion_tokens": 0} else: print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") raise ValueError( @@ -10300,8 +10300,8 @@ class CustomStreamWrapper: "text": text, "is_finished": 
is_finished,
                 "finish_reason": finish_reason,
-                "prompt_tokens": results[0].get("input_token_count", None),
-                "completion_tokens": results[0].get("generated_token_count", None),
+                "prompt_tokens": results[0].get("input_token_count", 0),
+                "completion_tokens": results[0].get("generated_token_count", 0),
             }
             return {"text": "", "is_finished": False}
         except Exception as e:

From d3d82827edbe9c5c3840795f1385c915629b957d Mon Sep 17 00:00:00 2001
From: Simon Sanchez Viloria
Date: Fri, 10 May 2024 11:55:58 +0200
Subject: [PATCH 015/184] (test) Add tests for WatsonX completion/acompletion
 streaming

---
 litellm/tests/test_completion.py | 37 ++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 32b65faea..fa3e669f0 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -3089,6 +3089,24 @@ def test_completion_watsonx():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

+def test_completion_stream_watsonx():
+    litellm.set_verbose = True
+    model_name = "watsonx/ibm/granite-13b-chat-v2"
+    try:
+        response = completion(
+            model=model_name,
+            messages=messages,
+            stop=["stop"],
+            max_tokens=20,
+            stream=True
+        )
+        for chunk in response:
+            print(chunk)
+    except litellm.APIError as e:
+        pass
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+

 @pytest.mark.parametrize(
     "provider, model, project, region_name, token",
@@ -3153,6 +3171,25 @@ async def test_acompletion_watsonx():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

+@pytest.mark.asyncio
+async def test_acompletion_stream_watsonx():
+    litellm.set_verbose = True
+    model_name = "watsonx/ibm/granite-13b-chat-v2"
+    print("testing watsonx")
+    try:
+        response = await litellm.acompletion(
+            model=model_name,
+            messages=messages,
+            temperature=0.2,
+            max_tokens=80,
+            stream=True
+        )
+        # Add any assertions here to check the response
+        async for chunk in response:
+            print(chunk)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+

# test_completion_palm_stream()

From 8eb842dcf584c9cf6ff9d7675899fb84f39e35e7 Mon Sep 17 00:00:00 2001
From: Rajan Paneru
Date: Fri, 10 May 2024 22:04:44 +0930
Subject: [PATCH 016/184] reverted the patch so that the fix can be applied in
 the main place

---
 litellm/integrations/langfuse.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py
index 140dced9d..caf5437b2 100644
--- a/litellm/integrations/langfuse.py
+++ b/litellm/integrations/langfuse.py
@@ -135,9 +135,7 @@ class LangFuseLogger:
             response_obj, litellm.ModelResponse
         ):
             input = prompt
-            output = response_obj["choices"][0]["message"]
-            if isinstance(output, litellm.Message):
-                output = output.json()
+            output = response_obj["choices"][0]["message"].json()
         elif response_obj is not None and isinstance(
             response_obj, litellm.TextCompletionResponse
         ):

From 65b07bcb8c737152f26afa6b51cb3919a05007c3 Mon Sep 17 00:00:00 2001
From: Rajan Paneru
Date: Fri, 10 May 2024 22:12:32 +0930
Subject: [PATCH 017/184] Preserving the Pydantic Message Object

The following statement replaces the Pydantic Message object and initializes
it with a plain dict:

model_response["choices"][0]["message"] = response_json["message"]

We need to make sure message is always a litellm.Message object.

As a fix, based on the code of the ollama.py file, I am updating just the
content instead of the entire object.

---
 litellm/llms/ollama_chat.py | 4 ++--
 1
file changed, 2 insertions(+), 2 deletions(-) diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py index 866761905..d1ff4953f 100644 --- a/litellm/llms/ollama_chat.py +++ b/litellm/llms/ollama_chat.py @@ -300,7 +300,7 @@ def get_ollama_response( model_response["choices"][0]["message"] = message model_response["choices"][0]["finish_reason"] = "tool_calls" else: - model_response["choices"][0]["message"] = response_json["message"] + model_response["choices"][0]["message"]["content"] = response_json["message"]["content"] model_response["created"] = int(time.time()) model_response["model"] = "ollama/" + model prompt_tokens = response_json.get("prompt_eval_count", litellm.token_counter(messages=messages)) # type: ignore @@ -484,7 +484,7 @@ async def ollama_acompletion( model_response["choices"][0]["message"] = message model_response["choices"][0]["finish_reason"] = "tool_calls" else: - model_response["choices"][0]["message"] = response_json["message"] + model_response["choices"][0]["message"]["content"] = response_json["message"]["content"] model_response["created"] = int(time.time()) model_response["model"] = "ollama_chat/" + data["model"] From e83743f8e1b18b9961a5b4f328c69a1fef22ac7a Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 09:59:05 -0700 Subject: [PATCH 018/184] fix langfuse - log metadata on traces --- litellm/integrations/langfuse.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index 1e957dfcf..5e1961b66 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -318,11 +318,11 @@ class LangFuseLogger: else: clean_metadata[key] = value - session_id = clean_metadata.pop("session_id", None) - trace_name = clean_metadata.pop("trace_name", None) - trace_id = clean_metadata.pop("trace_id", None) - existing_trace_id = clean_metadata.pop("existing_trace_id", None) - update_trace_keys = clean_metadata.pop("update_trace_keys", []) + session_id = clean_metadata.get("session_id", None) + trace_name = clean_metadata.get("trace_name", None) + trace_id = clean_metadata.get("trace_id", None) + existing_trace_id = clean_metadata.get("existing_trace_id", None) + update_trace_keys = clean_metadata.get("update_trace_keys", []) if trace_name is None and existing_trace_id is None: # just log `litellm-{call_type}` as the trace name @@ -342,6 +342,10 @@ class LangFuseLogger: if updated_trace_value is not None: trace_params[trace_param_key] = updated_trace_value + if "metadata" in update_trace_keys: + # log metadata in the trace + trace_params["metadata"] = clean_metadata + # Pop the trace specific keys that would have been popped if there were a new trace for key in list( filter(lambda key: key.startswith("trace_"), clean_metadata.keys()) @@ -363,6 +367,7 @@ class LangFuseLogger: "trace_version", clean_metadata.get("version", None) ), # If provided just version, it will applied to the trace as well, if applied a trace version it will take precedence "user_id": user_id, + "metadata": metadata, } for key in list( filter(lambda key: key.startswith("trace_"), clean_metadata.keys()) @@ -426,7 +431,6 @@ class LangFuseLogger: "url": url, "headers": clean_headers, } - trace = self.Langfuse.trace(**trace_params) generation_id = None From ebc927f1c8b550ebc888dced3b9aebd3d0572a42 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 11 May 2024 10:18:08 -0700 Subject: [PATCH 019/184] feat(router.py): allow setting model_region in litellm_params 
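For the router change introduced here, a rough sketch of how the preview flag is meant to be used (the flag and parameter names come from the diff below; the deployment itself is a made-up example):

```python
# Hypothetical sketch: opting into region auto-inference behind the preview flag.
import litellm
from litellm import Router

litellm.enable_preview_features = True  # region inference is skipped otherwise

router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "azure/chatgpt-v-2",
                "api_key": "os.environ/AZURE_API_KEY",
                "api_base": "os.environ/AZURE_API_BASE",
                # region_name left unset -> inferred for azure deployments
            },
        }
    ],
    enable_pre_call_checks=True,
)
```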
Closes https://github.com/BerriAI/litellm/issues/3580 --- litellm/__init__.py | 3 ++ litellm/proxy/_super_secret_config.yaml | 29 +++++---------- litellm/router.py | 12 +++--- litellm/tests/test_router.py | 49 +++++++++++++++++++++++++ litellm/utils.py | 3 ++ 5 files changed, 71 insertions(+), 25 deletions(-) diff --git a/litellm/__init__.py b/litellm/__init__.py index aedf42139..cd4aa5144 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -101,6 +101,9 @@ blocked_user_list: Optional[Union[str, List]] = None banned_keywords_list: Optional[Union[str, List]] = None llm_guard_mode: Literal["all", "key-specific", "request-specific"] = "all" ################## +### PREVIEW FEATURES ### +enable_preview_features: bool = False +################## logging: bool = True caching: bool = ( False # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648 diff --git a/litellm/proxy/_super_secret_config.yaml b/litellm/proxy/_super_secret_config.yaml index 752cd281d..832e35113 100644 --- a/litellm/proxy/_super_secret_config.yaml +++ b/litellm/proxy/_super_secret_config.yaml @@ -1,25 +1,13 @@ model_list: - litellm_params: - api_base: https://openai-function-calling-workers.tasslexyz.workers.dev/ - api_key: my-fake-key - model: openai/my-fake-model - model_name: fake-openai-endpoint -- litellm_params: - api_base: https://openai-function-calling-workers.tasslexyz.workers.dev/ - api_key: my-fake-key-2 - model: openai/my-fake-model-2 - model_name: fake-openai-endpoint -- litellm_params: - api_base: https://openai-function-calling-workers.tasslexyz.workers.dev/ - api_key: my-fake-key-3 - model: openai/my-fake-model-3 - model_name: fake-openai-endpoint -- model_name: gpt-4 - litellm_params: - model: gpt-3.5-turbo -- litellm_params: - model: together_ai/codellama/CodeLlama-13b-Instruct-hf - model_name: CodeLlama-13b-Instruct + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: 2023-07-01-preview + model: azure/azure-embedding-model + model_info: + base_model: text-embedding-ada-002 + mode: embedding + model_name: text-embedding-ada-002 router_settings: redis_host: redis @@ -28,6 +16,7 @@ router_settings: litellm_settings: set_verbose: True + enable_preview_features: true # service_callback: ["prometheus_system"] # success_callback: ["prometheus"] # failure_callback: ["prometheus"] diff --git a/litellm/router.py b/litellm/router.py index f0d94908e..e0abc2e3b 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -2557,23 +2557,25 @@ class Router: # init OpenAI, Azure clients self.set_client(model=deployment.to_json(exclude_none=True)) - # set region (if azure model) - _auto_infer_region = os.environ.get("AUTO_INFER_REGION", False) - if _auto_infer_region == True or _auto_infer_region == "True": + # set region (if azure model) ## PREVIEW FEATURE ## + if litellm.enable_preview_features == True: print("Auto inferring region") # noqa """ Hiding behind a feature flag When there is a large amount of LLM deployments this makes startup times blow up """ try: - if "azure" in deployment.litellm_params.model: + if ( + "azure" in deployment.litellm_params.model + and deployment.litellm_params.region_name is None + ): region = litellm.utils.get_model_region( litellm_params=deployment.litellm_params, mode=None ) deployment.litellm_params.region_name = region except Exception as e: - verbose_router_logger.error( + verbose_router_logger.debug( "Unable to get the region for azure model - {}, {}".format( 
deployment.litellm_params.model, str(e) ) diff --git a/litellm/tests/test_router.py b/litellm/tests/test_router.py index 2d277d749..7c59acb79 100644 --- a/litellm/tests/test_router.py +++ b/litellm/tests/test_router.py @@ -687,6 +687,55 @@ def test_router_context_window_check_pre_call_check_out_group(): pytest.fail(f"Got unexpected exception on router! - {str(e)}") +@pytest.mark.parametrize("allowed_model_region", ["eu", None]) +def test_router_region_pre_call_check(allowed_model_region): + """ + If region based routing set + - check if only model in allowed region is allowed by '_pre_call_checks' + """ + model_list = [ + { + "model_name": "gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE"), + "base_model": "azure/gpt-35-turbo", + "region_name": "eu", + }, + "model_info": {"id": "1"}, + }, + { + "model_name": "gpt-3.5-turbo-large", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "gpt-3.5-turbo-1106", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + "model_info": {"id": "2"}, + }, + ] + + router = Router(model_list=model_list, enable_pre_call_checks=True) + + _healthy_deployments = router._pre_call_checks( + model="gpt-3.5-turbo", + healthy_deployments=model_list, + messages=[{"role": "user", "content": "Hey!"}], + allowed_model_region=allowed_model_region, + ) + + if allowed_model_region is None: + assert len(_healthy_deployments) == 2 + else: + assert len(_healthy_deployments) == 1, "No models selected as healthy" + assert ( + _healthy_deployments[0]["model_info"]["id"] == "1" + ), "Incorrect model id picked. 
Got id={}, expected id=1".format( + _healthy_deployments[0]["model_info"]["id"] + ) + + ### FUNCTION CALLING diff --git a/litellm/utils.py b/litellm/utils.py index 9218f92a3..1c9c3df92 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -5881,6 +5881,9 @@ def calculate_max_parallel_requests( def _is_region_eu(model_region: str) -> bool: + if model_region == "eu": + return True + EU_Regions = ["europe", "sweden", "switzerland", "france", "uk"] for region in EU_Regions: if "europe" in model_region.lower(): From ebb5c76e374e63cd0c70407df6f1b5c9541718f4 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 10:19:02 -0700 Subject: [PATCH 020/184] fix langfuse log clean metadata --- litellm/integrations/langfuse.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index 5e1961b66..bc1d50190 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -367,7 +367,7 @@ class LangFuseLogger: "trace_version", clean_metadata.get("version", None) ), # If provided just version, it will applied to the trace as well, if applied a trace version it will take precedence "user_id": user_id, - "metadata": metadata, + "metadata": clean_metadata, } for key in list( filter(lambda key: key.startswith("trace_"), clean_metadata.keys()) From 97ba230b7ae21bb27ade2893ebd166ed3d0013f1 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 10:20:30 -0700 Subject: [PATCH 021/184] fix langfuse test --- litellm/tests/test_alangfuse.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/litellm/tests/test_alangfuse.py b/litellm/tests/test_alangfuse.py index 5a495550c..37737d40b 100644 --- a/litellm/tests/test_alangfuse.py +++ b/litellm/tests/test_alangfuse.py @@ -248,12 +248,14 @@ async def test_langfuse_logging_metadata(langfuse_client): "trace_name", "trace_id", "existing_trace_id", + "update_trace_keys", "trace_user_id", "session_id", "tags", "generation_name", "generation_id", "prompt", + "cache_hit", } trace_metadata = { "trace_actual_metadata_key": "trace_actual_metadata_value" @@ -339,6 +341,13 @@ async def test_langfuse_logging_metadata(langfuse_client): for generation_id, generation in zip(generation_ids, generations): assert generation.id == generation_id assert generation.trace_id == trace_id + print( + "diff in generation keys", + set(generation.metadata.keys()).difference( + expected_filtered_metadata_keys + ), + ) + assert set(generation.metadata.keys()).isdisjoint( expected_filtered_metadata_keys ) From 6577719bf8a78db887c1de254e00f01c83b01ebf Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 10:28:13 -0700 Subject: [PATCH 022/184] fix - langfuse trace --- litellm/tests/test_alangfuse.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/litellm/tests/test_alangfuse.py b/litellm/tests/test_alangfuse.py index 37737d40b..6f9b8d626 100644 --- a/litellm/tests/test_alangfuse.py +++ b/litellm/tests/test_alangfuse.py @@ -247,15 +247,11 @@ async def test_langfuse_logging_metadata(langfuse_client): expected_filtered_metadata_keys = { "trace_name", "trace_id", - "existing_trace_id", - "update_trace_keys", "trace_user_id", - "session_id", "tags", "generation_name", "generation_id", "prompt", - "cache_hit", } trace_metadata = { "trace_actual_metadata_key": "trace_actual_metadata_value" @@ -342,8 +338,8 @@ async def test_langfuse_logging_metadata(langfuse_client): assert generation.id == generation_id assert generation.trace_id == trace_id 
print( - "diff in generation keys", - set(generation.metadata.keys()).difference( + "common keys in trace", + set(generation.metadata.keys()).intersection( expected_filtered_metadata_keys ), ) From 6714854bb77910d11fea987df41fd37facca46a5 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 11 May 2024 11:04:00 -0700 Subject: [PATCH 023/184] feat(router.py): support region routing for bedrock, vertex ai, watsonx --- litellm/llms/azure.py | 8 ++- litellm/llms/bedrock.py | 10 +++ litellm/llms/vertex_ai.py | 17 +++++ litellm/llms/watsonx.py | 9 +++ litellm/router.py | 4 +- litellm/types/router.py | 4 ++ litellm/utils.py | 147 +++++++++++++++++++++++++++++++++++--- 7 files changed, 187 insertions(+), 12 deletions(-) diff --git a/litellm/llms/azure.py b/litellm/llms/azure.py index f416d1437..a56527a59 100644 --- a/litellm/llms/azure.py +++ b/litellm/llms/azure.py @@ -9,7 +9,7 @@ from litellm.utils import ( convert_to_model_response_object, TranscriptionResponse, ) -from typing import Callable, Optional, BinaryIO +from typing import Callable, Optional, BinaryIO, List from litellm import OpenAIConfig import litellm, json import httpx # type: ignore @@ -105,6 +105,12 @@ class AzureOpenAIConfig(OpenAIConfig): optional_params["azure_ad_token"] = value return optional_params + def get_eu_regions(self) -> List[str]: + """ + Source: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-and-gpt-4-turbo-model-availability + """ + return ["europe", "sweden", "switzerland", "france", "uk"] + def select_azure_base_url_or_endpoint(azure_client_params: dict): # azure_client_params = { diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py index 08433ba18..d2a83703a 100644 --- a/litellm/llms/bedrock.py +++ b/litellm/llms/bedrock.py @@ -52,6 +52,16 @@ class AmazonBedrockGlobalConfig: optional_params[mapped_params[param]] = value return optional_params + def get_eu_regions(self) -> List[str]: + """ + Source: https://www.aws-services.info/bedrock.html + """ + return [ + "eu-west-1", + "eu-west-3", + "eu-central-1", + ] + class AmazonTitanConfig: """ diff --git a/litellm/llms/vertex_ai.py b/litellm/llms/vertex_ai.py index a61c07df0..d3bb2c78a 100644 --- a/litellm/llms/vertex_ai.py +++ b/litellm/llms/vertex_ai.py @@ -198,6 +198,23 @@ class VertexAIConfig: optional_params[mapped_params[param]] = value return optional_params + def get_eu_regions(self) -> List[str]: + """ + Source: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions + """ + return [ + "europe-central2", + "europe-north1", + "europe-southwest1", + "europe-west1", + "europe-west2", + "europe-west3", + "europe-west4", + "europe-west6", + "europe-west8", + "europe-west9", + ] + import asyncio diff --git a/litellm/llms/watsonx.py b/litellm/llms/watsonx.py index 99f2d18ba..a12676fa0 100644 --- a/litellm/llms/watsonx.py +++ b/litellm/llms/watsonx.py @@ -149,6 +149,15 @@ class IBMWatsonXAIConfig: optional_params[mapped_params[param]] = value return optional_params + def get_eu_regions(self) -> List[str]: + """ + Source: https://www.ibm.com/docs/en/watsonx/saas?topic=integrations-regional-availability + """ + return [ + "eu-de", + "eu-gb", + ] + def convert_messages_to_prompt(model, messages, provider, custom_prompt_dict): # handle anthropic prompts and amazon titan prompts diff --git a/litellm/router.py b/litellm/router.py index e0abc2e3b..0b5846db9 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -2329,7 +2329,7 @@ class Router: ) # cache for 1 hr else: - _api_key = api_key 
+ _api_key = api_key # type: ignore if _api_key is not None and isinstance(_api_key, str): # only show first 5 chars of api_key _api_key = _api_key[:8] + "*" * 15 @@ -2953,7 +2953,7 @@ class Router: ): # check if in allowed_model_region if ( - _is_region_eu(model_region=_litellm_params["region_name"]) + _is_region_eu(litellm_params=LiteLLM_Params(**_litellm_params)) == False ): invalid_model_indices.append(idx) diff --git a/litellm/types/router.py b/litellm/types/router.py index dbf36f17c..e8f3ff641 100644 --- a/litellm/types/router.py +++ b/litellm/types/router.py @@ -132,6 +132,8 @@ class GenericLiteLLMParams(BaseModel): aws_access_key_id: Optional[str] = None aws_secret_access_key: Optional[str] = None aws_region_name: Optional[str] = None + ## IBM WATSONX ## + watsonx_region_name: Optional[str] = None ## CUSTOM PRICING ## input_cost_per_token: Optional[float] = None output_cost_per_token: Optional[float] = None @@ -161,6 +163,8 @@ class GenericLiteLLMParams(BaseModel): aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_region_name: Optional[str] = None, + ## IBM WATSONX ## + watsonx_region_name: Optional[str] = None, input_cost_per_token: Optional[float] = None, output_cost_per_token: Optional[float] = None, input_cost_per_second: Optional[float] = None, diff --git a/litellm/utils.py b/litellm/utils.py index 1c9c3df92..2704ccbcb 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -107,7 +107,18 @@ try: except Exception as e: verbose_logger.debug(f"Exception import enterprise features {str(e)}") -from typing import cast, List, Dict, Union, Optional, Literal, Any, BinaryIO, Iterable +from typing import ( + cast, + List, + Dict, + Union, + Optional, + Literal, + Any, + BinaryIO, + Iterable, + Tuple, +) from .caching import Cache from concurrent.futures import ThreadPoolExecutor @@ -5880,13 +5891,70 @@ def calculate_max_parallel_requests( return None -def _is_region_eu(model_region: str) -> bool: - if model_region == "eu": +def _get_model_region( + custom_llm_provider: str, litellm_params: LiteLLM_Params +) -> Optional[str]: + """ + Return the region for a model, for a given provider + """ + if custom_llm_provider == "vertex_ai": + # check 'vertex_location' + vertex_ai_location = ( + litellm_params.vertex_location + or litellm.vertex_location + or get_secret("VERTEXAI_LOCATION") + or get_secret("VERTEX_LOCATION") + ) + if vertex_ai_location is not None and isinstance(vertex_ai_location, str): + return vertex_ai_location + elif custom_llm_provider == "bedrock": + aws_region_name = litellm_params.aws_region_name + if aws_region_name is not None: + return aws_region_name + elif custom_llm_provider == "watsonx": + watsonx_region_name = litellm_params.watsonx_region_name + if watsonx_region_name is not None: + return watsonx_region_name + return litellm_params.region_name + + +def _is_region_eu(litellm_params: LiteLLM_Params) -> bool: + """ + Return true/false if a deployment is in the EU + """ + if litellm_params.region_name == "eu": return True - EU_Regions = ["europe", "sweden", "switzerland", "france", "uk"] - for region in EU_Regions: - if "europe" in model_region.lower(): + ## ELSE ## + """ + - get provider + - get provider regions + - return true if given region (get_provider_region) in eu region (config.get_eu_regions()) + """ + model, custom_llm_provider, _, _ = litellm.get_llm_provider( + model=litellm_params.model, litellm_params=litellm_params + ) + + model_region = _get_model_region( + custom_llm_provider=custom_llm_provider, 
litellm_params=litellm_params + ) + + if model_region is None: + return False + + if custom_llm_provider == "azure": + eu_regions = litellm.AzureOpenAIConfig().get_eu_regions() + elif custom_llm_provider == "vertex_ai": + eu_regions = litellm.VertexAIConfig().get_eu_regions() + elif custom_llm_provider == "bedrock": + eu_regions = litellm.AmazonBedrockGlobalConfig().get_eu_regions() + elif custom_llm_provider == "watsonx": + eu_regions = litellm.IBMWatsonXAIConfig().get_eu_regions() + else: + return False + + for region in eu_regions: + if region in model_region.lower(): return True return False @@ -6312,8 +6380,23 @@ def get_llm_provider( custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None, api_key: Optional[str] = None, -): + litellm_params: Optional[LiteLLM_Params] = None, +) -> Tuple[str, str, Optional[str], Optional[str]]: + """ + Returns the provider for a given model name - e.g. 'azure/chatgpt-v-2' -> 'azure' + + For router -> Can also give the whole litellm param dict -> this function will extract the relevant details + """ try: + ## IF LITELLM PARAMS GIVEN ## + if litellm_params is not None: + assert ( + custom_llm_provider is None and api_base is None and api_key is None + ), "Either pass in litellm_params or the custom_llm_provider/api_base/api_key. Otherwise, these values will be overriden." + custom_llm_provider = litellm_params.custom_llm_provider + api_base = litellm_params.api_base + api_key = litellm_params.api_key + dynamic_api_key = None # check if llm provider provided # AZURE AI-Studio Logic - Azure AI Studio supports AZURE/Cohere @@ -6374,7 +6457,8 @@ def get_llm_provider( api_base or get_secret("MISTRAL_AZURE_API_BASE") # for Azure AI Mistral or "https://api.mistral.ai/v1" - ) + ) # type: ignore + # if api_base does not end with /v1 we add it if api_base is not None and not api_base.endswith( "/v1" @@ -6397,10 +6481,30 @@ def get_llm_provider( or get_secret("TOGETHERAI_API_KEY") or get_secret("TOGETHER_AI_TOKEN") ) + if api_base is not None and not isinstance(api_base, str): + raise Exception( + "api base needs to be a string. api_base={}".format(api_base) + ) + if dynamic_api_key is not None and not isinstance(dynamic_api_key, str): + raise Exception( + "dynamic_api_key needs to be a string. dynamic_api_key={}".format( + dynamic_api_key + ) + ) return model, custom_llm_provider, dynamic_api_key, api_base elif model.split("/", 1)[0] in litellm.provider_list: custom_llm_provider = model.split("/", 1)[0] model = model.split("/", 1)[1] + if api_base is not None and not isinstance(api_base, str): + raise Exception( + "api base needs to be a string. api_base={}".format(api_base) + ) + if dynamic_api_key is not None and not isinstance(dynamic_api_key, str): + raise Exception( + "dynamic_api_key needs to be a string. dynamic_api_key={}".format( + dynamic_api_key + ) + ) return model, custom_llm_provider, dynamic_api_key, api_base # check if api base is a known openai compatible endpoint if api_base: @@ -6424,7 +6528,22 @@ def get_llm_provider( elif endpoint == "api.deepseek.com/v1": custom_llm_provider = "deepseek" dynamic_api_key = get_secret("DEEPSEEK_API_KEY") - return model, custom_llm_provider, dynamic_api_key, api_base + + if api_base is not None and not isinstance(api_base, str): + raise Exception( + "api base needs to be a string. api_base={}".format( + api_base + ) + ) + if dynamic_api_key is not None and not isinstance( + dynamic_api_key, str + ): + raise Exception( + "dynamic_api_key needs to be a string. 
dynamic_api_key={}".format( + dynamic_api_key + ) + ) + return model, custom_llm_provider, dynamic_api_key, api_base # type: ignore # check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, et.) ## openai - chatcompletion + text completion @@ -6515,6 +6634,16 @@ def get_llm_provider( ), llm_provider="", ) + if api_base is not None and not isinstance(api_base, str): + raise Exception( + "api base needs to be a string. api_base={}".format(api_base) + ) + if dynamic_api_key is not None and not isinstance(dynamic_api_key, str): + raise Exception( + "dynamic_api_key needs to be a string. dynamic_api_key={}".format( + dynamic_api_key + ) + ) return model, custom_llm_provider, dynamic_api_key, api_base except Exception as e: if isinstance(e, litellm.exceptions.BadRequestError): From 69452f003d63758e1590d658bf61423802377f63 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 11:21:53 -0700 Subject: [PATCH 024/184] ui - show token hashes on ui --- ...odel_prices_and_context_window_backup.json | 142 ++++++++++++++++-- .../src/components/view_key_spend_report.tsx | 141 ----------------- .../src/components/view_key_table.tsx | 27 ++-- 3 files changed, 142 insertions(+), 168 deletions(-) delete mode 100644 ui/litellm-dashboard/src/components/view_key_spend_report.tsx diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 10c70a858..1ade08fe3 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -1571,6 +1571,135 @@ "litellm_provider": "replicate", "mode": "chat" }, + "openrouter/microsoft/wizardlm-2-8x22b:nitro": { + "max_tokens": 65536, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000001, + "litellm_provider": "openrouter", + "mode": "chat" + }, + "openrouter/google/gemini-pro-1.5": { + "max_tokens": 8192, + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "input_cost_per_token": 0.0000025, + "output_cost_per_token": 0.0000075, + "input_cost_per_image": 0.00265, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true + }, + "openrouter/mistralai/mixtral-8x22b-instruct": { + "max_tokens": 65536, + "input_cost_per_token": 0.00000065, + "output_cost_per_token": 0.00000065, + "litellm_provider": "openrouter", + "mode": "chat" + }, + "openrouter/cohere/command-r-plus": { + "max_tokens": 128000, + "input_cost_per_token": 0.000003, + "output_cost_per_token": 0.000015, + "litellm_provider": "openrouter", + "mode": "chat" + }, + "openrouter/databricks/dbrx-instruct": { + "max_tokens": 32768, + "input_cost_per_token": 0.0000006, + "output_cost_per_token": 0.0000006, + "litellm_provider": "openrouter", + "mode": "chat" + }, + "openrouter/anthropic/claude-3-haiku": { + "max_tokens": 200000, + "input_cost_per_token": 0.00000025, + "output_cost_per_token": 0.00000125, + "input_cost_per_image": 0.0004, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true + }, + "openrouter/anthropic/claude-3-sonnet": { + "max_tokens": 200000, + "input_cost_per_token": 0.000003, + "output_cost_per_token": 0.000015, + "input_cost_per_image": 0.0048, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true + }, + "openrouter/mistralai/mistral-large": { + 
"max_tokens": 32000, + "input_cost_per_token": 0.000008, + "output_cost_per_token": 0.000024, + "litellm_provider": "openrouter", + "mode": "chat" + }, + "openrouter/cognitivecomputations/dolphin-mixtral-8x7b": { + "max_tokens": 32769, + "input_cost_per_token": 0.0000005, + "output_cost_per_token": 0.0000005, + "litellm_provider": "openrouter", + "mode": "chat" + }, + "openrouter/google/gemini-pro-vision": { + "max_tokens": 45875, + "input_cost_per_token": 0.000000125, + "output_cost_per_token": 0.000000375, + "input_cost_per_image": 0.0025, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true + }, + "openrouter/fireworks/firellava-13b": { + "max_tokens": 4096, + "input_cost_per_token": 0.0000002, + "output_cost_per_token": 0.0000002, + "litellm_provider": "openrouter", + "mode": "chat" + }, + "openrouter/meta-llama/llama-3-8b-instruct:free": { + "max_tokens": 8192, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "openrouter", + "mode": "chat" + }, + "openrouter/meta-llama/llama-3-8b-instruct:extended": { + "max_tokens": 16384, + "input_cost_per_token": 0.000000225, + "output_cost_per_token": 0.00000225, + "litellm_provider": "openrouter", + "mode": "chat" + }, + "openrouter/meta-llama/llama-3-70b-instruct:nitro": { + "max_tokens": 8192, + "input_cost_per_token": 0.0000009, + "output_cost_per_token": 0.0000009, + "litellm_provider": "openrouter", + "mode": "chat" + }, + "openrouter/meta-llama/llama-3-70b-instruct": { + "max_tokens": 8192, + "input_cost_per_token": 0.00000059, + "output_cost_per_token": 0.00000079, + "litellm_provider": "openrouter", + "mode": "chat" + }, + "openrouter/openai/gpt-4-vision-preview": { + "max_tokens": 130000, + "input_cost_per_token": 0.00001, + "output_cost_per_token": 0.00003, + "input_cost_per_image": 0.01445, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true + }, "openrouter/openai/gpt-3.5-turbo": { "max_tokens": 4095, "input_cost_per_token": 0.0000015, @@ -1621,14 +1750,14 @@ "tool_use_system_prompt_tokens": 395 }, "openrouter/google/palm-2-chat-bison": { - "max_tokens": 8000, + "max_tokens": 25804, "input_cost_per_token": 0.0000005, "output_cost_per_token": 0.0000005, "litellm_provider": "openrouter", "mode": "chat" }, "openrouter/google/palm-2-codechat-bison": { - "max_tokens": 8000, + "max_tokens": 20070, "input_cost_per_token": 0.0000005, "output_cost_per_token": 0.0000005, "litellm_provider": "openrouter", @@ -1711,13 +1840,6 @@ "litellm_provider": "openrouter", "mode": "chat" }, - "openrouter/meta-llama/llama-3-70b-instruct": { - "max_tokens": 8192, - "input_cost_per_token": 0.0000008, - "output_cost_per_token": 0.0000008, - "litellm_provider": "openrouter", - "mode": "chat" - }, "j2-ultra": { "max_tokens": 8192, "max_input_tokens": 8192, @@ -3226,4 +3348,4 @@ "mode": "embedding" } -} +} \ No newline at end of file diff --git a/ui/litellm-dashboard/src/components/view_key_spend_report.tsx b/ui/litellm-dashboard/src/components/view_key_spend_report.tsx deleted file mode 100644 index 6741db2a9..000000000 --- a/ui/litellm-dashboard/src/components/view_key_spend_report.tsx +++ /dev/null @@ -1,141 +0,0 @@ -"use client"; - -import React, { useState, useEffect } from "react"; -import { - Button as Button2, - Modal, - Form, - Input, - InputNumber, - Select, - message, -} from "antd"; -import { - Button, - Text, - Card, - Table, - BarChart, - Title, - Subtitle, - BarList, - Metric, -} from 
"@tremor/react"; -import { keySpendLogsCall, PredictedSpendLogsCall } from "./networking"; - -interface ViewKeySpendReportProps { - token: string; - accessToken: string; - keySpend: number; - keyBudget: number; - keyName: string; -} - -type ResponseValueType = { - startTime: string; // Assuming startTime is a string, adjust it if it's of a different type - spend: number; // Assuming spend is a number, adjust it if it's of a different type - user: string; // Assuming user is a string, adjust it if it's of a different type -}; - -const ViewKeySpendReport: React.FC = ({ - token, - accessToken, - keySpend, - keyBudget, - keyName, -}) => { - const [isModalVisible, setIsModalVisible] = useState(false); - const [data, setData] = useState<{ day: string; spend: number }[] | null>( - null - ); - const [predictedSpendString, setPredictedSpendString] = useState(""); - const [userData, setUserData] = useState< - { name: string; value: number }[] | null - >(null); - - const showModal = () => { - console.log("Show Modal triggered"); - setIsModalVisible(true); - fetchData(); - }; - - const handleOk = () => { - setIsModalVisible(false); - }; - - const handleCancel = () => { - setIsModalVisible(false); - }; - - // call keySpendLogsCall and set the data - const fetchData = async () => { - try { - if (accessToken == null || token == null) { - return; - } - console.log(`accessToken: ${accessToken}; token: ${token}`); - const response = await keySpendLogsCall( - (accessToken = accessToken), - (token = token) - ); - console.log("Response:", response); - setData(response); - - // predict spend based on response - const predictedSpend = await PredictedSpendLogsCall(accessToken, response); - console.log("Response2:", predictedSpend); - - // append predictedSpend to data - const combinedData = [...response, ...predictedSpend.response]; - setData(combinedData); - setPredictedSpendString(predictedSpend.predicted_spend) - - console.log("Combined Data:", combinedData); - // setPredictedSpend(predictedSpend); - - } catch (error) { - console.error("There was an error fetching the data", error); - } - }; - - - if (!token) { - return null; - } - - return ( -
- - - Key Name: {keyName} - - Monthly Spend ${keySpend} - {predictedSpendString} - - - {data && ( - - )} - - - -
- ); -}; - -export default ViewKeySpendReport; diff --git a/ui/litellm-dashboard/src/components/view_key_table.tsx b/ui/litellm-dashboard/src/components/view_key_table.tsx index d812c19a3..bb6ffc420 100644 --- a/ui/litellm-dashboard/src/components/view_key_table.tsx +++ b/ui/litellm-dashboard/src/components/view_key_table.tsx @@ -17,6 +17,7 @@ import { DialogPanel, Text, Title, + Subtitle, Icon, BarChart, } from "@tremor/react"; @@ -32,8 +33,6 @@ import { Select, } from "antd"; -import ViewKeySpendReport from "./view_key_spend_report"; - const { Option } = Select; @@ -570,6 +569,7 @@ const ViewKeyTable: React.FC = ({ icon={InformationCircleIcon} size="sm" /> + = ({ setOpenDialogId(null); setSelectedItem(null); }} + static={true} > @@ -639,22 +640,14 @@ const ViewKeyTable: React.FC = ({ - - {spendData && ( - - )} - - + + Token Name + {selectedItem.key_alias ? selectedItem.key_alias : selectedItem.key_name} + Token ID + {selectedItem.token} Metadata - - {JSON.stringify(selectedItem.metadata)} +
{JSON.stringify(selectedItem.metadata)} 
+
)} -
-
+ + Date: Sat, 11 May 2024 13:08:16 -0700 Subject: [PATCH 028/184] feat - router async batch acompletion --- litellm/router.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/litellm/router.py b/litellm/router.py index f0d94908e..7396dab20 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -605,6 +605,33 @@ class Router: self.fail_calls[model_name] += 1 raise e + async def abatch_completion( + self, models: List[str], messages: List[Dict[str, str]], **kwargs + ): + + async def _async_completion_no_exceptions( + model: str, messages: List[Dict[str, str]], **kwargs + ): + """ + Wrapper around self.async_completion that catches exceptions and returns them as a result + """ + try: + return await self.acompletion(model=model, messages=messages, **kwargs) + except Exception as e: + return e + + _tasks = [] + for model in models: + # add each task but if the task fails + _tasks.append( + _async_completion_no_exceptions( + model=model, messages=messages, **kwargs + ) + ) + + response = await asyncio.gather(*_tasks) + return response + def image_generation(self, prompt: str, model: str, **kwargs): try: kwargs["model"] = model From 6561e0838ea36695847d38f648b81c46d225ce08 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 13:09:17 -0700 Subject: [PATCH 029/184] test - router.batch_acompletion --- litellm/tests/test_router_batch_completion.py | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 litellm/tests/test_router_batch_completion.py diff --git a/litellm/tests/test_router_batch_completion.py b/litellm/tests/test_router_batch_completion.py new file mode 100644 index 000000000..f2873b18d --- /dev/null +++ b/litellm/tests/test_router_batch_completion.py @@ -0,0 +1,60 @@ +#### What this tests #### +# This tests litellm router with batch completion + +import sys, os, time, openai +import traceback, asyncio +import pytest + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import litellm +from litellm import Router +from litellm.router import Deployment, LiteLLM_Params, ModelInfo +from concurrent.futures import ThreadPoolExecutor +from collections import defaultdict +from dotenv import load_dotenv +import os, httpx + +load_dotenv() + + +@pytest.mark.asyncio +async def test_batch_completion_multiple_models(): + litellm.set_verbose = True + + router = litellm.Router( + model_list=[ + { + "model_name": "gpt-3.5-turbo", + "litellm_params": { + "model": "gpt-3.5-turbo", + }, + }, + { + "model_name": "groq-llama", + "litellm_params": { + "model": "groq/llama3-8b-8192", + }, + }, + ] + ) + + response = await router.abatch_completion( + models=["gpt-3.5-turbo", "groq-llama"], + messages=[ + {"role": "user", "content": "is litellm becoming a better product ?"} + ], + max_tokens=15, + ) + + print(response) + assert len(response) == 2 + + models_in_responses = [] + for individual_response in response: + _model = individual_response["model"] + models_in_responses.append(_model) + + # assert both models are different + assert models_in_responses[0] != models_in_responses[1] From b8c7bbcb9f8d10103ebed09f9c73d6d49ca48024 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 13:24:25 -0700 Subject: [PATCH 030/184] support batch /chat/completions on proxy --- litellm/proxy/proxy_server.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 46c132773..0ae498baa 100644 --- 
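Before the proxy-side diff, a usage sketch of `Router.abatch_completion` as implemented in this patch (the model entries are made up; the error-in-list behavior follows the wrapper above):

```python
# Editor's sketch: abatch_completion returns one entry per model and surfaces
# failures as exception objects in the result list instead of raising.
import asyncio
from litellm import Router

router = Router(
    model_list=[
        {"model_name": "gpt-3.5-turbo", "litellm_params": {"model": "gpt-3.5-turbo"}},
        {"model_name": "bad-model", "litellm_params": {"model": "groq/does-not-exist"}},
    ]
)

async def main():
    responses = await router.abatch_completion(
        models=["gpt-3.5-turbo", "bad-model"],
        messages=[{"role": "user", "content": "hello"}],
    )
    for r in responses:
        if isinstance(r, Exception):
            print("failed:", r)  # failures come back in-place
        else:
            print(r.choices[0].message.content)

asyncio.run(main())
```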
a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -3639,7 +3639,7 @@ async def chat_completion( ### MODEL ALIAS MAPPING ### # check if model name in model alias map # get the actual model name - if data["model"] in litellm.model_alias_map: + if isinstance(data["model"], str) and data["model"] in litellm.model_alias_map: data["model"] = litellm.model_alias_map[data["model"]] ## LOGGING OBJECT ## - initialize logging object for logging success/failure events for call @@ -3673,6 +3673,9 @@ async def chat_completion( # skip router if user passed their key if "api_key" in data: tasks.append(litellm.acompletion(**data)) + elif isinstance(data["model"], list) and llm_router is not None: + _models = data.pop("model") + tasks.append(llm_router.abatch_completion(models=_models, **data)) elif "user_config" in data: # initialize a new router instance. make request using this Router router_config = data.pop("user_config") From 31cb1be27929eec766447616942b5e7b70f2b61d Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 13:24:59 -0700 Subject: [PATCH 031/184] edit dev config.yaml --- litellm/proxy/proxy_config.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index b1cbf2e81..85634c5b8 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -4,6 +4,12 @@ model_list: model: openai/fake api_key: fake-key api_base: https://exampleopenaiendpoint-production.up.railway.app/ + - model_name: llama3 + litellm_params: + model: groq/llama3-8b-8192 + - model_name: gpt-3.5-turbo + litellm_params: + model: gpt-3.5-turbo - model_name: "*" litellm_params: model: openai/* From e1f94fcbbb3a51107fa14ef09d69132eff92e124 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 13:32:30 -0700 Subject: [PATCH 032/184] test batch completions on litellm proxy --- tests/test_openai_endpoints.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/tests/test_openai_endpoints.py b/tests/test_openai_endpoints.py index 38e87c254..7bc97ca59 100644 --- a/tests/test_openai_endpoints.py +++ b/tests/test_openai_endpoints.py @@ -4,6 +4,7 @@ import pytest import asyncio import aiohttp, openai from openai import OpenAI, AsyncOpenAI +from typing import Optional, List, Union def response_header_check(response): @@ -71,7 +72,7 @@ async def new_user(session): return await response.json() -async def chat_completion(session, key, model="gpt-4"): +async def chat_completion(session, key, model: Union[str, List] = "gpt-4"): url = "http://0.0.0.0:4000/chat/completions" headers = { "Authorization": f"Bearer {key}", @@ -409,3 +410,27 @@ async def test_openai_wildcard_chat_completion(): # call chat/completions with a model that the key was not created for + the model is not on the config.yaml await chat_completion(session=session, key=key, model="gpt-3.5-turbo-0125") + + +@pytest.mark.asyncio +async def test_batch_chat_completions(): + """ + - Make chat completion call using + + """ + async with aiohttp.ClientSession() as session: + + # call chat/completions with a model that the key was not created for + the model is not on the config.yaml + response = await chat_completion( + session=session, + key="sk-1234", + model=[ + "gpt-3.5-turbo", + "fake-openai-endpoint", + ], + ) + + print(f"response: {response}") + + assert len(response) == 2 + assert isinstance(response, list) From 5918ee543bd9d3523bfc4fdb6c6ceff4384f8a3e Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: 
Sat, 11 May 2024 13:42:41 -0700 Subject: [PATCH 033/184] docs - batch completion litellm proxy --- docs/my-website/docs/proxy/user_keys.md | 84 +++++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/docs/my-website/docs/proxy/user_keys.md b/docs/my-website/docs/proxy/user_keys.md index fa78b37c1..7aba832eb 100644 --- a/docs/my-website/docs/proxy/user_keys.md +++ b/docs/my-website/docs/proxy/user_keys.md @@ -365,6 +365,90 @@ curl --location 'http://0.0.0.0:4000/moderations' \ ## Advanced +### (BETA) Batch Completions - pass `model` as List + +Use this when you want to send 1 request to N Models + +#### Expected Request Format + +This same request will be sent to the following model groups on the [litellm proxy config.yaml](https://docs.litellm.ai/docs/proxy/configs) +- `model_name="llama3"` +- `model_name="gpt-3.5-turbo"` + +```shell +curl --location 'http://localhost:4000/chat/completions' \ + --header 'Authorization: Bearer sk-1234' \ + --header 'Content-Type: application/json' \ + --data '{ + "model": ["llama3", "gpt-3.5-turbo"], + "max_tokens": 10, + "user": "litellm2", + "messages": [ + { + "role": "user", + "content": "is litellm getting better" + } + ] +}' +``` + + +#### Expected Response Format + +Get a list of responses when `model` is passed as a list + +```json +[ + { + "id": "chatcmpl-3dbd5dd8-7c82-4ca3-bf1f-7c26f497cf2b", + "choices": [ + { + "finish_reason": "length", + "index": 0, + "message": { + "content": "The Elder Scrolls IV: Oblivion!\n\nReleased", + "role": "assistant" + } + } + ], + "created": 1715459876, + "model": "groq/llama3-8b-8192", + "object": "chat.completion", + "system_fingerprint": "fp_179b0f92c9", + "usage": { + "completion_tokens": 10, + "prompt_tokens": 12, + "total_tokens": 22 + } + }, + { + "id": "chatcmpl-9NnldUfFLmVquFHSX4yAtjCw8PGei", + "choices": [ + { + "finish_reason": "length", + "index": 0, + "message": { + "content": "TES4 could refer to The Elder Scrolls IV:", + "role": "assistant" + } + } + ], + "created": 1715459877, + "model": "gpt-3.5-turbo-0125", + "object": "chat.completion", + "system_fingerprint": null, + "usage": { + "completion_tokens": 10, + "prompt_tokens": 9, + "total_tokens": 19 + } + } +] +``` + + + + ### Pass User LLM API Keys, Fallbacks Allow your end-users to pass their model list, api base, OpenAI API key (any LiteLLM supported provider) to make requests From 4a3b08496129841597b1188820a3c45a01ee9abf Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 11 May 2024 13:43:08 -0700 Subject: [PATCH 034/184] feat(bedrock_httpx.py): moves to using httpx client for bedrock cohere calls --- .../generic_api_callback.py | 3 - litellm/integrations/aispend.py | 2 - litellm/integrations/berrispend.py | 1 - litellm/integrations/clickhouse.py | 4 - litellm/integrations/custom_logger.py | 2 - litellm/integrations/datadog.py | 2 - litellm/integrations/dynamodb.py | 2 - litellm/integrations/helicone.py | 2 - litellm/integrations/langfuse.py | 4 +- litellm/integrations/langsmith.py | 2 - litellm/integrations/lunary.py | 9 +- litellm/integrations/openmeter.py | 2 - litellm/integrations/prometheus.py | 2 - litellm/integrations/prometheus_services.py | 2 - litellm/integrations/prompt_layer.py | 2 - litellm/integrations/s3.py | 4 +- litellm/integrations/slack_alerting.py | 2 - litellm/integrations/supabase.py | 2 - litellm/integrations/weights_biases.py | 11 +- litellm/llms/bedrock_httpx.py | 124 ++++++++++++++++++ litellm/main.py | 3 +- .../proxy/example_config_yaml/custom_auth.py | 3 - litellm/router_strategy/least_busy.py 
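Circling back to the batch-completion docs added in PATCH 033: a Python equivalent of the curl example above, using the same placeholder endpoint and key (hypothetical client code, not from the docs):

```python
# Hypothetical Python client for the documented batch `model` list feature.
import requests

resp = requests.post(
    "http://localhost:4000/chat/completions",
    headers={"Authorization": "Bearer sk-1234", "Content-Type": "application/json"},
    json={
        "model": ["llama3", "gpt-3.5-turbo"],  # one request, fanned out to both model groups
        "max_tokens": 10,
        "messages": [{"role": "user", "content": "is litellm getting better"}],
    },
)
responses = resp.json()  # a list with one chat.completion object per model
```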
| 2 - litellm/router_strategy/lowest_cost.py | 3 +- litellm/router_strategy/lowest_latency.py | 2 - litellm/router_strategy/lowest_tpm_rpm.py | 2 - litellm/router_strategy/lowest_tpm_rpm_v2.py | 2 - litellm/tests/test_completion.py | 9 ++ litellm/utils.py | 1 - 29 files changed, 147 insertions(+), 64 deletions(-) create mode 100644 litellm/llms/bedrock_httpx.py diff --git a/enterprise/enterprise_callbacks/generic_api_callback.py b/enterprise/enterprise_callbacks/generic_api_callback.py index 076c13d5e..cf1d22e8f 100644 --- a/enterprise/enterprise_callbacks/generic_api_callback.py +++ b/enterprise/enterprise_callbacks/generic_api_callback.py @@ -10,7 +10,6 @@ from litellm.caching import DualCache from typing import Literal, Union -dotenv.load_dotenv() # Loading env variables using dotenv import traceback @@ -19,8 +18,6 @@ import traceback import dotenv, os import requests - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/aispend.py b/litellm/integrations/aispend.py index a893f8923..2fe8ea0df 100644 --- a/litellm/integrations/aispend.py +++ b/litellm/integrations/aispend.py @@ -1,8 +1,6 @@ #### What this does #### # On success + failure, log events to aispend.io import dotenv, os - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime diff --git a/litellm/integrations/berrispend.py b/litellm/integrations/berrispend.py index 1f0ae4581..7d30b706c 100644 --- a/litellm/integrations/berrispend.py +++ b/litellm/integrations/berrispend.py @@ -3,7 +3,6 @@ import dotenv, os import requests # type: ignore -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime diff --git a/litellm/integrations/clickhouse.py b/litellm/integrations/clickhouse.py index 7d1fb37d9..0c38b8626 100644 --- a/litellm/integrations/clickhouse.py +++ b/litellm/integrations/clickhouse.py @@ -8,8 +8,6 @@ from litellm.proxy._types import UserAPIKeyAuth from litellm.caching import DualCache from typing import Literal, Union - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback @@ -18,8 +16,6 @@ import traceback import dotenv, os import requests - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/custom_logger.py b/litellm/integrations/custom_logger.py index 8a3e0f467..d50882592 100644 --- a/litellm/integrations/custom_logger.py +++ b/litellm/integrations/custom_logger.py @@ -6,8 +6,6 @@ from litellm.proxy._types import UserAPIKeyAuth from litellm.caching import DualCache from typing import Literal, Union, Optional - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback diff --git a/litellm/integrations/datadog.py b/litellm/integrations/datadog.py index d969341fc..6d5e08faf 100644 --- a/litellm/integrations/datadog.py +++ b/litellm/integrations/datadog.py @@ -3,8 +3,6 @@ import dotenv, os import requests # type: ignore - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/dynamodb.py b/litellm/integrations/dynamodb.py index b5462ee7f..21ccabe4b 100644 --- a/litellm/integrations/dynamodb.py +++ b/litellm/integrations/dynamodb.py @@ -3,8 +3,6 @@ import dotenv, os import requests # type: ignore - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import 
datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/helicone.py b/litellm/integrations/helicone.py index c8c107541..85e73258e 100644 --- a/litellm/integrations/helicone.py +++ b/litellm/integrations/helicone.py @@ -3,8 +3,6 @@ import dotenv, os import requests # type: ignore import litellm - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index 1e957dfcf..f27d19968 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -1,8 +1,6 @@ #### What this does #### # On success, logs events to Langfuse -import dotenv, os - -dotenv.load_dotenv() # Loading env variables using dotenv +import os import copy import traceback from packaging.version import Version diff --git a/litellm/integrations/langsmith.py b/litellm/integrations/langsmith.py index 8a0fb3852..92e440215 100644 --- a/litellm/integrations/langsmith.py +++ b/litellm/integrations/langsmith.py @@ -3,8 +3,6 @@ import dotenv, os # type: ignore import requests # type: ignore from datetime import datetime - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import asyncio import types diff --git a/litellm/integrations/lunary.py b/litellm/integrations/lunary.py index 6ddf2ca59..52316f315 100644 --- a/litellm/integrations/lunary.py +++ b/litellm/integrations/lunary.py @@ -2,14 +2,11 @@ # On success + failure, log events to lunary.ai from datetime import datetime, timezone import traceback -import dotenv import importlib import sys import packaging -dotenv.load_dotenv() - # convert to {completion: xx, tokens: xx} def parse_usage(usage): @@ -62,14 +59,16 @@ class LunaryLogger: version = importlib.metadata.version("lunary") # if version < 0.1.43 then raise ImportError if packaging.version.Version(version) < packaging.version.Version("0.1.43"): - print( + print( # noqa "Lunary version outdated. Required: >= 0.1.43. Upgrade via 'pip install lunary --upgrade'" ) raise ImportError self.lunary_client = lunary except ImportError: - print("Lunary not installed. Please install it using 'pip install lunary'") + print( # noqa + "Lunary not installed. 
Please install it using 'pip install lunary'" + ) # noqa raise ImportError def log_event( diff --git a/litellm/integrations/openmeter.py b/litellm/integrations/openmeter.py index a454739d5..2c470d6f4 100644 --- a/litellm/integrations/openmeter.py +++ b/litellm/integrations/openmeter.py @@ -3,8 +3,6 @@ import dotenv, os, json import litellm - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback from litellm.integrations.custom_logger import CustomLogger from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler diff --git a/litellm/integrations/prometheus.py b/litellm/integrations/prometheus.py index 577946ce1..6fbc6ca4c 100644 --- a/litellm/integrations/prometheus.py +++ b/litellm/integrations/prometheus.py @@ -4,8 +4,6 @@ import dotenv, os import requests # type: ignore - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/prometheus_services.py b/litellm/integrations/prometheus_services.py index d276bb85b..8fce8930d 100644 --- a/litellm/integrations/prometheus_services.py +++ b/litellm/integrations/prometheus_services.py @@ -5,8 +5,6 @@ import dotenv, os import requests # type: ignore - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/prompt_layer.py b/litellm/integrations/prompt_layer.py index ce610e1ef..531ed75fe 100644 --- a/litellm/integrations/prompt_layer.py +++ b/litellm/integrations/prompt_layer.py @@ -3,8 +3,6 @@ import dotenv, os import requests # type: ignore from pydantic import BaseModel - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback diff --git a/litellm/integrations/s3.py b/litellm/integrations/s3.py index d31b15840..d131e44f0 100644 --- a/litellm/integrations/s3.py +++ b/litellm/integrations/s3.py @@ -1,9 +1,7 @@ #### What this does #### # On success + failure, log events to Supabase -import dotenv, os - -dotenv.load_dotenv() # Loading env variables using dotenv +import os import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/slack_alerting.py b/litellm/integrations/slack_alerting.py index 07c3585f0..d03922bc1 100644 --- a/litellm/integrations/slack_alerting.py +++ b/litellm/integrations/slack_alerting.py @@ -2,8 +2,6 @@ # Class for sending Slack Alerts # import dotenv, os from litellm.proxy._types import UserAPIKeyAuth - -dotenv.load_dotenv() # Loading env variables using dotenv from litellm._logging import verbose_logger, verbose_proxy_logger import litellm, threading from typing import List, Literal, Any, Union, Optional, Dict diff --git a/litellm/integrations/supabase.py b/litellm/integrations/supabase.py index 58beba8a3..4e6bf517f 100644 --- a/litellm/integrations/supabase.py +++ b/litellm/integrations/supabase.py @@ -3,8 +3,6 @@ import dotenv, os import requests # type: ignore - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm diff --git a/litellm/integrations/weights_biases.py b/litellm/integrations/weights_biases.py index 53e6070a5..a56233b22 100644 --- a/litellm/integrations/weights_biases.py +++ b/litellm/integrations/weights_biases.py @@ -21,11 +21,11 @@ try: # contains a (known) object attribute object: Literal["chat.completion", "edit", "text_completion"] - def __getitem__(self, key: K) -> V: - ... 
# pragma: no cover + def __getitem__(self, key: K) -> V: ... # noqa - def get(self, key: K, default: Optional[V] = None) -> Optional[V]: - ... # pragma: no cover + def get( # noqa + self, key: K, default: Optional[V] = None + ) -> Optional[V]: ... # pragma: no cover class OpenAIRequestResponseResolver: def __call__( @@ -173,12 +173,11 @@ except: #### What this does #### # On success, logs events to Langfuse -import dotenv, os +import os import requests import requests from datetime import datetime -dotenv.load_dotenv() # Loading env variables using dotenv import traceback diff --git a/litellm/llms/bedrock_httpx.py b/litellm/llms/bedrock_httpx.py new file mode 100644 index 000000000..c6b0327e6 --- /dev/null +++ b/litellm/llms/bedrock_httpx.py @@ -0,0 +1,124 @@ +# What is this? +## Initial implementation of calling bedrock via httpx client (allows for async calls). +## V0 - just covers cohere command-r support + +import os, types +import json +from enum import Enum +import requests, copy # type: ignore +import time +from typing import Callable, Optional, List, Literal, Union +from litellm.utils import ( + ModelResponse, + Usage, + map_finish_reason, + CustomStreamWrapper, + Message, + Choices, + get_secret, +) +import litellm +from .prompt_templates.factory import prompt_factory, custom_prompt +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler +from .base import BaseLLM +import httpx # type: ignore +from .bedrock import BedrockError + + +class BedrockLLM(BaseLLM): + """ + Example call + + ``` + curl --location --request POST 'https://bedrock-runtime.{aws_region_name}.amazonaws.com/model/{bedrock_model_name}/invoke' \ + --header 'Content-Type: application/json' \ + --header 'Accept: application/json' \ + --user "$AWS_ACCESS_KEY_ID":"$AWS_SECRET_ACCESS_KEY" \ + --aws-sigv4 "aws:amz:us-east-1:bedrock" \ + --data-raw '{ + "prompt": "Hi", + "temperature": 0, + "p": 0.9, + "max_tokens": 4096 + }' + ``` + """ + + def __init__(self) -> None: + super().__init__() + + def get_credentials( + self, + aws_access_key_id: Optional[str] = None, + aws_secret_access_key: Optional[str] = None, + aws_region_name: Optional[str] = None, + aws_session_name: Optional[str] = None, + aws_profile_name: Optional[str] = None, + aws_role_name: Optional[str] = None, + ): + """ + Return a boto3.Credentials object + """ + import boto3 + + ## CHECK IS 'os.environ/' passed in + params_to_check: List[Optional[str]] = [ + aws_access_key_id, + aws_secret_access_key, + aws_region_name, + aws_session_name, + aws_profile_name, + aws_role_name, + ] + + # Iterate over parameters and update if needed + for i, param in enumerate(params_to_check): + if param and param.startswith("os.environ/"): + _v = get_secret(param) + if _v is not None and isinstance(_v, str): + params_to_check[i] = _v + # Assign updated values back to parameters + ( + aws_access_key_id, + aws_secret_access_key, + aws_region_name, + aws_session_name, + aws_profile_name, + aws_role_name, + ) = params_to_check + + ### CHECK STS ### + if aws_role_name is not None and aws_session_name is not None: + sts_client = boto3.client( + "sts", + aws_access_key_id=aws_access_key_id, # [OPTIONAL] + aws_secret_access_key=aws_secret_access_key, # [OPTIONAL] + ) + + sts_response = sts_client.assume_role( + RoleArn=aws_role_name, RoleSessionName=aws_session_name + ) + + return sts_response["Credentials"] + elif aws_profile_name is not None: ### CHECK SESSION ### + # uses auth values from AWS profile usually stored in ~/.aws/credentials + client = 
boto3.Session(profile_name=aws_profile_name) + + return client.get_credentials() + else: + session = boto3.Session( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + region_name=aws_region_name, + ) + + return session.get_credentials() + + def completion(self, *args, **kwargs) -> Union[ModelResponse, CustomStreamWrapper]: + ## get credentials + ## generate signature + ## make request + return super().completion(*args, **kwargs) + + def embedding(self, *args, **kwargs): + return super().embedding(*args, **kwargs) diff --git a/litellm/main.py b/litellm/main.py index 9afdc7da2..8be71de0b 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -75,6 +75,7 @@ from .llms.anthropic import AnthropicChatCompletion from .llms.anthropic_text import AnthropicTextCompletion from .llms.huggingface_restapi import Huggingface from .llms.predibase import PredibaseChatCompletion +from .llms.bedrock_httpx import BedrockLLM from .llms.triton import TritonChatCompletion from .llms.prompt_templates.factory import ( prompt_factory, @@ -104,7 +105,6 @@ from litellm.utils import ( ) ####### ENVIRONMENT VARIABLES ################### -dotenv.load_dotenv() # Loading env variables using dotenv openai_chat_completions = OpenAIChatCompletion() openai_text_completions = OpenAITextCompletion() anthropic_chat_completions = AnthropicChatCompletion() @@ -114,6 +114,7 @@ azure_text_completions = AzureTextCompletion() huggingface = Huggingface() predibase_chat_completions = PredibaseChatCompletion() triton_chat_completions = TritonChatCompletion() +bedrock_chat_completion = BedrockLLM() ####### COMPLETION ENDPOINTS ################ diff --git a/litellm/proxy/example_config_yaml/custom_auth.py b/litellm/proxy/example_config_yaml/custom_auth.py index a764a647a..6cecf466c 100644 --- a/litellm/proxy/example_config_yaml/custom_auth.py +++ b/litellm/proxy/example_config_yaml/custom_auth.py @@ -1,10 +1,7 @@ from litellm.proxy._types import UserAPIKeyAuth, GenerateKeyRequest from fastapi import Request -from dotenv import load_dotenv import os -load_dotenv() - async def user_api_key_auth(request: Request, api_key: str) -> UserAPIKeyAuth: try: diff --git a/litellm/router_strategy/least_busy.py b/litellm/router_strategy/least_busy.py index 54d44b41d..417651fb3 100644 --- a/litellm/router_strategy/least_busy.py +++ b/litellm/router_strategy/least_busy.py @@ -8,8 +8,6 @@ import dotenv, os, requests, random # type: ignore from typing import Optional - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback from litellm.caching import DualCache from litellm.integrations.custom_logger import CustomLogger diff --git a/litellm/router_strategy/lowest_cost.py b/litellm/router_strategy/lowest_cost.py index 279af2ae9..fde7781b9 100644 --- a/litellm/router_strategy/lowest_cost.py +++ b/litellm/router_strategy/lowest_cost.py @@ -1,12 +1,11 @@ #### What this does #### # picks based on response time (for streaming, this is time to first token) from pydantic import BaseModel, Extra, Field, root_validator -import dotenv, os, requests, random # type: ignore +import os, requests, random # type: ignore from typing import Optional, Union, List, Dict from datetime import datetime, timedelta import random -dotenv.load_dotenv() # Loading env variables using dotenv import traceback from litellm.caching import DualCache from litellm.integrations.custom_logger import CustomLogger diff --git a/litellm/router_strategy/lowest_latency.py b/litellm/router_strategy/lowest_latency.py index afdfc1779..a7b93d344 
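A note on `get_credentials` above: parameters arrive either as literal values or as `os.environ/`-prefixed references resolved through `get_secret`, and the method then picks one of three paths: STS assume-role when both `aws_role_name` and `aws_session_name` are set, otherwise a named profile from `~/.aws/credentials`, otherwise a plain `boto3.Session` over whatever keys were passed. One wrinkle: the STS branch returns the raw `sts_response["Credentials"]` dict, while the other two branches return a botocore credentials object, so callers have to tolerate both shapes. A sketch of how a caller exercises the three paths (every ARN, key, and profile name below is a placeholder):

```python
# Sketch of the three credential paths implemented above; all values are
# placeholders, not working credentials.
from litellm.llms.bedrock_httpx import BedrockLLM

llm = BedrockLLM()

# 1) STS assume-role: chosen when both the role ARN and a session name are set.
#    Note this path returns sts_response["Credentials"], a plain dict.
creds = llm.get_credentials(
    aws_role_name="arn:aws:iam::123456789012:role/bedrock-caller",
    aws_session_name="litellm-bedrock",
)

# 2) Named profile from ~/.aws/credentials (returns botocore credentials).
creds = llm.get_credentials(aws_profile_name="default")

# 3) Static keys, or boto3's default provider chain when everything is None.
creds = llm.get_credentials(
    aws_access_key_id="AKIAEXAMPLE",
    aws_secret_access_key="example-secret",
    aws_region_name="us-west-2",
)
```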
100644 --- a/litellm/router_strategy/lowest_latency.py +++ b/litellm/router_strategy/lowest_latency.py @@ -5,8 +5,6 @@ import dotenv, os, requests, random # type: ignore from typing import Optional, Union, List, Dict from datetime import datetime, timedelta import random - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback from litellm.caching import DualCache from litellm.integrations.custom_logger import CustomLogger diff --git a/litellm/router_strategy/lowest_tpm_rpm.py b/litellm/router_strategy/lowest_tpm_rpm.py index 0a7773a84..625db7048 100644 --- a/litellm/router_strategy/lowest_tpm_rpm.py +++ b/litellm/router_strategy/lowest_tpm_rpm.py @@ -4,8 +4,6 @@ import dotenv, os, requests, random from typing import Optional, Union, List, Dict from datetime import datetime - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback from litellm import token_counter from litellm.caching import DualCache diff --git a/litellm/router_strategy/lowest_tpm_rpm_v2.py b/litellm/router_strategy/lowest_tpm_rpm_v2.py index f7a55d970..23e55f4a3 100644 --- a/litellm/router_strategy/lowest_tpm_rpm_v2.py +++ b/litellm/router_strategy/lowest_tpm_rpm_v2.py @@ -5,8 +5,6 @@ import dotenv, os, requests, random from typing import Optional, Union, List, Dict import datetime as datetime_og from datetime import datetime - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback, asyncio, httpx import litellm from litellm import token_counter diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index 04f4cc511..214dc105b 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -2584,6 +2584,15 @@ def test_completion_chat_sagemaker_mistral(): # test_completion_chat_sagemaker_mistral() +def test_completion_bedrock_command_r(): + response = completion( + model="bedrock/cohere.command-r-plus-v1:0", + messages=[{"role": "user", "content": "Hey! how's it going?"}], + ) + + print(f"response: {response}") + + def test_completion_bedrock_titan_null_response(): try: response = completion( diff --git a/litellm/utils.py b/litellm/utils.py index 9218f92a3..0fd7963ae 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -117,7 +117,6 @@ MAX_THREADS = 100 # Create a ThreadPoolExecutor executor = ThreadPoolExecutor(max_workers=MAX_THREADS) -dotenv.load_dotenv() # Loading env variables using dotenv sentry_sdk_instance = None capture_exception = None add_breadcrumb = None From 62276fc22102aafbc9ea91d0f978a39b252ce75a Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 13:45:32 -0700 Subject: [PATCH 035/184] docs link to litellm batch completions --- docs/my-website/docs/completion/batching.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/my-website/docs/completion/batching.md b/docs/my-website/docs/completion/batching.md index 05683b3dd..09f59f743 100644 --- a/docs/my-website/docs/completion/batching.md +++ b/docs/my-website/docs/completion/batching.md @@ -4,6 +4,12 @@ LiteLLM allows you to: * Send 1 completion call to many models: Return Fastest Response * Send 1 completion call to many models: Return All Responses +:::info + +Trying to do batch completion on LiteLLM Proxy ? 
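For orientation, a minimal sketch of the `batch_completion` helper this page documents (model name and prompts are placeholders):

```python
from litellm import batch_completion

# each inner list is a complete `messages` array and becomes its own
# litellm.completion() call against the same model
responses = batch_completion(
    model="gpt-3.5-turbo",
    messages=[
        [{"role": "user", "content": "good morning"}],
        [{"role": "user", "content": "write a haiku about the sea"}],
    ],
)
print([r.choices[0].message.content for r in responses])
```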
Go here: https://docs.litellm.ai/docs/proxy/user_keys#beta-batch-completions---pass-model-as-list + +::: + ## Send multiple completion calls to 1 model In the batch_completion method, you provide a list of `messages` where each sub-list of messages is passed to `litellm.completion()`, allowing you to process multiple prompts efficiently in a single API call. From 038522ab24d92e0644834dcc2ee0b9d7d5899417 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 14:02:16 -0700 Subject: [PATCH 036/184] fix - support debugging litellm params --- litellm/integrations/langfuse.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index bc1d50190..b9c8fafc9 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -318,11 +318,12 @@ class LangFuseLogger: else: clean_metadata[key] = value - session_id = clean_metadata.get("session_id", None) - trace_name = clean_metadata.get("trace_name", None) - trace_id = clean_metadata.get("trace_id", None) - existing_trace_id = clean_metadata.get("existing_trace_id", None) - update_trace_keys = clean_metadata.get("update_trace_keys", []) + session_id = clean_metadata.pop("session_id", None) + trace_name = clean_metadata.pop("trace_name", None) + trace_id = clean_metadata.pop("trace_id", None) + existing_trace_id = clean_metadata.pop("existing_trace_id", None) + update_trace_keys = clean_metadata.pop("update_trace_keys", []) + debug = clean_metadata.pop("debug", None) if trace_name is None and existing_trace_id is None: # just log `litellm-{call_type}` as the trace name @@ -342,10 +343,6 @@ class LangFuseLogger: if updated_trace_value is not None: trace_params[trace_param_key] = updated_trace_value - if "metadata" in update_trace_keys: - # log metadata in the trace - trace_params["metadata"] = clean_metadata - # Pop the trace specific keys that would have been popped if there were a new trace for key in list( filter(lambda key: key.startswith("trace_"), clean_metadata.keys()) @@ -367,7 +364,6 @@ class LangFuseLogger: "trace_version", clean_metadata.get("version", None) ), # If provided just version, it will applied to the trace as well, if applied a trace version it will take precedence "user_id": user_id, - "metadata": clean_metadata, } for key in list( filter(lambda key: key.startswith("trace_"), clean_metadata.keys()) @@ -381,6 +377,13 @@ class LangFuseLogger: else: trace_params["output"] = output + if debug == True: + if "metadata" in trace_params: + # log the raw_metadata in the trace + trace_params["metadata"]["metadata_passed_to_litellm"] = metadata + else: + trace_params["metadata"] = {"metadata_passed_to_litellm": metadata} + cost = kwargs.get("response_cost", None) print_verbose(f"trace: {cost}") From 97c81a5c7ed8b25a22c09d1f87177cf40394ae27 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 14:03:40 -0700 Subject: [PATCH 037/184] fix langfuse test --- litellm/tests/test_alangfuse.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/litellm/tests/test_alangfuse.py b/litellm/tests/test_alangfuse.py index 6f9b8d626..4eeba0721 100644 --- a/litellm/tests/test_alangfuse.py +++ b/litellm/tests/test_alangfuse.py @@ -247,7 +247,9 @@ async def test_langfuse_logging_metadata(langfuse_client): expected_filtered_metadata_keys = { "trace_name", "trace_id", + "existing_trace_id", "trace_user_id", + "session_id", "tags", "generation_name", "generation_id", From 1bf8e7ac75f0d44e911801f7ce23c9944f1d2f79 Mon 
Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 14:08:39 -0700 Subject: [PATCH 038/184] fix langfuse debug mode --- litellm/integrations/langfuse.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index b9c8fafc9..90c274a90 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -377,7 +377,7 @@ class LangFuseLogger: else: trace_params["output"] = output - if debug == True: + if debug == True or (isinstance(debug, str) and debug.lower() == "true"): if "metadata" in trace_params: # log the raw_metadata in the trace trace_params["metadata"]["metadata_passed_to_litellm"] = metadata From 360d2840583371b27b03f038d401a2d86db8276f Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 14:12:17 -0700 Subject: [PATCH 039/184] docs - debug langfuse --- docs/my-website/docs/observability/langfuse_integration.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/my-website/docs/observability/langfuse_integration.md b/docs/my-website/docs/observability/langfuse_integration.md index ebf20b633..7ba204497 100644 --- a/docs/my-website/docs/observability/langfuse_integration.md +++ b/docs/my-website/docs/observability/langfuse_integration.md @@ -136,6 +136,7 @@ response = completion( "existing_trace_id": "trace-id22", "trace_metadata": {"key": "updated_trace_value"}, # The new value to use for the langfuse Trace Metadata "update_trace_keys": ["input", "output", "trace_metadata"], # Updates the trace input & output to be this generations input & output also updates the Trace Metadata to match the passed in value + "debug_langfuse": True, # Will log the exact metadata sent to litellm for the trace/generation as `metadata_passed_to_litellm` }, ) From a41bef52971528b1b740f9ccd5e7c3dacb132ea2 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 14:12:26 -0700 Subject: [PATCH 040/184] debug langfuse --- litellm/integrations/langfuse.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index 90c274a90..5cdf83a7c 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -323,7 +323,7 @@ class LangFuseLogger: trace_id = clean_metadata.pop("trace_id", None) existing_trace_id = clean_metadata.pop("existing_trace_id", None) update_trace_keys = clean_metadata.pop("update_trace_keys", []) - debug = clean_metadata.pop("debug", None) + debug = clean_metadata.pop("debug_langfuse", None) if trace_name is None and existing_trace_id is None: # just log `litellm-{call_type}` as the trace name From d4288b134ba32c2bc8a1085bdfa5f55b6825797b Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 14:24:48 -0700 Subject: [PATCH 041/184] fix - use csv list for batch completions --- litellm/proxy/proxy_server.py | 5 +++-- tests/test_openai_endpoints.py | 5 +---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 0ae498baa..210912fcf 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -3673,8 +3673,9 @@ async def chat_completion( # skip router if user passed their key if "api_key" in data: tasks.append(litellm.acompletion(**data)) - elif isinstance(data["model"], list) and llm_router is not None: - _models = data.pop("model") + elif "," in data["model"] and llm_router is not None: + _models_csv_string = data.pop("model") + _models = 
_models_csv_string.split(",") tasks.append(llm_router.abatch_completion(models=_models, **data)) elif "user_config" in data: # initialize a new router instance. make request using this Router diff --git a/tests/test_openai_endpoints.py b/tests/test_openai_endpoints.py index 7bc97ca59..43dcae3cd 100644 --- a/tests/test_openai_endpoints.py +++ b/tests/test_openai_endpoints.py @@ -424,10 +424,7 @@ async def test_batch_chat_completions(): response = await chat_completion( session=session, key="sk-1234", - model=[ - "gpt-3.5-turbo", - "fake-openai-endpoint", - ], + model="gpt-3.5-turbo,fake-openai-endpoint", ) print(f"response: {response}") From bf2194d7fc5af49acc24f0e505bd14322f92fe3e Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 14:27:20 -0700 Subject: [PATCH 042/184] feat - support model as csv on proxy --- docs/my-website/docs/proxy/user_keys.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/my-website/docs/proxy/user_keys.md b/docs/my-website/docs/proxy/user_keys.md index 7aba832eb..d86feb29c 100644 --- a/docs/my-website/docs/proxy/user_keys.md +++ b/docs/my-website/docs/proxy/user_keys.md @@ -365,12 +365,14 @@ curl --location 'http://0.0.0.0:4000/moderations' \ ## Advanced -### (BETA) Batch Completions - pass `model` as List +### (BETA) Batch Completions - pass multiple models Use this when you want to send 1 request to N Models #### Expected Request Format +Pass model as a string of comma separated value of models. Example `"model"="llama3,gpt-3.5-turbo"` + This same request will be sent to the following model groups on the [litellm proxy config.yaml](https://docs.litellm.ai/docs/proxy/configs) - `model_name="llama3"` - `model_name="gpt-3.5-turbo"` @@ -380,7 +382,7 @@ curl --location 'http://localhost:4000/chat/completions' \ --header 'Authorization: Bearer sk-1234' \ --header 'Content-Type: application/json' \ --data '{ - "model": ["llama3", "gpt-3.5-turbo"], + "model": "llama3,gpt-3.5-turbo", "max_tokens": 10, "user": "litellm2", "messages": [ From 25febe41c470b32b1dddd57ea64b199bdd18f9a6 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 14:37:32 -0700 Subject: [PATCH 043/184] docs - using batch completions with python --- docs/my-website/docs/proxy/user_keys.md | 96 +++++++++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/docs/my-website/docs/proxy/user_keys.md b/docs/my-website/docs/proxy/user_keys.md index d86feb29c..cda3a46af 100644 --- a/docs/my-website/docs/proxy/user_keys.md +++ b/docs/my-website/docs/proxy/user_keys.md @@ -377,6 +377,95 @@ This same request will be sent to the following model groups on the [litellm pro - `model_name="llama3"` - `model_name="gpt-3.5-turbo"` + + + + + +```python +import openai + +client = openai.OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000") + +response = client.chat.completions.create( + model="gpt-3.5-turbo,llama3", + messages=[ + {"role": "user", "content": "this is a test request, write a short poem"} + ], +) + +print(response) +``` + + + +#### Expected Response Format + +Get a list of responses when `model` is passed as a list + +```python +[ + ChatCompletion( + id='chatcmpl-9NoYhS2G0fswot0b6QpoQgmRQMaIf', + choices=[ + Choice( + finish_reason='stop', + index=0, + logprobs=None, + message=ChatCompletionMessage( + content='In the depths of my soul, a spark ignites\nA light that shines so pure and bright\nIt dances and leaps, refusing to die\nA flame of hope that reaches the sky\n\nIt warms my heart and fills me with bliss\nA reminder that in darkness, 
there is light to kiss\nSo I hold onto this fire, this guiding light\nAnd let it lead me through the darkest night.', + role='assistant', + function_call=None, + tool_calls=None + ) + ) + ], + created=1715462919, + model='gpt-3.5-turbo-0125', + object='chat.completion', + system_fingerprint=None, + usage=CompletionUsage( + completion_tokens=83, + prompt_tokens=17, + total_tokens=100 + ) + ), + ChatCompletion( + id='chatcmpl-4ac3e982-da4e-486d-bddb-ed1d5cb9c03c', + choices=[ + Choice( + finish_reason='stop', + index=0, + logprobs=None, + message=ChatCompletionMessage( + content="A test request, and I'm delighted!\nHere's a short poem, just for you:\n\nMoonbeams dance upon the sea,\nA path of light, for you to see.\nThe stars up high, a twinkling show,\nA night of wonder, for all to know.\n\nThe world is quiet, save the night,\nA peaceful hush, a gentle light.\nThe world is full, of beauty rare,\nA treasure trove, beyond compare.\n\nI hope you enjoyed this little test,\nA poem born, of whimsy and jest.\nLet me know, if there's anything else!", + role='assistant', + function_call=None, + tool_calls=None + ) + ) + ], + created=1715462919, + model='groq/llama3-8b-8192', + object='chat.completion', + system_fingerprint='fp_a2c8d063cb', + usage=CompletionUsage( + completion_tokens=120, + prompt_tokens=20, + total_tokens=140 + ) + ) +] +``` + + + + + + + + + ```shell curl --location 'http://localhost:4000/chat/completions' \ --header 'Authorization: Bearer sk-1234' \ @@ -395,6 +484,8 @@ curl --location 'http://localhost:4000/chat/completions' \ ``` + + #### Expected Response Format Get a list of responses when `model` is passed as a list @@ -449,6 +540,11 @@ Get a list of responses when `model` is passed as a list ``` + + + + + ### Pass User LLM API Keys, Fallbacks From 59c8c0adff167d373643e3872f2b4e5f5142fa81 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 11 May 2024 15:04:38 -0700 Subject: [PATCH 044/184] feat(bedrock_httpx.py): working cohere command r async calls --- litellm/__init__.py | 1 + litellm/llms/bedrock_httpx.py | 316 +++++++++++++++++++++- litellm/llms/custom_httpx/http_handler.py | 27 +- litellm/main.py | 45 ++- litellm/tests/test_completion.py | 1 + litellm/types/llms/bedrock.py | 6 + 6 files changed, 364 insertions(+), 32 deletions(-) create mode 100644 litellm/types/llms/bedrock.py diff --git a/litellm/__init__.py b/litellm/__init__.py index aedf42139..67170c68d 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -670,6 +670,7 @@ from .llms.sagemaker import SagemakerConfig from .llms.ollama import OllamaConfig from .llms.ollama_chat import OllamaChatConfig from .llms.maritalk import MaritTalkConfig +from .llms.bedrock_httpx import AmazonCohereChatConfig from .llms.bedrock import ( AmazonTitanConfig, AmazonAI21Config, diff --git a/litellm/llms/bedrock_httpx.py b/litellm/llms/bedrock_httpx.py index c6b0327e6..d3062b5ed 100644 --- a/litellm/llms/bedrock_httpx.py +++ b/litellm/llms/bedrock_httpx.py @@ -7,7 +7,7 @@ import json from enum import Enum import requests, copy # type: ignore import time -from typing import Callable, Optional, List, Literal, Union +from typing import Callable, Optional, List, Literal, Union, Any, TypedDict, Tuple from litellm.utils import ( ModelResponse, Usage, @@ -18,11 +18,110 @@ from litellm.utils import ( get_secret, ) import litellm -from .prompt_templates.factory import prompt_factory, custom_prompt -from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler +from .prompt_templates.factory import prompt_factory, 
custom_prompt, cohere_message_pt +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler from .base import BaseLLM import httpx # type: ignore -from .bedrock import BedrockError +from .bedrock import BedrockError, convert_messages_to_prompt +from litellm.types.llms.bedrock import * + + +class AmazonCohereChatConfig: + """ + Reference - https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-cohere-command-r-plus.html + """ + + documents: Optional[List[Document]] = None + search_queries_only: Optional[bool] = None + preamble: Optional[str] = None + max_tokens: Optional[int] = None + temperature: Optional[float] = None + p: Optional[float] = None + k: Optional[float] = None + prompt_truncation: Optional[str] = None + frequency_penalty: Optional[float] = None + presence_penalty: Optional[float] = None + seed: Optional[int] = None + return_prompt: Optional[bool] = None + stop_sequences: Optional[List[str]] = None + raw_prompting: Optional[bool] = None + + def __init__( + self, + documents: Optional[List[Document]] = None, + search_queries_only: Optional[bool] = None, + preamble: Optional[str] = None, + max_tokens: Optional[int] = None, + temperature: Optional[float] = None, + p: Optional[float] = None, + k: Optional[float] = None, + prompt_truncation: Optional[str] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + seed: Optional[int] = None, + return_prompt: Optional[bool] = None, + stop_sequences: Optional[str] = None, + raw_prompting: Optional[bool] = None, + ) -> None: + locals_ = locals() + for key, value in locals_.items(): + if key != "self" and value is not None: + setattr(self.__class__, key, value) + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + + def get_supported_openai_params(self) -> List[str]: + return [ + "max_tokens", + "stream", + "stop", + "temperature", + "top_p", + "frequency_penalty", + "presence_penalty", + "seed", + "stop", + ] + + def map_openai_params( + self, non_default_params: dict, optional_params: dict + ) -> dict: + for param, value in non_default_params.items(): + if param == "max_tokens": + optional_params["max_tokens"] = value + if param == "stream": + optional_params["stream"] = value + if param == "stop": + if isinstance(value, str): + value = [value] + optional_params["stop_sequences"] = value + if param == "temperature": + optional_params["temperature"] = value + if param == "top_p": + optional_params["p"] = value + if param == "frequency_penalty": + optional_params["frequency_penalty"] = value + if param == "presence_penalty": + optional_params["presence_penalty"] = value + if "seed": + optional_params["seed"] = value + return optional_params class BedrockLLM(BaseLLM): @@ -47,6 +146,48 @@ class BedrockLLM(BaseLLM): def __init__(self) -> None: super().__init__() + def convert_messages_to_prompt( + self, model, messages, provider, custom_prompt_dict + ) -> Tuple[str, Optional[list]]: + # handle anthropic prompts and amazon titan prompts + prompt = "" + chat_history: Optional[list] = None + if provider == "anthropic" or provider == "amazon": + if model in custom_prompt_dict: + # check if the model has a registered custom prompt + model_prompt_details = custom_prompt_dict[model] + prompt = custom_prompt( + role_dict=model_prompt_details["roles"], + 
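# NOTE on map_openai_params above: the final branch reads `if "seed":`, a
# non-empty string literal that is always truthy, so every parameter in the
# loop overwrites optional_params["seed"] with its own value. The intended
# check is presumably:
#
#     if param == "seed":
#         optional_params["seed"] = value
#
# (Also, get_supported_openai_params() lists "stop" twice.)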
initial_prompt_value=model_prompt_details["initial_prompt_value"], + final_prompt_value=model_prompt_details["final_prompt_value"], + messages=messages, + ) + else: + prompt = prompt_factory( + model=model, messages=messages, custom_llm_provider="bedrock" + ) + elif provider == "mistral": + prompt = prompt_factory( + model=model, messages=messages, custom_llm_provider="bedrock" + ) + elif provider == "meta": + prompt = prompt_factory( + model=model, messages=messages, custom_llm_provider="bedrock" + ) + elif provider == "cohere": + prompt, chat_history = cohere_message_pt(messages=messages) + else: + prompt = "" + for message in messages: + if "role" in message: + if message["role"] == "user": + prompt += f"{message['content']}" + else: + prompt += f"{message['content']}" + else: + prompt += f"{message['content']}" + return prompt, chat_history # type: ignore + def get_credentials( self, aws_access_key_id: Optional[str] = None, @@ -114,11 +255,168 @@ class BedrockLLM(BaseLLM): return session.get_credentials() - def completion(self, *args, **kwargs) -> Union[ModelResponse, CustomStreamWrapper]: - ## get credentials - ## generate signature - ## make request - return super().completion(*args, **kwargs) + def completion( + self, + model: str, + messages: list, + custom_prompt_dict: dict, + model_response: ModelResponse, + print_verbose: Callable, + encoding, + logging_obj, + optional_params: dict, + timeout: Optional[Union[float, httpx.Timeout]], + litellm_params=None, + logger_fn=None, + extra_headers: Optional[dict] = None, + client: Optional[HTTPHandler] = None, + ) -> Union[ModelResponse, CustomStreamWrapper]: + try: + import boto3 + + from botocore.auth import SigV4Auth + from botocore.awsrequest import AWSRequest + from botocore.credentials import Credentials + except ImportError as e: + raise ImportError("Missing boto3 to call bedrock. 
Run 'pip install boto3'.") + + ## CREDENTIALS ## + # pop aws_secret_access_key, aws_access_key_id, aws_region_name from kwargs, since completion calls fail with them + aws_secret_access_key = optional_params.pop("aws_secret_access_key", None) + aws_access_key_id = optional_params.pop("aws_access_key_id", None) + aws_region_name = optional_params.pop("aws_region_name", None) + aws_role_name = optional_params.pop("aws_role_name", None) + aws_session_name = optional_params.pop("aws_session_name", None) + aws_profile_name = optional_params.pop("aws_profile_name", None) + aws_bedrock_runtime_endpoint = optional_params.pop( + "aws_bedrock_runtime_endpoint", None + ) # https://bedrock-runtime.{region_name}.amazonaws.com + + ### SET REGION NAME ### + if aws_region_name is None: + # check env # + litellm_aws_region_name = get_secret("AWS_REGION_NAME", None) + + if litellm_aws_region_name is not None and isinstance( + litellm_aws_region_name, str + ): + aws_region_name = litellm_aws_region_name + + standard_aws_region_name = get_secret("AWS_REGION", None) + if standard_aws_region_name is not None and isinstance( + standard_aws_region_name, str + ): + aws_region_name = standard_aws_region_name + + if aws_region_name is None: + aws_region_name = "us-west-2" + + credentials: Credentials = self.get_credentials( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + aws_region_name=aws_region_name, + aws_session_name=aws_session_name, + aws_profile_name=aws_profile_name, + aws_role_name=aws_role_name, + ) + + ### SET RUNTIME ENDPOINT ### + endpoint_url = "" + env_aws_bedrock_runtime_endpoint = get_secret("AWS_BEDROCK_RUNTIME_ENDPOINT") + if aws_bedrock_runtime_endpoint is not None and isinstance( + aws_bedrock_runtime_endpoint, str + ): + endpoint_url = aws_bedrock_runtime_endpoint + elif env_aws_bedrock_runtime_endpoint and isinstance( + env_aws_bedrock_runtime_endpoint, str + ): + endpoint_url = env_aws_bedrock_runtime_endpoint + else: + endpoint_url = f"https://bedrock-runtime.{aws_region_name}.amazonaws.com" + + endpoint_url = f"{endpoint_url}/model/{model}/invoke" + + sigv4 = SigV4Auth(credentials, "bedrock", aws_region_name) + + provider = model.split(".")[0] + prompt, chat_history = self.convert_messages_to_prompt( + model, messages, provider, custom_prompt_dict + ) + inference_params = copy.deepcopy(optional_params) + stream = inference_params.pop("stream", False) + + if provider == "cohere": + if model.startswith("cohere.command-r"): + ## LOAD CONFIG + config = litellm.AmazonCohereChatConfig().get_config() + for k, v in config.items(): + if ( + k not in inference_params + ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in + inference_params[k] = v + if optional_params.get("stream", False) == True: + inference_params["stream"] = ( + True # cohere requires stream = True in inference params + ) + + _data = {"message": prompt, **inference_params} + if chat_history is not None: + _data["chat_history"] = chat_history + data = json.dumps(_data) + else: + ## LOAD CONFIG + config = litellm.AmazonCohereConfig.get_config() + for k, v in config.items(): + if ( + k not in inference_params + ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in + inference_params[k] = v + if optional_params.get("stream", False) == True: + inference_params["stream"] = ( + True # cohere requires stream = True in inference params + ) + data = json.dumps({"prompt": prompt, **inference_params}) + else: 
+ raise Exception("UNSUPPORTED PROVIDER") + + ## COMPLETION CALL + headers = {"Content-Type": "application/json"} + request = AWSRequest( + method="POST", url=endpoint_url, data=data, headers=headers + ) + sigv4.add_auth(request) + prepped = request.prepare() + + if client is None: + _params = {} + if timeout is not None: + if isinstance(timeout, float) or isinstance(timeout, int): + timeout = httpx.Timeout(timeout) + _params["timeout"] = timeout + self.client = HTTPHandler(**_params) # type: ignore + else: + self.client = client + + ## LOGGING + logging_obj.pre_call( + input=messages, + api_key="", + additional_args={ + "complete_input_dict": data, + "api_base": prepped.url, + "headers": prepped.headers, + }, + ) + + response = self.client.post(url=prepped.url, headers=prepped.headers, data=data) # type: ignore + + try: + response.raise_for_status() + except httpx.HTTPStatusError as err: + error_code = err.response.status_code + raise BedrockError(status_code=error_code, message=response.text) + + return response def embedding(self, *args, **kwargs): return super().embedding(*args, **kwargs) diff --git a/litellm/llms/custom_httpx/http_handler.py b/litellm/llms/custom_httpx/http_handler.py index 7c7d4938a..529ba3b39 100644 --- a/litellm/llms/custom_httpx/http_handler.py +++ b/litellm/llms/custom_httpx/http_handler.py @@ -58,16 +58,25 @@ class AsyncHTTPHandler: class HTTPHandler: def __init__( - self, timeout: httpx.Timeout = _DEFAULT_TIMEOUT, concurrent_limit=1000 + self, + timeout: Optional[httpx.Timeout] = None, + concurrent_limit=1000, + client: Optional[httpx.Client] = None, ): - # Create a client with a connection pool - self.client = httpx.Client( - timeout=timeout, - limits=httpx.Limits( - max_connections=concurrent_limit, - max_keepalive_connections=concurrent_limit, - ), - ) + if timeout is None: + timeout = _DEFAULT_TIMEOUT + + if client is None: + # Create a client with a connection pool + self.client = httpx.Client( + timeout=timeout, + limits=httpx.Limits( + max_connections=concurrent_limit, + max_keepalive_connections=concurrent_limit, + ), + ) + else: + self.client = client def close(self): # Close the client when you're done with it diff --git a/litellm/main.py b/litellm/main.py index 8be71de0b..d2f3939fd 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -1922,20 +1922,37 @@ def completion( elif custom_llm_provider == "bedrock": # boto3 reads keys from .env custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict - response = bedrock.completion( - model=model, - messages=messages, - custom_prompt_dict=litellm.custom_prompt_dict, - model_response=model_response, - print_verbose=print_verbose, - optional_params=optional_params, - litellm_params=litellm_params, - logger_fn=logger_fn, - encoding=encoding, - logging_obj=logging, - extra_headers=extra_headers, - timeout=timeout, - ) + + if "cohere" in model: + response = bedrock_chat_completion.completion( + model=model, + messages=messages, + custom_prompt_dict=litellm.custom_prompt_dict, + model_response=model_response, + print_verbose=print_verbose, + optional_params=optional_params, + litellm_params=litellm_params, + logger_fn=logger_fn, + encoding=encoding, + logging_obj=logging, + extra_headers=extra_headers, + timeout=timeout, + ) + else: + response = bedrock.completion( + model=model, + messages=messages, + custom_prompt_dict=litellm.custom_prompt_dict, + model_response=model_response, + print_verbose=print_verbose, + optional_params=optional_params, + litellm_params=litellm_params, + logger_fn=logger_fn, 
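# NOTE on the request path in BedrockLLM.completion above: it follows
# botocore's SigV4 signing flow: build an AWSRequest, sign it in place with
# SigV4Auth.add_auth(), prepare it, then send the signed headers with a plain
# HTTP client. Reduced to its core (a sketch over the names defined above):
#
#     from botocore.auth import SigV4Auth
#     from botocore.awsrequest import AWSRequest
#
#     request = AWSRequest(
#         method="POST",
#         url=endpoint_url,
#         data=data,
#         headers={"Content-Type": "application/json"},
#     )
#     SigV4Auth(credentials, "bedrock", aws_region_name).add_auth(request)
#     prepped = request.prepare()
#     response = client.post(url=prepped.url, headers=prepped.headers, data=data)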
+ encoding=encoding, + logging_obj=logging, + extra_headers=extra_headers, + timeout=timeout, + ) if ( "stream" in optional_params diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index 214dc105b..0cf6dda83 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -2585,6 +2585,7 @@ def test_completion_chat_sagemaker_mistral(): def test_completion_bedrock_command_r(): + litellm.set_verbose = True response = completion( model="bedrock/cohere.command-r-plus-v1:0", messages=[{"role": "user", "content": "Hey! how's it going?"}], diff --git a/litellm/types/llms/bedrock.py b/litellm/types/llms/bedrock.py new file mode 100644 index 000000000..87ef6fd3c --- /dev/null +++ b/litellm/types/llms/bedrock.py @@ -0,0 +1,6 @@ +from typing import TypedDict + + +class Document(TypedDict): + title: str + snippet: str From 9167ff0d759acbfca0ff98de8d472615faa3c9df Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Sat, 11 May 2024 15:22:30 -0700 Subject: [PATCH 045/184] Set fake env vars for `client_no_auth` fixture This allows all of the tests in `test_proxy_server.py` to pass, with the exception of `test_load_router_config`, without needing to set up real environment variables. Before: ```shell $ env -i PATH=$PATH poetry run pytest litellm/tests/test_proxy_server.py -k 'not test_load_router_config' --disable-warnings ... ========================================================== short test summary info =========================================================== ERROR litellm/tests/test_proxy_server.py::test_bedrock_embedding - openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY enviro... ERROR litellm/tests/test_proxy_server.py::test_chat_completion - openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY enviro... ERROR litellm/tests/test_proxy_server.py::test_chat_completion_azure - openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY enviro... ERROR litellm/tests/test_proxy_server.py::test_chat_completion_optional_params - openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY enviro... ERROR litellm/tests/test_proxy_server.py::test_embedding - openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY enviro... ERROR litellm/tests/test_proxy_server.py::test_engines_model_chat_completions - openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY enviro... ERROR litellm/tests/test_proxy_server.py::test_health - openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY enviro... ERROR litellm/tests/test_proxy_server.py::test_img_gen - openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY enviro... ERROR litellm/tests/test_proxy_server.py::test_openai_deployments_model_chat_completions_azure - openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY enviro... 
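# NOTE: the fixture added in the diff below removes these errors by
# monkeypatching fake provider keys before the app reads them; the same
# pattern extends to any other provider the tests touch, e.g. (hypothetical
# extra variable):
#     monkeypatch.setenv("ANTHROPIC_API_KEY", "fake_anthropic_api_key")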
========================================== 2 skipped, 1 deselected, 39 warnings, 9 errors in 3.24s =========================================== ``` After: ```shell $ env -i PATH=$PATH poetry run pytest litellm/tests/test_proxy_server.py -k 'not test_load_router_config' --disable-warnings ============================================================ test session starts ============================================================= platform darwin -- Python 3.12.3, pytest-7.4.4, pluggy-1.5.0 rootdir: /Users/abramowi/Code/OpenSource/litellm plugins: anyio-4.3.0, asyncio-0.23.6, mock-3.14.0 asyncio: mode=Mode.STRICT collected 12 items / 1 deselected / 11 selected litellm/tests/test_proxy_server.py s.........s [100%] ========================================== 9 passed, 2 skipped, 1 deselected, 48 warnings in 8.42s =========================================== ``` --- litellm/tests/test_proxy_server.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py index c1965dc2a..76b2b2d18 100644 --- a/litellm/tests/test_proxy_server.py +++ b/litellm/tests/test_proxy_server.py @@ -109,7 +109,17 @@ def mock_patch_aimage_generation(): @pytest.fixture(scope="function") -def client_no_auth(): +def fake_env_vars(monkeypatch): + # Set some fake environment variables + monkeypatch.setenv("OPENAI_API_KEY", "fake_openai_api_key") + monkeypatch.setenv("OPENAI_API_BASE", "http://fake-openai-api-base") + monkeypatch.setenv("AZURE_API_BASE", "http://fake-azure-api-base") + monkeypatch.setenv("AZURE_OPENAI_API_KEY", "fake_azure_openai_api_key") + monkeypatch.setenv("AZURE_SWEDEN_API_BASE", "http://fake-azure-sweden-api-base") + + +@pytest.fixture(scope="function") +def client_no_auth(fake_env_vars): # Assuming litellm.proxy.proxy_server is an object from litellm.proxy.proxy_server import cleanup_router_config_variables From 91a6a0eef40c4f1a9f91a8e78a989f4091fc10c7 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 15:57:06 -0700 Subject: [PATCH 046/184] (Fix) - linting errors --- litellm/exceptions.py | 6 +- litellm/proxy/hooks/azure_content_safety.py | 3 +- litellm/utils.py | 71 +++++++++++++++------ 3 files changed, 54 insertions(+), 26 deletions(-) diff --git a/litellm/exceptions.py b/litellm/exceptions.py index 7c3471acf..d239f1e12 100644 --- a/litellm/exceptions.py +++ b/litellm/exceptions.py @@ -202,13 +202,11 @@ class BudgetExceededError(Exception): ## DEPRECATED ## class InvalidRequestError(openai.BadRequestError): # type: ignore - def __init__( - self, message, model, llm_provider, response: Optional[httpx.Response] = None - ): + def __init__(self, message, model, llm_provider): self.status_code = 400 self.message = message self.model = model self.llm_provider = llm_provider super().__init__( - self.message, response=response, body=None + self.message, f"{self.model}" ) # Call the base class constructor with the parameters it needs diff --git a/litellm/proxy/hooks/azure_content_safety.py b/litellm/proxy/hooks/azure_content_safety.py index 433571c15..5b5139f8c 100644 --- a/litellm/proxy/hooks/azure_content_safety.py +++ b/litellm/proxy/hooks/azure_content_safety.py @@ -4,6 +4,7 @@ from litellm.proxy._types import UserAPIKeyAuth import litellm, traceback, sys, uuid from fastapi import HTTPException from litellm._logging import verbose_proxy_logger +from typing import Optional class _PROXY_AzureContentSafety( @@ -71,7 +72,7 @@ class _PROXY_AzureContentSafety( return result - async def test_violation(self, 
content: str, source: str = None): + async def test_violation(self, content: str, source: Optional[str] = None): verbose_proxy_logger.debug("Testing Azure Content-Safety for: %s", content) # Construct a request diff --git a/litellm/utils.py b/litellm/utils.py index e787d2155..2eee41d8d 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -35,6 +35,7 @@ from dataclasses import ( import litellm._service_logger # for storing API inputs, outputs, and metadata from litellm.llms.custom_httpx.http_handler import HTTPHandler from litellm.caching import DualCache + oidc_cache = DualCache() try: @@ -2957,7 +2958,7 @@ def client(original_function): ) else: return result - + return result # Prints Exactly what was passed to litellm function - don't execute any logic here - it should just print @@ -9559,16 +9560,20 @@ def get_secret( if oidc_token is not None: return oidc_token - client = HTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0)) + oidc_client = HTTPHandler( + timeout=httpx.Timeout(timeout=600.0, connect=5.0) + ) # https://cloud.google.com/compute/docs/instances/verifying-instance-identity#request_signature - response = client.get( + response = oidc_client.get( "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/identity", params={"audience": oidc_aud}, headers={"Metadata-Flavor": "Google"}, ) if response.status_code == 200: oidc_token = response.text - oidc_cache.set_cache(key=secret_name, value=oidc_token, ttl=3600 - 60) + oidc_cache.set_cache( + key=secret_name, value=oidc_token, ttl=3600 - 60 + ) return oidc_token else: raise ValueError("Google OIDC provider failed") @@ -9587,25 +9592,34 @@ def get_secret( case "github": # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-cloud-providers#using-custom-actions actions_id_token_request_url = os.getenv("ACTIONS_ID_TOKEN_REQUEST_URL") - actions_id_token_request_token = os.getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN") - if actions_id_token_request_url is None or actions_id_token_request_token is None: - raise ValueError("ACTIONS_ID_TOKEN_REQUEST_URL or ACTIONS_ID_TOKEN_REQUEST_TOKEN not found in environment") + actions_id_token_request_token = os.getenv( + "ACTIONS_ID_TOKEN_REQUEST_TOKEN" + ) + if ( + actions_id_token_request_url is None + or actions_id_token_request_token is None + ): + raise ValueError( + "ACTIONS_ID_TOKEN_REQUEST_URL or ACTIONS_ID_TOKEN_REQUEST_TOKEN not found in environment" + ) oidc_token = oidc_cache.get_cache(key=secret_name) if oidc_token is not None: return oidc_token - client = HTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0)) - response = client.get( + oidc_client = HTTPHandler( + timeout=httpx.Timeout(timeout=600.0, connect=5.0) + ) + response = oidc_client.get( actions_id_token_request_url, params={"audience": oidc_aud}, headers={ "Authorization": f"Bearer {actions_id_token_request_token}", "Accept": "application/json; api-version=2.0", - }, + }, ) if response.status_code == 200: - oidc_token = response.text['value'] + oidc_token = response.text["value"] oidc_cache.set_cache(key=secret_name, value=oidc_token, ttl=300 - 5) return oidc_token else: @@ -9613,7 +9627,6 @@ def get_secret( case _: raise ValueError("Unsupported OIDC provider") - try: if litellm.secret_manager_client is not None: try: @@ -10562,7 +10575,12 @@ class CustomStreamWrapper: response = chunk.replace("data: ", "").strip() parsed_response = json.loads(response) else: - return {"text": "", "is_finished": False, "prompt_tokens": 0, 
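# NOTE on the GitHub OIDC branch in get_secret above: `response.text["value"]`
# indexes a str with a string key and raises TypeError at runtime. The Actions
# token endpoint returns JSON, so the token presumably needs to be read as:
#
#     if response.status_code == 200:
#         oidc_token = response.json()["value"]
#
# (a corrected sketch, not the committed code)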
"completion_tokens": 0} + return { + "text": "", + "is_finished": False, + "prompt_tokens": 0, + "completion_tokens": 0, + } else: print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") raise ValueError( @@ -10583,19 +10601,32 @@ class CustomStreamWrapper: return {"text": "", "is_finished": False} except Exception as e: raise e - + def handle_clarifai_completion_chunk(self, chunk): try: if isinstance(chunk, dict): - parsed_response = chunk + parsed_response = chunk if isinstance(chunk, (str, bytes)): if isinstance(chunk, bytes): parsed_response = chunk.decode("utf-8") else: parsed_response = chunk - data_json = json.loads(parsed_response) - text = data_json.get("outputs", "")[0].get("data", "").get("text", "").get("raw","") - prompt_tokens = len(encoding.encode(data_json.get("outputs", "")[0].get("input","").get("data", "").get("text", "").get("raw",""))) + data_json = json.loads(parsed_response) + text = ( + data_json.get("outputs", "")[0] + .get("data", "") + .get("text", "") + .get("raw", "") + ) + prompt_tokens = len( + encoding.encode( + data_json.get("outputs", "")[0] + .get("input", "") + .get("data", "") + .get("text", "") + .get("raw", "") + ) + ) completion_tokens = len(encoding.encode(text)) return { "text": text, @@ -10650,9 +10681,7 @@ class CustomStreamWrapper: completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] - elif ( - self.custom_llm_provider and self.custom_llm_provider == "clarifai" - ): + elif self.custom_llm_provider and self.custom_llm_provider == "clarifai": response_obj = self.handle_clarifai_completion_chunk(chunk) completion_obj["content"] = response_obj["text"] elif self.model == "replicate" or self.custom_llm_provider == "replicate": From d7f71208800397f3c5d14ba81e5f2c304cc08ae9 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 11 May 2024 15:58:53 -0700 Subject: [PATCH 047/184] ui - new build --- litellm/proxy/_experimental/out/404.html | 2 +- .../_buildManifest.js | 0 .../_ssgManifest.js | 0 ...bc85-17d29013b8ff3da5.js => 2f6dbc85-052c4579f80d66ae.js} | 0 .../{69-e49705773ae41779.js => 69-04708d7d4a17c1ee.js} | 0 .../{566-ccd699ab19124658.js => 884-7576ee407a2ecbe6.js} | 4 ++-- ...nd-a166f65a6af292d9.js => _not-found-b1ee1381b72386c2.js} | 0 ...layout-94ae1345f5d85446.js => layout-a756276e228ca058.js} | 0 .../out/_next/static/chunks/app/page-c804e862b63be987.js | 1 - .../out/_next/static/chunks/app/page-e6190351ac8da62a.js | 1 + ...1056-dafd44dfa2da140c.js => fd9d1056-f960ab1e6d32b002.js} | 0 .../out/_next/static/chunks/webpack-de9c0fadf6a94b3b.js | 2 +- .../_experimental/out/_next/static/css/a1602eb39f799143.css | 5 ----- .../_experimental/out/_next/static/css/f04e46b02318b660.css | 5 +++++ litellm/proxy/_experimental/out/index.html | 2 +- litellm/proxy/_experimental/out/index.txt | 4 ++-- ui/litellm-dashboard/out/404.html | 2 +- .../_buildManifest.js | 0 .../_ssgManifest.js | 0 ...bc85-17d29013b8ff3da5.js => 2f6dbc85-052c4579f80d66ae.js} | 0 .../{69-e49705773ae41779.js => 69-04708d7d4a17c1ee.js} | 0 .../{566-ccd699ab19124658.js => 884-7576ee407a2ecbe6.js} | 4 ++-- ...nd-a166f65a6af292d9.js => _not-found-b1ee1381b72386c2.js} | 0 ...layout-94ae1345f5d85446.js => layout-a756276e228ca058.js} | 0 .../out/_next/static/chunks/app/page-c804e862b63be987.js | 1 - .../out/_next/static/chunks/app/page-e6190351ac8da62a.js | 1 + ...1056-dafd44dfa2da140c.js => fd9d1056-f960ab1e6d32b002.js} | 0 .../out/_next/static/chunks/webpack-de9c0fadf6a94b3b.js | 2 +- 
.../out/_next/static/css/a1602eb39f799143.css | 5 ----- .../out/_next/static/css/f04e46b02318b660.css | 5 +++++ ui/litellm-dashboard/out/index.html | 2 +- ui/litellm-dashboard/out/index.txt | 4 ++-- 32 files changed, 26 insertions(+), 26 deletions(-) rename litellm/proxy/_experimental/out/_next/static/{K8KXTbmuI2ArWjjdMi2iq => 84BZ5uERcn4DsO4_POsLl}/_buildManifest.js (100%) rename litellm/proxy/_experimental/out/_next/static/{K8KXTbmuI2ArWjjdMi2iq => 84BZ5uERcn4DsO4_POsLl}/_ssgManifest.js (100%) rename litellm/proxy/_experimental/out/_next/static/chunks/{2f6dbc85-17d29013b8ff3da5.js => 2f6dbc85-052c4579f80d66ae.js} (100%) rename litellm/proxy/_experimental/out/_next/static/chunks/{69-e49705773ae41779.js => 69-04708d7d4a17c1ee.js} (100%) rename litellm/proxy/_experimental/out/_next/static/chunks/{566-ccd699ab19124658.js => 884-7576ee407a2ecbe6.js} (64%) rename litellm/proxy/_experimental/out/_next/static/chunks/app/{_not-found-a166f65a6af292d9.js => _not-found-b1ee1381b72386c2.js} (100%) rename litellm/proxy/_experimental/out/_next/static/chunks/app/{layout-94ae1345f5d85446.js => layout-a756276e228ca058.js} (100%) delete mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/app/page-c804e862b63be987.js create mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/app/page-e6190351ac8da62a.js rename litellm/proxy/_experimental/out/_next/static/chunks/{fd9d1056-dafd44dfa2da140c.js => fd9d1056-f960ab1e6d32b002.js} (100%) rename ui/litellm-dashboard/out/_next/static/chunks/webpack-5b257e1ab47d4b4a.js => litellm/proxy/_experimental/out/_next/static/chunks/webpack-de9c0fadf6a94b3b.js (98%) delete mode 100644 litellm/proxy/_experimental/out/_next/static/css/a1602eb39f799143.css create mode 100644 litellm/proxy/_experimental/out/_next/static/css/f04e46b02318b660.css rename ui/litellm-dashboard/out/_next/static/{K8KXTbmuI2ArWjjdMi2iq => 84BZ5uERcn4DsO4_POsLl}/_buildManifest.js (100%) rename ui/litellm-dashboard/out/_next/static/{K8KXTbmuI2ArWjjdMi2iq => 84BZ5uERcn4DsO4_POsLl}/_ssgManifest.js (100%) rename ui/litellm-dashboard/out/_next/static/chunks/{2f6dbc85-17d29013b8ff3da5.js => 2f6dbc85-052c4579f80d66ae.js} (100%) rename ui/litellm-dashboard/out/_next/static/chunks/{69-e49705773ae41779.js => 69-04708d7d4a17c1ee.js} (100%) rename ui/litellm-dashboard/out/_next/static/chunks/{566-ccd699ab19124658.js => 884-7576ee407a2ecbe6.js} (64%) rename ui/litellm-dashboard/out/_next/static/chunks/app/{_not-found-a166f65a6af292d9.js => _not-found-b1ee1381b72386c2.js} (100%) rename ui/litellm-dashboard/out/_next/static/chunks/app/{layout-94ae1345f5d85446.js => layout-a756276e228ca058.js} (100%) delete mode 100644 ui/litellm-dashboard/out/_next/static/chunks/app/page-c804e862b63be987.js create mode 100644 ui/litellm-dashboard/out/_next/static/chunks/app/page-e6190351ac8da62a.js rename ui/litellm-dashboard/out/_next/static/chunks/{fd9d1056-dafd44dfa2da140c.js => fd9d1056-f960ab1e6d32b002.js} (100%) rename litellm/proxy/_experimental/out/_next/static/chunks/webpack-5b257e1ab47d4b4a.js => ui/litellm-dashboard/out/_next/static/chunks/webpack-de9c0fadf6a94b3b.js (98%) delete mode 100644 ui/litellm-dashboard/out/_next/static/css/a1602eb39f799143.css create mode 100644 ui/litellm-dashboard/out/_next/static/css/f04e46b02318b660.css diff --git a/litellm/proxy/_experimental/out/404.html b/litellm/proxy/_experimental/out/404.html index 448d7cf87..eaf570135 100644 --- a/litellm/proxy/_experimental/out/404.html +++ b/litellm/proxy/_experimental/out/404.html @@ -1 +1 @@ -404: This page could not be 
found. [minified 404.html contents omitted: the old and new versions of litellm/proxy/_experimental/out/404.html differ only in regenerated Next.js asset hashes; the visible text in both is "404 / This page could not be found."]
\ No newline at end of file diff --git a/litellm/proxy/_experimental/out/_next/static/K8KXTbmuI2ArWjjdMi2iq/_buildManifest.js b/litellm/proxy/_experimental/out/_next/static/84BZ5uERcn4DsO4_POsLl/_buildManifest.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/K8KXTbmuI2ArWjjdMi2iq/_buildManifest.js rename to litellm/proxy/_experimental/out/_next/static/84BZ5uERcn4DsO4_POsLl/_buildManifest.js diff --git a/litellm/proxy/_experimental/out/_next/static/K8KXTbmuI2ArWjjdMi2iq/_ssgManifest.js b/litellm/proxy/_experimental/out/_next/static/84BZ5uERcn4DsO4_POsLl/_ssgManifest.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/K8KXTbmuI2ArWjjdMi2iq/_ssgManifest.js rename to litellm/proxy/_experimental/out/_next/static/84BZ5uERcn4DsO4_POsLl/_ssgManifest.js diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/2f6dbc85-17d29013b8ff3da5.js b/litellm/proxy/_experimental/out/_next/static/chunks/2f6dbc85-052c4579f80d66ae.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/chunks/2f6dbc85-17d29013b8ff3da5.js rename to litellm/proxy/_experimental/out/_next/static/chunks/2f6dbc85-052c4579f80d66ae.js diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/69-e49705773ae41779.js b/litellm/proxy/_experimental/out/_next/static/chunks/69-04708d7d4a17c1ee.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/chunks/69-e49705773ae41779.js rename to litellm/proxy/_experimental/out/_next/static/chunks/69-04708d7d4a17c1ee.js diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/566-ccd699ab19124658.js b/litellm/proxy/_experimental/out/_next/static/chunks/884-7576ee407a2ecbe6.js similarity index 64% rename from litellm/proxy/_experimental/out/_next/static/chunks/566-ccd699ab19124658.js rename to litellm/proxy/_experimental/out/_next/static/chunks/884-7576ee407a2ecbe6.js index 3b819d415..f0f47e76a 100644 --- a/litellm/proxy/_experimental/out/_next/static/chunks/566-ccd699ab19124658.js +++ b/litellm/proxy/_experimental/out/_next/static/chunks/884-7576ee407a2ecbe6.js @@ -1,4 +1,4 @@ -"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[566],{12215:function(e,t,n){n.d(t,{iN:function(){return h},R_:function(){return d},EV:function(){return g},ez:function(){return p}});var r=n(41785),o=n(76991),a=[{index:7,opacity:.15},{index:6,opacity:.25},{index:5,opacity:.3},{index:5,opacity:.45},{index:5,opacity:.65},{index:5,opacity:.85},{index:4,opacity:.9},{index:3,opacity:.95},{index:2,opacity:.97},{index:1,opacity:.98}];function i(e){var t=e.r,n=e.g,o=e.b,a=(0,r.py)(t,n,o);return{h:360*a.h,s:a.s,v:a.v}}function l(e){var t=e.r,n=e.g,o=e.b;return"#".concat((0,r.vq)(t,n,o,!1))}function s(e,t,n){var r;return(r=Math.round(e.h)>=60&&240>=Math.round(e.h)?n?Math.round(e.h)-2*t:Math.round(e.h)+2*t:n?Math.round(e.h)+2*t:Math.round(e.h)-2*t)<0?r+=360:r>=360&&(r-=360),r}function c(e,t,n){var r;return 0===e.h&&0===e.s?e.s:((r=n?e.s-.16*t:4===t?e.s+.16:e.s+.05*t)>1&&(r=1),n&&5===t&&r>.1&&(r=.1),r<.06&&(r=.06),Number(r.toFixed(2)))}function u(e,t,n){var r;return(r=n?e.v+.05*t:e.v-.15*t)>1&&(r=1),Number(r.toFixed(2))}function d(e){for(var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=[],r=(0,o.uA)(e),d=5;d>0;d-=1){var p=i(r),f=l((0,o.uA)({h:s(p,d,!0),s:c(p,d,!0),v:u(p,d,!0)}));n.push(f)}n.push(l(r));for(var m=1;m<=4;m+=1){var g=i(r),h=l((0,o.uA)({h:s(g,m),s:c(g,m),v:u(g,m)}));n.push(h)}return"dark"===t.theme?a.map(function(e){var r,a,i,s=e.index,c=e.opacity;return 
l((r=(0,o.uA)(t.backgroundColor||"#141414"),a=(0,o.uA)(n[s]),i=100*c/100,{r:(a.r-r.r)*i+r.r,g:(a.g-r.g)*i+r.g,b:(a.b-r.b)*i+r.b}))}):n}var p={red:"#F5222D",volcano:"#FA541C",orange:"#FA8C16",gold:"#FAAD14",yellow:"#FADB14",lime:"#A0D911",green:"#52C41A",cyan:"#13C2C2",blue:"#1677FF",geekblue:"#2F54EB",purple:"#722ED1",magenta:"#EB2F96",grey:"#666666"},f={},m={};Object.keys(p).forEach(function(e){f[e]=d(p[e]),f[e].primary=f[e][5],m[e]=d(p[e],{theme:"dark",backgroundColor:"#141414"}),m[e].primary=m[e][5]}),f.red,f.volcano;var g=f.gold;f.orange,f.yellow,f.lime,f.green,f.cyan;var h=f.blue;f.geekblue,f.purple,f.magenta,f.grey,f.grey},8985:function(e,t,n){n.d(t,{E4:function(){return ej},jG:function(){return A},ks:function(){return Z},bf:function(){return F},CI:function(){return eD},fp:function(){return X},xy:function(){return eM}});var r,o,a=n(50833),i=n(80406),l=n(63787),s=n(5239),c=function(e){for(var t,n=0,r=0,o=e.length;o>=4;++r,o-=4)t=(65535&(t=255&e.charCodeAt(r)|(255&e.charCodeAt(++r))<<8|(255&e.charCodeAt(++r))<<16|(255&e.charCodeAt(++r))<<24))*1540483477+((t>>>16)*59797<<16),t^=t>>>24,n=(65535&t)*1540483477+((t>>>16)*59797<<16)^(65535&n)*1540483477+((n>>>16)*59797<<16);switch(o){case 3:n^=(255&e.charCodeAt(r+2))<<16;case 2:n^=(255&e.charCodeAt(r+1))<<8;case 1:n^=255&e.charCodeAt(r),n=(65535&n)*1540483477+((n>>>16)*59797<<16)}return n^=n>>>13,(((n=(65535&n)*1540483477+((n>>>16)*59797<<16))^n>>>15)>>>0).toString(36)},u=n(24050),d=n(64090),p=n.t(d,2);n(61475),n(92536);var f=n(47365),m=n(65127);function g(e){return e.join("%")}var h=function(){function e(t){(0,f.Z)(this,e),(0,a.Z)(this,"instanceId",void 0),(0,a.Z)(this,"cache",new Map),this.instanceId=t}return(0,m.Z)(e,[{key:"get",value:function(e){return this.opGet(g(e))}},{key:"opGet",value:function(e){return this.cache.get(e)||null}},{key:"update",value:function(e,t){return this.opUpdate(g(e),t)}},{key:"opUpdate",value:function(e,t){var n=t(this.cache.get(e));null===n?this.cache.delete(e):this.cache.set(e,n)}}]),e}(),b="data-token-hash",v="data-css-hash",y="__cssinjs_instance__",E=d.createContext({hashPriority:"low",cache:function(){var e=Math.random().toString(12).slice(2);if("undefined"!=typeof document&&document.head&&document.body){var t=document.body.querySelectorAll("style[".concat(v,"]"))||[],n=document.head.firstChild;Array.from(t).forEach(function(t){t[y]=t[y]||e,t[y]===e&&document.head.insertBefore(t,n)});var r={};Array.from(document.querySelectorAll("style[".concat(v,"]"))).forEach(function(t){var n,o=t.getAttribute(v);r[o]?t[y]===e&&(null===(n=t.parentNode)||void 0===n||n.removeChild(t)):r[o]=!0})}return new h(e)}(),defaultCache:!0}),w=n(6976),S=n(22127),x=function(){function e(){(0,f.Z)(this,e),(0,a.Z)(this,"cache",void 0),(0,a.Z)(this,"keys",void 0),(0,a.Z)(this,"cacheCallTimes",void 0),this.cache=new Map,this.keys=[],this.cacheCallTimes=0}return(0,m.Z)(e,[{key:"size",value:function(){return this.keys.length}},{key:"internalGet",value:function(e){var t,n,r=arguments.length>1&&void 0!==arguments[1]&&arguments[1],o={map:this.cache};return e.forEach(function(e){if(o){var t;o=null===(t=o)||void 0===t||null===(t=t.map)||void 0===t?void 0:t.get(e)}else o=void 0}),null!==(t=o)&&void 0!==t&&t.value&&r&&(o.value[1]=this.cacheCallTimes++),null===(n=o)||void 0===n?void 0:n.value}},{key:"get",value:function(e){var t;return null===(t=this.internalGet(e,!0))||void 0===t?void 0:t[0]}},{key:"has",value:function(e){return!!this.internalGet(e)}},{key:"set",value:function(t,n){var 
r=this;if(!this.has(t)){if(this.size()+1>e.MAX_CACHE_SIZE+e.MAX_CACHE_OFFSET){var o=this.keys.reduce(function(e,t){var n=(0,i.Z)(e,2)[1];return r.internalGet(t)[1]0,"[Ant Design CSS-in-JS] Theme should have at least one derivative function."),k+=1}return(0,m.Z)(e,[{key:"getDerivativeToken",value:function(e){return this.derivatives.reduce(function(t,n){return n(e,t)},void 0)}}]),e}(),T=new x;function A(e){var t=Array.isArray(e)?e:[e];return T.has(t)||T.set(t,new C(t)),T.get(t)}var N=new WeakMap,I={},R=new WeakMap;function _(e){var t=R.get(e)||"";return t||(Object.keys(e).forEach(function(n){var r=e[n];t+=n,r instanceof C?t+=r.id:r&&"object"===(0,w.Z)(r)?t+=_(r):t+=r}),R.set(e,t)),t}function P(e,t){return c("".concat(t,"_").concat(_(e)))}var M="random-".concat(Date.now(),"-").concat(Math.random()).replace(/\./g,""),L="_bAmBoO_",D=void 0,j=(0,S.Z)();function F(e){return"number"==typeof e?"".concat(e,"px"):e}function B(e,t,n){var r,o=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},i=arguments.length>4&&void 0!==arguments[4]&&arguments[4];if(i)return e;var l=(0,s.Z)((0,s.Z)({},o),{},(r={},(0,a.Z)(r,b,t),(0,a.Z)(r,v,n),r)),c=Object.keys(l).map(function(e){var t=l[e];return t?"".concat(e,'="').concat(t,'"'):null}).filter(function(e){return e}).join(" ");return"")}var Z=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";return"--".concat(t?"".concat(t,"-"):"").concat(e).replace(/([a-z0-9])([A-Z])/g,"$1-$2").replace(/([A-Z]+)([A-Z][a-z0-9]+)/g,"$1-$2").replace(/([a-z])([A-Z0-9])/g,"$1-$2").toLowerCase()},U=function(e,t,n){var r,o={},a={};return Object.entries(e).forEach(function(e){var t=(0,i.Z)(e,2),r=t[0],l=t[1];if(null!=n&&null!==(s=n.preserve)&&void 0!==s&&s[r])a[r]=l;else if(("string"==typeof l||"number"==typeof l)&&!(null!=n&&null!==(c=n.ignore)&&void 0!==c&&c[r])){var s,c,u,d=Z(r,null==n?void 0:n.prefix);o[d]="number"!=typeof l||null!=n&&null!==(u=n.unitless)&&void 0!==u&&u[r]?String(l):"".concat(l,"px"),a[r]="var(".concat(d,")")}}),[a,(r={scope:null==n?void 0:n.scope},Object.keys(o).length?".".concat(t).concat(null!=r&&r.scope?".".concat(r.scope):"","{").concat(Object.entries(o).map(function(e){var t=(0,i.Z)(e,2),n=t[0],r=t[1];return"".concat(n,":").concat(r,";")}).join(""),"}"):"")]},z=n(24800),H=(0,s.Z)({},p).useInsertionEffect,G=H?function(e,t,n){return H(function(){return e(),t()},n)}:function(e,t,n){d.useMemo(e,n),(0,z.Z)(function(){return t(!0)},n)},W=void 0!==(0,s.Z)({},p).useInsertionEffect?function(e){var t=[],n=!1;return d.useEffect(function(){return n=!1,function(){n=!0,t.length&&t.forEach(function(e){return e()})}},e),function(e){n||t.push(e)}}:function(){return function(e){e()}};function $(e,t,n,r,o){var a=d.useContext(E).cache,s=g([e].concat((0,l.Z)(t))),c=W([s]),u=function(e){a.opUpdate(s,function(t){var r=(0,i.Z)(t||[void 0,void 0],2),o=r[0],a=[void 0===o?0:o,r[1]||n()];return e?e(a):a})};d.useMemo(function(){u()},[s]);var p=a.opGet(s)[1];return G(function(){null==o||o(p)},function(e){return u(function(t){var n=(0,i.Z)(t,2),r=n[0],a=n[1];return e&&0===r&&(null==o||o(p)),[r+1,a]}),function(){a.opUpdate(s,function(t){var n=(0,i.Z)(t||[],2),o=n[0],l=void 0===o?0:o,u=n[1];return 0==l-1?(c(function(){(e||!a.opGet(s))&&(null==r||r(u,!1))}),null):[l-1,u]})}},[s]),p}var V={},q=new Map,Y=function(e,t,n,r){var o=n.getDerivativeToken(e),a=(0,s.Z)((0,s.Z)({},o),t);return r&&(a=r(a)),a},K="token";function X(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=(0,d.useContext)(E),o=r.cache.instanceId,a=r.container,p=n.salt,f=void 
0===p?"":p,m=n.override,g=void 0===m?V:m,h=n.formatToken,w=n.getComputedToken,S=n.cssVar,x=function(e,t){for(var n=N,r=0;r=(q.get(e)||0)}),n.length-r.length>0&&r.forEach(function(e){"undefined"!=typeof document&&document.querySelectorAll("style[".concat(b,'="').concat(e,'"]')).forEach(function(e){if(e[y]===o){var t;null===(t=e.parentNode)||void 0===t||t.removeChild(e)}}),q.delete(e)})},function(e){var t=(0,i.Z)(e,4),n=t[0],r=t[3];if(S&&r){var l=(0,u.hq)(r,c("css-variables-".concat(n._themeKey)),{mark:v,prepend:"queue",attachTo:a,priority:-999});l[y]=o,l.setAttribute(b,n._themeKey)}})}var Q=n(14749),J={animationIterationCount:1,borderImageOutset:1,borderImageSlice:1,borderImageWidth:1,boxFlex:1,boxFlexGroup:1,boxOrdinalGroup:1,columnCount:1,columns:1,flex:1,flexGrow:1,flexPositive:1,flexShrink:1,flexNegative:1,flexOrder:1,gridRow:1,gridRowEnd:1,gridRowSpan:1,gridRowStart:1,gridColumn:1,gridColumnEnd:1,gridColumnSpan:1,gridColumnStart:1,msGridRow:1,msGridRowSpan:1,msGridColumn:1,msGridColumnSpan:1,fontWeight:1,lineHeight:1,opacity:1,order:1,orphans:1,tabSize:1,widows:1,zIndex:1,zoom:1,WebkitLineClamp:1,fillOpacity:1,floodOpacity:1,stopOpacity:1,strokeDasharray:1,strokeDashoffset:1,strokeMiterlimit:1,strokeOpacity:1,strokeWidth:1},ee="comm",et="rule",en="decl",er=Math.abs,eo=String.fromCharCode;function ea(e,t,n){return e.replace(t,n)}function ei(e,t){return 0|e.charCodeAt(t)}function el(e,t,n){return e.slice(t,n)}function es(e){return e.length}function ec(e,t){return t.push(e),e}function eu(e,t){for(var n="",r=0;r0?f[v]+" "+y:ea(y,/&\f/g,f[v])).trim())&&(s[b++]=E);return ev(e,t,n,0===o?et:l,s,c,u,d)}function eO(e,t,n,r,o){return ev(e,t,n,en,el(e,0,r),el(e,r+1,-1),r,o)}var ek="data-ant-cssinjs-cache-path",eC="_FILE_STYLE__",eT=!0,eA="_multi_value_";function eN(e){var t,n,r;return eu((r=function e(t,n,r,o,a,i,l,s,c){for(var u,d,p,f=0,m=0,g=l,h=0,b=0,v=0,y=1,E=1,w=1,S=0,x="",O=a,k=i,C=o,T=x;E;)switch(v=S,S=ey()){case 40:if(108!=v&&58==ei(T,g-1)){-1!=(d=T+=ea(eS(S),"&","&\f"),p=er(f?s[f-1]:0),d.indexOf("&\f",p))&&(w=-1);break}case 34:case 39:case 91:T+=eS(S);break;case 9:case 10:case 13:case 32:T+=function(e){for(;eh=eE();)if(eh<33)ey();else break;return ew(e)>2||ew(eh)>3?"":" "}(v);break;case 92:T+=function(e,t){for(var n;--t&&ey()&&!(eh<48)&&!(eh>102)&&(!(eh>57)||!(eh<65))&&(!(eh>70)||!(eh<97)););return n=eg+(t<6&&32==eE()&&32==ey()),el(eb,e,n)}(eg-1,7);continue;case 47:switch(eE()){case 42:case 47:ec(ev(u=function(e,t){for(;ey();)if(e+eh===57)break;else if(e+eh===84&&47===eE())break;return"/*"+el(eb,t,eg-1)+"*"+eo(47===e?e:ey())}(ey(),eg),n,r,ee,eo(eh),el(u,2,-2),0,c),c);break;default:T+="/"}break;case 123*y:s[f++]=es(T)*w;case 125*y:case 59:case 0:switch(S){case 0:case 125:E=0;case 59+m:-1==w&&(T=ea(T,/\f/g,"")),b>0&&es(T)-g&&ec(b>32?eO(T+";",o,r,g-1,c):eO(ea(T," ","")+";",o,r,g-2,c),c);break;case 59:T+=";";default:if(ec(C=ex(T,n,r,f,m,a,s,x,O=[],k=[],g,i),i),123===S){if(0===m)e(T,n,C,C,O,i,g,s,k);else switch(99===h&&110===ei(T,3)?100:h){case 100:case 108:case 109:case 115:e(t,C,C,o&&ec(ex(t,C,C,0,0,a,s,x,a,O=[],g,k),k),a,k,g,s,o?O:k);break;default:e(T,C,C,C,[""],k,0,s,k)}}}f=m=b=0,y=w=1,x=T="",g=l;break;case 58:g=1+es(T),b=v;default:if(y<1){if(123==S)--y;else if(125==S&&0==y++&&125==(eh=eg>0?ei(eb,--eg):0,ef--,10===eh&&(ef=1,ep--),eh))continue}switch(T+=eo(S),S*y){case 38:w=m>0?1:(T+="\f",-1);break;case 44:s[f++]=(es(T)-1)*w,w=1;break;case 64:45===eE()&&(T+=eS(ey())),h=eE(),m=g=es(x=T+=function(e){for(;!ew(eE());)ey();return el(eb,e,eg)}(eg)),S++;break;case 
45:45===v&&2==es(T)&&(y=0)}}return i}("",null,null,null,[""],(n=t=e,ep=ef=1,em=es(eb=n),eg=0,t=[]),0,[0],t),eb="",r),ed).replace(/\{%%%\:[^;];}/g,";")}var eI=function e(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{root:!0,parentSelectors:[]},o=r.root,a=r.injectHash,c=r.parentSelectors,d=n.hashId,p=n.layer,f=(n.path,n.hashPriority),m=n.transformers,g=void 0===m?[]:m;n.linters;var h="",b={};function v(t){var r=t.getName(d);if(!b[r]){var o=e(t.style,n,{root:!1,parentSelectors:c}),a=(0,i.Z)(o,1)[0];b[r]="@keyframes ".concat(t.getName(d)).concat(a)}}if((function e(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];return t.forEach(function(t){Array.isArray(t)?e(t,n):t&&n.push(t)}),n})(Array.isArray(t)?t:[t]).forEach(function(t){var r="string"!=typeof t||o?t:{};if("string"==typeof r)h+="".concat(r,"\n");else if(r._keyframe)v(r);else{var u=g.reduce(function(e,t){var n;return(null==t||null===(n=t.visit)||void 0===n?void 0:n.call(t,e))||e},r);Object.keys(u).forEach(function(t){var r=u[t];if("object"!==(0,w.Z)(r)||!r||"animationName"===t&&r._keyframe||"object"===(0,w.Z)(r)&&r&&("_skip_check_"in r||eA in r)){function p(e,t){var n=e.replace(/[A-Z]/g,function(e){return"-".concat(e.toLowerCase())}),r=t;J[e]||"number"!=typeof r||0===r||(r="".concat(r,"px")),"animationName"===e&&null!=t&&t._keyframe&&(v(t),r=t.getName(d)),h+="".concat(n,":").concat(r,";")}var m,g=null!==(m=null==r?void 0:r.value)&&void 0!==m?m:r;"object"===(0,w.Z)(r)&&null!=r&&r[eA]&&Array.isArray(g)?g.forEach(function(e){p(t,e)}):p(t,g)}else{var y=!1,E=t.trim(),S=!1;(o||a)&&d?E.startsWith("@")?y=!0:E=function(e,t,n){if(!t)return e;var r=".".concat(t),o="low"===n?":where(".concat(r,")"):r;return e.split(",").map(function(e){var t,n=e.trim().split(/\s+/),r=n[0]||"",a=(null===(t=r.match(/^\w+/))||void 0===t?void 0:t[0])||"";return[r="".concat(a).concat(o).concat(r.slice(a.length))].concat((0,l.Z)(n.slice(1))).join(" ")}).join(",")}(t,d,f):o&&!d&&("&"===E||""===E)&&(E="",S=!0);var x=e(r,n,{root:S,injectHash:y,parentSelectors:[].concat((0,l.Z)(c),[E])}),O=(0,i.Z)(x,2),k=O[0],C=O[1];b=(0,s.Z)((0,s.Z)({},b),C),h+="".concat(E).concat(k)}})}}),o){if(p&&(void 0===D&&(D=function(e,t,n){if((0,S.Z)()){(0,u.hq)(e,M);var r,o,a=document.createElement("div");a.style.position="fixed",a.style.left="0",a.style.top="0",null==t||t(a),document.body.appendChild(a);var i=n?n(a):null===(r=getComputedStyle(a).content)||void 0===r?void 0:r.includes(L);return null===(o=a.parentNode)||void 0===o||o.removeChild(a),(0,u.jL)(M),i}return!1}("@layer ".concat(M," { .").concat(M,' { content: "').concat(L,'"!important; } }'),function(e){e.className=M})),D)){var y=p.split(","),E=y[y.length-1].trim();h="@layer ".concat(E," {").concat(h,"}"),y.length>1&&(h="@layer ".concat(p,"{%%%:%}").concat(h))}}else h="{".concat(h,"}");return[h,b]};function eR(e,t){return c("".concat(e.join("%")).concat(t))}function e_(){return null}var eP="style";function eM(e,t){var n=e.token,o=e.path,s=e.hashId,c=e.layer,p=e.nonce,f=e.clientOnly,m=e.order,g=void 0===m?0:m,h=d.useContext(E),w=h.autoClear,x=(h.mock,h.defaultCache),O=h.hashPriority,k=h.container,C=h.ssrInline,T=h.transformers,A=h.linters,N=h.cache,I=n._tokenKey,R=[I].concat((0,l.Z)(o)),_=$(eP,R,function(){var e=R.join("|");if(!function(){if(!r&&(r={},(0,S.Z)())){var e,t=document.createElement("div");t.className=ek,t.style.position="fixed",t.style.visibility="hidden",t.style.top="-9999px",document.body.appendChild(t);var 
n=getComputedStyle(t).content||"";(n=n.replace(/^"/,"").replace(/"$/,"")).split(";").forEach(function(e){var t=e.split(":"),n=(0,i.Z)(t,2),o=n[0],a=n[1];r[o]=a});var o=document.querySelector("style[".concat(ek,"]"));o&&(eT=!1,null===(e=o.parentNode)||void 0===e||e.removeChild(o)),document.body.removeChild(t)}}(),r[e]){var n=function(e){var t=r[e],n=null;if(t&&(0,S.Z)()){if(eT)n=eC;else{var o=document.querySelector("style[".concat(v,'="').concat(r[e],'"]'));o?n=o.innerHTML:delete r[e]}}return[n,t]}(e),a=(0,i.Z)(n,2),l=a[0],u=a[1];if(l)return[l,I,u,{},f,g]}var d=eI(t(),{hashId:s,hashPriority:O,layer:c,path:o.join("-"),transformers:T,linters:A}),p=(0,i.Z)(d,2),m=p[0],h=p[1],b=eN(m),y=eR(R,b);return[b,I,y,h,f,g]},function(e,t){var n=(0,i.Z)(e,3)[2];(t||w)&&j&&(0,u.jL)(n,{mark:v})},function(e){var t=(0,i.Z)(e,4),n=t[0],r=(t[1],t[2]),o=t[3];if(j&&n!==eC){var a={mark:v,prepend:"queue",attachTo:k,priority:g},l="function"==typeof p?p():p;l&&(a.csp={nonce:l});var s=(0,u.hq)(n,r,a);s[y]=N.instanceId,s.setAttribute(b,I),Object.keys(o).forEach(function(e){(0,u.hq)(eN(o[e]),"_effect-".concat(e),a)})}}),P=(0,i.Z)(_,3),M=P[0],L=P[1],D=P[2];return function(e){var t,n;return t=C&&!j&&x?d.createElement("style",(0,Q.Z)({},(n={},(0,a.Z)(n,b,L),(0,a.Z)(n,v,D),n),{dangerouslySetInnerHTML:{__html:M}})):d.createElement(e_,null),d.createElement(d.Fragment,null,t,e)}}var eL="cssVar",eD=function(e,t){var n=e.key,r=e.prefix,o=e.unitless,a=e.ignore,s=e.token,c=e.scope,p=void 0===c?"":c,f=(0,d.useContext)(E),m=f.cache.instanceId,g=f.container,h=s._tokenKey,w=[].concat((0,l.Z)(e.path),[n,p,h]);return $(eL,w,function(){var e=U(t(),n,{prefix:r,unitless:o,ignore:a,scope:p}),l=(0,i.Z)(e,2),s=l[0],c=l[1],u=eR(w,c);return[s,c,u,n]},function(e){var t=(0,i.Z)(e,3)[2];j&&(0,u.jL)(t,{mark:v})},function(e){var t=(0,i.Z)(e,3),r=t[1],o=t[2];if(r){var a=(0,u.hq)(r,o,{mark:v,prepend:"queue",attachTo:g,priority:-999});a[y]=m,a.setAttribute(b,n)}})};o={},(0,a.Z)(o,eP,function(e,t,n){var r=(0,i.Z)(e,6),o=r[0],a=r[1],l=r[2],s=r[3],c=r[4],u=r[5],d=(n||{}).plain;if(c)return null;var p=o,f={"data-rc-order":"prependQueue","data-rc-priority":"".concat(u)};return p=B(o,a,l,f,d),s&&Object.keys(s).forEach(function(e){if(!t[e]){t[e]=!0;var n=eN(s[e]);p+=B(n,a,"_effect-".concat(e),f,d)}}),[u,l,p]}),(0,a.Z)(o,K,function(e,t,n){var r=(0,i.Z)(e,5),o=r[2],a=r[3],l=r[4],s=(n||{}).plain;if(!a)return null;var c=o._tokenKey,u=B(a,l,c,{"data-rc-order":"prependQueue","data-rc-priority":"".concat(-999)},s);return[-999,c,u]}),(0,a.Z)(o,eL,function(e,t,n){var r=(0,i.Z)(e,4),o=r[1],a=r[2],l=r[3],s=(n||{}).plain;if(!o)return null;var c=B(o,l,a,{"data-rc-order":"prependQueue","data-rc-priority":"".concat(-999)},s);return[-999,a,c]});var ej=function(){function e(t,n){(0,f.Z)(this,e),(0,a.Z)(this,"name",void 0),(0,a.Z)(this,"style",void 0),(0,a.Z)(this,"_keyframe",!0),this.name=t,this.style=n}return(0,m.Z)(e,[{key:"getName",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return e?"".concat(e,"-").concat(this.name):this.name}}]),e}();function eF(e){return e.notSplit=!0,e}eF(["borderTop","borderBottom"]),eF(["borderTop"]),eF(["borderBottom"]),eF(["borderLeft","borderRight"]),eF(["borderLeft"]),eF(["borderRight"])},60688:function(e,t,n){n.d(t,{Z:function(){return A}});var r=n(14749),o=n(80406),a=n(50833),i=n(60635),l=n(64090),s=n(16480),c=n.n(s),u=n(12215),d=n(67689),p=n(5239),f=n(6976),m=n(24050),g=n(74687),h=n(53850);function b(e){return"object"===(0,f.Z)(e)&&"string"==typeof e.name&&"string"==typeof 
e.theme&&("object"===(0,f.Z)(e.icon)||"function"==typeof e.icon)}function v(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return Object.keys(e).reduce(function(t,n){var r=e[n];return"class"===n?(t.className=r,delete t.class):(delete t[n],t[n.replace(/-(.)/g,function(e,t){return t.toUpperCase()})]=r),t},{})}function y(e){return(0,u.R_)(e)[0]}function E(e){return e?Array.isArray(e)?e:[e]:[]}var w=function(e){var t=(0,l.useContext)(d.Z),n=t.csp,r=t.prefixCls,o="\n.anticon {\n display: inline-block;\n color: inherit;\n font-style: normal;\n line-height: 0;\n text-align: center;\n text-transform: none;\n vertical-align: -0.125em;\n text-rendering: optimizeLegibility;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n\n.anticon > * {\n line-height: 1;\n}\n\n.anticon svg {\n display: inline-block;\n}\n\n.anticon::before {\n display: none;\n}\n\n.anticon .anticon-icon {\n display: block;\n}\n\n.anticon[tabindex] {\n cursor: pointer;\n}\n\n.anticon-spin::before,\n.anticon-spin {\n display: inline-block;\n -webkit-animation: loadingCircle 1s infinite linear;\n animation: loadingCircle 1s infinite linear;\n}\n\n@-webkit-keyframes loadingCircle {\n 100% {\n -webkit-transform: rotate(360deg);\n transform: rotate(360deg);\n }\n}\n\n@keyframes loadingCircle {\n 100% {\n -webkit-transform: rotate(360deg);\n transform: rotate(360deg);\n }\n}\n";r&&(o=o.replace(/anticon/g,r)),(0,l.useEffect)(function(){var t=e.current,r=(0,g.A)(t);(0,m.hq)(o,"@ant-design-icons",{prepend:!0,csp:n,attachTo:r})},[])},S=["icon","className","onClick","style","primaryColor","secondaryColor"],x={primaryColor:"#333",secondaryColor:"#E6E6E6",calculated:!1},O=function(e){var t,n,r=e.icon,o=e.className,a=e.onClick,s=e.style,c=e.primaryColor,u=e.secondaryColor,d=(0,i.Z)(e,S),f=l.useRef(),m=x;if(c&&(m={primaryColor:c,secondaryColor:u||y(c)}),w(f),t=b(r),n="icon should be icon definiton, but got ".concat(r),(0,h.ZP)(t,"[@ant-design/icons] ".concat(n)),!b(r))return null;var g=r;return g&&"function"==typeof g.icon&&(g=(0,p.Z)((0,p.Z)({},g),{},{icon:g.icon(m.primaryColor,m.secondaryColor)})),function e(t,n,r){return r?l.createElement(t.tag,(0,p.Z)((0,p.Z)({key:n},v(t.attrs)),r),(t.children||[]).map(function(r,o){return e(r,"".concat(n,"-").concat(t.tag,"-").concat(o))})):l.createElement(t.tag,(0,p.Z)({key:n},v(t.attrs)),(t.children||[]).map(function(r,o){return e(r,"".concat(n,"-").concat(t.tag,"-").concat(o))}))}(g.icon,"svg-".concat(g.name),(0,p.Z)((0,p.Z)({className:o,onClick:a,style:s,"data-icon":g.name,width:"1em",height:"1em",fill:"currentColor","aria-hidden":"true"},d),{},{ref:f}))};function k(e){var t=E(e),n=(0,o.Z)(t,2),r=n[0],a=n[1];return O.setTwoToneColors({primaryColor:r,secondaryColor:a})}O.displayName="IconReact",O.getTwoToneColors=function(){return(0,p.Z)({},x)},O.setTwoToneColors=function(e){var t=e.primaryColor,n=e.secondaryColor;x.primaryColor=t,x.secondaryColor=n||y(t),x.calculated=!!n};var C=["className","icon","spin","rotate","tabIndex","onClick","twoToneColor"];k(u.iN.primary);var T=l.forwardRef(function(e,t){var n,s=e.className,u=e.icon,p=e.spin,f=e.rotate,m=e.tabIndex,g=e.onClick,h=e.twoToneColor,b=(0,i.Z)(e,C),v=l.useContext(d.Z),y=v.prefixCls,w=void 0===y?"anticon":y,S=v.rootClassName,x=c()(S,w,(n={},(0,a.Z)(n,"".concat(w,"-").concat(u.name),!!u.name),(0,a.Z)(n,"".concat(w,"-spin"),!!p||"loading"===u.name),n),s),k=m;void 0===k&&g&&(k=-1);var T=E(h),A=(0,o.Z)(T,2),N=A[0],I=A[1];return 
l.createElement("span",(0,r.Z)({role:"img","aria-label":u.name},b,{ref:t,tabIndex:k,onClick:g,className:x}),l.createElement(O,{icon:u,primaryColor:N,secondaryColor:I,style:f?{msTransform:"rotate(".concat(f,"deg)"),transform:"rotate(".concat(f,"deg)")}:void 0}))});T.displayName="AntdIcon",T.getTwoToneColor=function(){var e=O.getTwoToneColors();return e.calculated?[e.primaryColor,e.secondaryColor]:e.primaryColor},T.setTwoToneColor=k;var A=T},67689:function(e,t,n){var r=(0,n(64090).createContext)({});t.Z=r},99537:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm193.5 301.7l-210.6 292a31.8 31.8 0 01-51.7 0L318.5 484.9c-3.8-5.3 0-12.7 6.5-12.7h46.9c10.2 0 19.9 4.9 25.9 13.3l71.2 98.8 157.2-218c6-8.3 15.6-13.3 25.9-13.3H699c6.5 0 10.3 7.4 6.5 12.7z"}}]},name:"check-circle",theme:"filled"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},90507:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M912 190h-69.9c-9.8 0-19.1 4.5-25.1 12.2L404.7 724.5 207 474a32 32 0 00-25.1-12.2H112c-6.7 0-10.4 7.7-6.3 12.9l273.9 347c12.8 16.2 37.4 16.2 50.3 0l488.4-618.9c4.1-5.1.4-12.8-6.3-12.8z"}}]},name:"check",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},77136:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{"fill-rule":"evenodd",viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64c247.4 0 448 200.6 448 448S759.4 960 512 960 64 759.4 64 512 264.6 64 512 64zm127.98 274.82h-.04l-.08.06L512 466.75 384.14 338.88c-.04-.05-.06-.06-.08-.06a.12.12 0 00-.07 0c-.03 0-.05.01-.09.05l-45.02 45.02a.2.2 0 00-.05.09.12.12 0 000 .07v.02a.27.27 0 00.06.06L466.75 512 338.88 639.86c-.05.04-.06.06-.06.08a.12.12 0 000 .07c0 .03.01.05.05.09l45.02 45.02a.2.2 0 00.09.05.12.12 0 00.07 0c.02 0 .04-.01.08-.05L512 557.25l127.86 127.87c.04.04.06.05.08.05a.12.12 0 00.07 0c.03 0 .05-.01.09-.05l45.02-45.02a.2.2 0 00.05-.09.12.12 0 000-.07v-.02a.27.27 0 00-.05-.06L557.25 512l127.87-127.86c.04-.04.05-.06.05-.08a.12.12 0 000-.07c0-.03-.01-.05-.05-.09l-45.02-45.02a.2.2 0 00-.09-.05.12.12 0 00-.07 0z"}}]},name:"close-circle",theme:"filled"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},81303:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{"fill-rule":"evenodd",viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M799.86 166.31c.02 0 .04.02.08.06l57.69 57.7c.04.03.05.05.06.08a.12.12 0 010 .06c0 .03-.02.05-.06.09L569.93 512l287.7 287.7c.04.04.05.06.06.09a.12.12 0 010 .07c0 .02-.02.04-.06.08l-57.7 57.69c-.03.04-.05.05-.07.06a.12.12 0 01-.07 0c-.03 0-.05-.02-.09-.06L512 569.93l-287.7 287.7c-.04.04-.06.05-.09.06a.12.12 0 01-.07 0c-.02 0-.04-.02-.08-.06l-57.69-57.7c-.04-.03-.05-.05-.06-.07a.12.12 0 010-.07c0-.03.02-.05.06-.09L454.07 512l-287.7-287.7c-.04-.04-.05-.06-.06-.09a.12.12 0 010-.07c0-.02.02-.04.06-.08l57.7-57.69c.03-.04.05-.05.07-.06a.12.12 0 01.07 0c.03 0 .05.02.09.06L512 454.07l287.7-287.7c.04-.04.06-.05.09-.06a.12.12 0 01.07 
0z"}}]},name:"close",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},20383:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M884 256h-75c-5.1 0-9.9 2.5-12.9 6.6L512 654.2 227.9 262.6c-3-4.1-7.8-6.6-12.9-6.6h-75c-6.5 0-10.3 7.4-6.5 12.7l352.6 486.1c12.8 17.6 39 17.6 51.7 0l352.6-486.1c3.9-5.3.1-12.7-6.4-12.7z"}}]},name:"down",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},31413:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M176 511a56 56 0 10112 0 56 56 0 10-112 0zm280 0a56 56 0 10112 0 56 56 0 10-112 0zm280 0a56 56 0 10112 0 56 56 0 10-112 0z"}}]},name:"ellipsis",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},20653:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm-32 232c0-4.4 3.6-8 8-8h48c4.4 0 8 3.6 8 8v272c0 4.4-3.6 8-8 8h-48c-4.4 0-8-3.6-8-8V296zm32 440a48.01 48.01 0 010-96 48.01 48.01 0 010 96z"}}]},name:"exclamation-circle",theme:"filled"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},41311:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M942.2 486.2C847.4 286.5 704.1 186 512 186c-192.2 0-335.4 100.5-430.2 300.3a60.3 60.3 0 000 51.5C176.6 737.5 319.9 838 512 838c192.2 0 335.4-100.5 430.2-300.3 7.7-16.2 7.7-35 0-51.5zM512 766c-161.3 0-279.4-81.8-362.7-254C232.6 339.8 350.7 258 512 258c161.3 0 279.4 81.8 362.7 254C791.5 684.2 673.4 766 512 766zm-4-430c-97.2 0-176 78.8-176 176s78.8 176 176 176 176-78.8 176-176-78.8-176-176-176zm0 288c-61.9 0-112-50.1-112-112s50.1-112 112-112 112 50.1 112 112-50.1 112-112 112z"}}]},name:"eye",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},40388:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm32 664c0 4.4-3.6 8-8 8h-48c-4.4 0-8-3.6-8-8V456c0-4.4 3.6-8 8-8h48c4.4 0 8 3.6 8 8v272zm-32-344a48.01 48.01 0 010-96 48.01 48.01 0 010 96z"}}]},name:"info-circle",theme:"filled"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},66155:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"0 0 1024 1024",focusable:"false"},children:[{tag:"path",attrs:{d:"M988 548c-19.9 0-36-16.1-36-36 0-59.4-11.6-117-34.6-171.3a440.45 440.45 0 00-94.3-139.9 437.71 437.71 0 00-139.9-94.3C629 83.6 571.4 72 512 72c-19.9 0-36-16.1-36-36s16.1-36 36-36c69.1 0 136.2 13.5 199.3 40.3C772.3 66 827 103 874 150c47 47 83.9 101.8 109.7 162.7 26.7 63.1 40.2 130.2 40.2 199.3.1 19.9-16 36-35.9 
36z"}}]},name:"loading",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},50459:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M765.7 486.8L314.9 134.7A7.97 7.97 0 00302 141v77.3c0 4.9 2.3 9.6 6.1 12.6l360 281.1-360 281.1c-3.9 3-6.1 7.7-6.1 12.6V883c0 6.7 7.7 10.4 12.9 6.3l450.8-352.1a31.96 31.96 0 000-50.4z"}}]},name:"right",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},96871:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M909.6 854.5L649.9 594.8C690.2 542.7 712 479 712 412c0-80.2-31.3-155.4-87.9-212.1-56.6-56.7-132-87.9-212.1-87.9s-155.5 31.3-212.1 87.9C143.2 256.5 112 331.8 112 412c0 80.1 31.3 155.5 87.9 212.1C256.5 680.8 331.8 712 412 712c67 0 130.6-21.8 182.7-62l259.7 259.6a8.2 8.2 0 0011.6 0l43.6-43.5a8.2 8.2 0 000-11.6zM570.4 570.4C528 612.7 471.8 636 412 636s-116-23.3-158.4-65.6C211.3 528 188 471.8 188 412s23.3-116.1 65.6-158.4C296 211.3 352.2 188 412 188s116.1 23.2 158.4 65.6S636 352.2 636 412s-23.3 116.1-65.6 158.4z"}}]},name:"search",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},97766:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M400 317.7h73.9V656c0 4.4 3.6 8 8 8h60c4.4 0 8-3.6 8-8V317.7H624c6.7 0 10.4-7.7 6.3-12.9L518.3 163a8 8 0 00-12.6 0l-112 141.7c-4.1 5.3-.4 13 6.3 13zM878 626h-60c-4.4 0-8 3.6-8 8v154H214V634c0-4.4-3.6-8-8-8h-60c-4.4 0-8 3.6-8 8v198c0 17.7 14.3 32 32 32h684c17.7 0 32-14.3 32-32V634c0-4.4-3.6-8-8-8z"}}]},name:"upload",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},41785:function(e,t,n){n.d(t,{T6:function(){return p},VD:function(){return f},WE:function(){return c},Yt:function(){return m},lC:function(){return a},py:function(){return s},rW:function(){return o},s:function(){return d},ve:function(){return l},vq:function(){return u}});var r=n(27974);function o(e,t,n){return{r:255*(0,r.sh)(e,255),g:255*(0,r.sh)(t,255),b:255*(0,r.sh)(n,255)}}function a(e,t,n){var o=Math.max(e=(0,r.sh)(e,255),t=(0,r.sh)(t,255),n=(0,r.sh)(n,255)),a=Math.min(e,t,n),i=0,l=0,s=(o+a)/2;if(o===a)l=0,i=0;else{var c=o-a;switch(l=s>.5?c/(2-o-a):c/(o+a),o){case e:i=(t-n)/c+(t1&&(n-=1),n<1/6)?e+6*n*(t-e):n<.5?t:n<2/3?e+(t-e)*(2/3-n)*6:e}function l(e,t,n){if(e=(0,r.sh)(e,360),t=(0,r.sh)(t,100),n=(0,r.sh)(n,100),0===t)a=n,l=n,o=n;else{var o,a,l,s=n<.5?n*(1+t):n+t-n*t,c=2*n-s;o=i(c,s,e+1/3),a=i(c,s,e),l=i(c,s,e-1/3)}return{r:255*o,g:255*a,b:255*l}}function s(e,t,n){var o=Math.max(e=(0,r.sh)(e,255),t=(0,r.sh)(t,255),n=(0,r.sh)(n,255)),a=Math.min(e,t,n),i=0,l=o-a;if(o===a)i=0;else{switch(o){case e:i=(t-n)/l+(t>16,g:(65280&e)>>8,b:255&e}}},6564:function(e,t,n){n.d(t,{R:function(){return r}});var 
r={aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyan:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",goldenrod:"#daa520",gold:"#ffd700",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavenderblush:"#fff0f5",lavender:"#e6e6fa",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",rebeccapurple:"#663399",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",steelblue:"#4682b4",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",tomato:"#ff6347",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"}},76991:function(e,t,n){n.d(t,{uA:function(){return i}});var r=n(41785),o=n(6564),a=n(27974);function i(e){var t={r:0,g:0,b:0},n=1,i=null,l=null,s=null,c=!1,p=!1;return"string"==typeof e&&(e=function(e){if(0===(e=e.trim().toLowerCase()).length)return!1;var t=!1;if(o.R[e])e=o.R[e],t=!0;else if("transparent"===e)return{r:0,g:0,b:0,a:0,format:"name"};var n=u.rgb.exec(e);return 
n?{r:n[1],g:n[2],b:n[3]}:(n=u.rgba.exec(e))?{r:n[1],g:n[2],b:n[3],a:n[4]}:(n=u.hsl.exec(e))?{h:n[1],s:n[2],l:n[3]}:(n=u.hsla.exec(e))?{h:n[1],s:n[2],l:n[3],a:n[4]}:(n=u.hsv.exec(e))?{h:n[1],s:n[2],v:n[3]}:(n=u.hsva.exec(e))?{h:n[1],s:n[2],v:n[3],a:n[4]}:(n=u.hex8.exec(e))?{r:(0,r.VD)(n[1]),g:(0,r.VD)(n[2]),b:(0,r.VD)(n[3]),a:(0,r.T6)(n[4]),format:t?"name":"hex8"}:(n=u.hex6.exec(e))?{r:(0,r.VD)(n[1]),g:(0,r.VD)(n[2]),b:(0,r.VD)(n[3]),format:t?"name":"hex"}:(n=u.hex4.exec(e))?{r:(0,r.VD)(n[1]+n[1]),g:(0,r.VD)(n[2]+n[2]),b:(0,r.VD)(n[3]+n[3]),a:(0,r.T6)(n[4]+n[4]),format:t?"name":"hex8"}:!!(n=u.hex3.exec(e))&&{r:(0,r.VD)(n[1]+n[1]),g:(0,r.VD)(n[2]+n[2]),b:(0,r.VD)(n[3]+n[3]),format:t?"name":"hex"}}(e)),"object"==typeof e&&(d(e.r)&&d(e.g)&&d(e.b)?(t=(0,r.rW)(e.r,e.g,e.b),c=!0,p="%"===String(e.r).substr(-1)?"prgb":"rgb"):d(e.h)&&d(e.s)&&d(e.v)?(i=(0,a.JX)(e.s),l=(0,a.JX)(e.v),t=(0,r.WE)(e.h,i,l),c=!0,p="hsv"):d(e.h)&&d(e.s)&&d(e.l)&&(i=(0,a.JX)(e.s),s=(0,a.JX)(e.l),t=(0,r.ve)(e.h,i,s),c=!0,p="hsl"),Object.prototype.hasOwnProperty.call(e,"a")&&(n=e.a)),n=(0,a.Yq)(n),{ok:c,format:e.format||p,r:Math.min(255,Math.max(t.r,0)),g:Math.min(255,Math.max(t.g,0)),b:Math.min(255,Math.max(t.b,0)),a:n}}var l="(?:".concat("[-\\+]?\\d*\\.\\d+%?",")|(?:").concat("[-\\+]?\\d+%?",")"),s="[\\s|\\(]+(".concat(l,")[,|\\s]+(").concat(l,")[,|\\s]+(").concat(l,")\\s*\\)?"),c="[\\s|\\(]+(".concat(l,")[,|\\s]+(").concat(l,")[,|\\s]+(").concat(l,")[,|\\s]+(").concat(l,")\\s*\\)?"),u={CSS_UNIT:new RegExp(l),rgb:RegExp("rgb"+s),rgba:RegExp("rgba"+c),hsl:RegExp("hsl"+s),hsla:RegExp("hsla"+c),hsv:RegExp("hsv"+s),hsva:RegExp("hsva"+c),hex3:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex6:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/,hex4:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex8:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/};function d(e){return!!u.CSS_UNIT.exec(String(e))}},6336:function(e,t,n){n.d(t,{C:function(){return l}});var r=n(41785),o=n(6564),a=n(76991),i=n(27974),l=function(){function e(t,n){if(void 0===t&&(t=""),void 0===n&&(n={}),t instanceof e)return t;"number"==typeof t&&(t=(0,r.Yt)(t)),this.originalInput=t;var o,i=(0,a.uA)(t);this.originalInput=t,this.r=i.r,this.g=i.g,this.b=i.b,this.a=i.a,this.roundA=Math.round(100*this.a)/100,this.format=null!==(o=n.format)&&void 0!==o?o:i.format,this.gradientType=n.gradientType,this.r<1&&(this.r=Math.round(this.r)),this.g<1&&(this.g=Math.round(this.g)),this.b<1&&(this.b=Math.round(this.b)),this.isValid=i.ok}return e.prototype.isDark=function(){return 128>this.getBrightness()},e.prototype.isLight=function(){return!this.isDark()},e.prototype.getBrightness=function(){var e=this.toRgb();return(299*e.r+587*e.g+114*e.b)/1e3},e.prototype.getLuminance=function(){var e=this.toRgb(),t=e.r/255,n=e.g/255,r=e.b/255;return .2126*(t<=.03928?t/12.92:Math.pow((t+.055)/1.055,2.4))+.7152*(n<=.03928?n/12.92:Math.pow((n+.055)/1.055,2.4))+.0722*(r<=.03928?r/12.92:Math.pow((r+.055)/1.055,2.4))},e.prototype.getAlpha=function(){return this.a},e.prototype.setAlpha=function(e){return this.a=(0,i.Yq)(e),this.roundA=Math.round(100*this.a)/100,this},e.prototype.isMonochrome=function(){return 0===this.toHsl().s},e.prototype.toHsv=function(){var e=(0,r.py)(this.r,this.g,this.b);return{h:360*e.h,s:e.s,v:e.v,a:this.a}},e.prototype.toHsvString=function(){var e=(0,r.py)(this.r,this.g,this.b),t=Math.round(360*e.h),n=Math.round(100*e.s),o=Math.round(100*e.v);return 1===this.a?"hsv(".concat(t,", ").concat(n,"%, 
").concat(o,"%)"):"hsva(".concat(t,", ").concat(n,"%, ").concat(o,"%, ").concat(this.roundA,")")},e.prototype.toHsl=function(){var e=(0,r.lC)(this.r,this.g,this.b);return{h:360*e.h,s:e.s,l:e.l,a:this.a}},e.prototype.toHslString=function(){var e=(0,r.lC)(this.r,this.g,this.b),t=Math.round(360*e.h),n=Math.round(100*e.s),o=Math.round(100*e.l);return 1===this.a?"hsl(".concat(t,", ").concat(n,"%, ").concat(o,"%)"):"hsla(".concat(t,", ").concat(n,"%, ").concat(o,"%, ").concat(this.roundA,")")},e.prototype.toHex=function(e){return void 0===e&&(e=!1),(0,r.vq)(this.r,this.g,this.b,e)},e.prototype.toHexString=function(e){return void 0===e&&(e=!1),"#"+this.toHex(e)},e.prototype.toHex8=function(e){return void 0===e&&(e=!1),(0,r.s)(this.r,this.g,this.b,this.a,e)},e.prototype.toHex8String=function(e){return void 0===e&&(e=!1),"#"+this.toHex8(e)},e.prototype.toHexShortString=function(e){return void 0===e&&(e=!1),1===this.a?this.toHexString(e):this.toHex8String(e)},e.prototype.toRgb=function(){return{r:Math.round(this.r),g:Math.round(this.g),b:Math.round(this.b),a:this.a}},e.prototype.toRgbString=function(){var e=Math.round(this.r),t=Math.round(this.g),n=Math.round(this.b);return 1===this.a?"rgb(".concat(e,", ").concat(t,", ").concat(n,")"):"rgba(".concat(e,", ").concat(t,", ").concat(n,", ").concat(this.roundA,")")},e.prototype.toPercentageRgb=function(){var e=function(e){return"".concat(Math.round(100*(0,i.sh)(e,255)),"%")};return{r:e(this.r),g:e(this.g),b:e(this.b),a:this.a}},e.prototype.toPercentageRgbString=function(){var e=function(e){return Math.round(100*(0,i.sh)(e,255))};return 1===this.a?"rgb(".concat(e(this.r),"%, ").concat(e(this.g),"%, ").concat(e(this.b),"%)"):"rgba(".concat(e(this.r),"%, ").concat(e(this.g),"%, ").concat(e(this.b),"%, ").concat(this.roundA,")")},e.prototype.toName=function(){if(0===this.a)return"transparent";if(this.a<1)return!1;for(var e="#"+(0,r.vq)(this.r,this.g,this.b,!1),t=0,n=Object.entries(o.R);t=0;return!t&&r&&(e.startsWith("hex")||"name"===e)?"name"===e&&0===this.a?this.toName():this.toRgbString():("rgb"===e&&(n=this.toRgbString()),"prgb"===e&&(n=this.toPercentageRgbString()),("hex"===e||"hex6"===e)&&(n=this.toHexString()),"hex3"===e&&(n=this.toHexString(!0)),"hex4"===e&&(n=this.toHex8String(!0)),"hex8"===e&&(n=this.toHex8String()),"name"===e&&(n=this.toName()),"hsl"===e&&(n=this.toHslString()),"hsv"===e&&(n=this.toHsvString()),n||this.toHexString())},e.prototype.toNumber=function(){return(Math.round(this.r)<<16)+(Math.round(this.g)<<8)+Math.round(this.b)},e.prototype.clone=function(){return new e(this.toString())},e.prototype.lighten=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.l+=t/100,n.l=(0,i.V2)(n.l),new e(n)},e.prototype.brighten=function(t){void 0===t&&(t=10);var n=this.toRgb();return n.r=Math.max(0,Math.min(255,n.r-Math.round(-(t/100*255)))),n.g=Math.max(0,Math.min(255,n.g-Math.round(-(t/100*255)))),n.b=Math.max(0,Math.min(255,n.b-Math.round(-(t/100*255)))),new e(n)},e.prototype.darken=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.l-=t/100,n.l=(0,i.V2)(n.l),new e(n)},e.prototype.tint=function(e){return void 0===e&&(e=10),this.mix("white",e)},e.prototype.shade=function(e){return void 0===e&&(e=10),this.mix("black",e)},e.prototype.desaturate=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.s-=t/100,n.s=(0,i.V2)(n.s),new e(n)},e.prototype.saturate=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.s+=t/100,n.s=(0,i.V2)(n.s),new e(n)},e.prototype.greyscale=function(){return 
this.desaturate(100)},e.prototype.spin=function(t){var n=this.toHsl(),r=(n.h+t)%360;return n.h=r<0?360+r:r,new e(n)},e.prototype.mix=function(t,n){void 0===n&&(n=50);var r=this.toRgb(),o=new e(t).toRgb(),a=n/100;return new e({r:(o.r-r.r)*a+r.r,g:(o.g-r.g)*a+r.g,b:(o.b-r.b)*a+r.b,a:(o.a-r.a)*a+r.a})},e.prototype.analogous=function(t,n){void 0===t&&(t=6),void 0===n&&(n=30);var r=this.toHsl(),o=360/n,a=[this];for(r.h=(r.h-(o*t>>1)+720)%360;--t;)r.h=(r.h+o)%360,a.push(new e(r));return a},e.prototype.complement=function(){var t=this.toHsl();return t.h=(t.h+180)%360,new e(t)},e.prototype.monochromatic=function(t){void 0===t&&(t=6);for(var n=this.toHsv(),r=n.h,o=n.s,a=n.v,i=[],l=1/t;t--;)i.push(new e({h:r,s:o,v:a})),a=(a+l)%1;return i},e.prototype.splitcomplement=function(){var t=this.toHsl(),n=t.h;return[this,new e({h:(n+72)%360,s:t.s,l:t.l}),new e({h:(n+216)%360,s:t.s,l:t.l})]},e.prototype.onBackground=function(t){var n=this.toRgb(),r=new e(t).toRgb(),o=n.a+r.a*(1-n.a);return new e({r:(n.r*n.a+r.r*r.a*(1-n.a))/o,g:(n.g*n.a+r.g*r.a*(1-n.a))/o,b:(n.b*n.a+r.b*r.a*(1-n.a))/o,a:o})},e.prototype.triad=function(){return this.polyad(3)},e.prototype.tetrad=function(){return this.polyad(4)},e.prototype.polyad=function(t){for(var n=this.toHsl(),r=n.h,o=[this],a=360/t,i=1;iMath.abs(e-t))?1:e=360===t?(e<0?e%t+t:e%t)/parseFloat(String(t)):e%t/parseFloat(String(t))}function o(e){return Math.min(1,Math.max(0,e))}function a(e){return(isNaN(e=parseFloat(e))||e<0||e>1)&&(e=1),e}function i(e){return e<=1?"".concat(100*Number(e),"%"):e}function l(e){return 1===e.length?"0"+e:String(e)}n.d(t,{FZ:function(){return l},JX:function(){return i},V2:function(){return o},Yq:function(){return a},sh:function(){return r}})},88804:function(e,t,n){n.d(t,{Z:function(){return y}});var r,o=n(80406),a=n(64090),i=n(89542),l=n(22127);n(53850);var s=n(74084),c=a.createContext(null),u=n(63787),d=n(24800),p=[],f=n(24050);function m(e){var t=e.match(/^(.*)px$/),n=Number(null==t?void 0:t[1]);return Number.isNaN(n)?function(e){if("undefined"==typeof document)return 0;if(void 0===r){var t=document.createElement("div");t.style.width="100%",t.style.height="200px";var n=document.createElement("div"),o=n.style;o.position="absolute",o.top="0",o.left="0",o.pointerEvents="none",o.visibility="hidden",o.width="200px",o.height="150px",o.overflow="hidden",n.appendChild(t),document.body.appendChild(n);var a=t.offsetWidth;n.style.overflow="scroll";var i=t.offsetWidth;a===i&&(i=n.clientWidth),document.body.removeChild(n),r=a-i}return r}():n}var g="rc-util-locker-".concat(Date.now()),h=0,b=!1,v=function(e){return!1!==e&&((0,l.Z)()&&e?"string"==typeof e?document.querySelector(e):"function"==typeof e?e():e:null)},y=a.forwardRef(function(e,t){var n,r,y,E,w=e.open,S=e.autoLock,x=e.getContainer,O=(e.debug,e.autoDestroy),k=void 0===O||O,C=e.children,T=a.useState(w),A=(0,o.Z)(T,2),N=A[0],I=A[1],R=N||w;a.useEffect(function(){(k||w)&&I(w)},[w,k]);var _=a.useState(function(){return v(x)}),P=(0,o.Z)(_,2),M=P[0],L=P[1];a.useEffect(function(){var e=v(x);L(null!=e?e:null)});var D=function(e,t){var n=a.useState(function(){return(0,l.Z)()?document.createElement("div"):null}),r=(0,o.Z)(n,1)[0],i=a.useRef(!1),s=a.useContext(c),f=a.useState(p),m=(0,o.Z)(f,2),g=m[0],h=m[1],b=s||(i.current?void 0:function(e){h(function(t){return[e].concat((0,u.Z)(t))})});function v(){r.parentElement||document.body.appendChild(r),i.current=!0}function y(){var e;null===(e=r.parentElement)||void 0===e||e.removeChild(r),i.current=!1}return(0,d.Z)(function(){return 
e?s?s(v):v():y(),y},[e]),(0,d.Z)(function(){g.length&&(g.forEach(function(e){return e()}),h(p))},[g]),[r,b]}(R&&!M,0),j=(0,o.Z)(D,2),F=j[0],B=j[1],Z=null!=M?M:F;n=!!(S&&w&&(0,l.Z)()&&(Z===F||Z===document.body)),r=a.useState(function(){return h+=1,"".concat(g,"_").concat(h)}),y=(0,o.Z)(r,1)[0],(0,d.Z)(function(){if(n){var e=function(e){if("undefined"==typeof document||!e||!(e instanceof Element))return{width:0,height:0};var t=getComputedStyle(e,"::-webkit-scrollbar"),n=t.width,r=t.height;return{width:m(n),height:m(r)}}(document.body).width,t=document.body.scrollHeight>(window.innerHeight||document.documentElement.clientHeight)&&window.innerWidth>document.body.offsetWidth;(0,f.hq)("\nhtml body {\n overflow-y: hidden;\n ".concat(t?"width: calc(100% - ".concat(e,"px);"):"","\n}"),y)}else(0,f.jL)(y);return function(){(0,f.jL)(y)}},[n,y]);var U=null;C&&(0,s.Yr)(C)&&t&&(U=C.ref);var z=(0,s.x1)(U,t);if(!R||!(0,l.Z)()||void 0===M)return null;var H=!1===Z||("boolean"==typeof E&&(b=E),b),G=C;return t&&(G=a.cloneElement(C,{ref:z})),a.createElement(c.Provider,{value:B},H?G:(0,i.createPortal)(G,Z))})},44101:function(e,t,n){n.d(t,{Z:function(){return z}});var r=n(5239),o=n(80406),a=n(60635),i=n(88804),l=n(16480),s=n.n(l),c=n(46505),u=n(97472),d=n(74687),p=n(54811),f=n(91010),m=n(24800),g=n(76158),h=n(64090),b=n(14749),v=n(49367),y=n(74084);function E(e){var t=e.prefixCls,n=e.align,r=e.arrow,o=e.arrowPos,a=r||{},i=a.className,l=a.content,c=o.x,u=o.y,d=h.useRef();if(!n||!n.points)return null;var p={position:"absolute"};if(!1!==n.autoArrow){var f=n.points[0],m=n.points[1],g=f[0],b=f[1],v=m[0],y=m[1];g!==v&&["t","b"].includes(g)?"t"===g?p.top=0:p.bottom=0:p.top=void 0===u?0:u,b!==y&&["l","r"].includes(b)?"l"===b?p.left=0:p.right=0:p.left=void 0===c?0:c}return h.createElement("div",{ref:d,className:s()("".concat(t,"-arrow"),i),style:p},l)}function w(e){var t=e.prefixCls,n=e.open,r=e.zIndex,o=e.mask,a=e.motion;return o?h.createElement(v.ZP,(0,b.Z)({},a,{motionAppear:!0,visible:n,removeOnLeave:!0}),function(e){var n=e.className;return h.createElement("div",{style:{zIndex:r},className:s()("".concat(t,"-mask"),n)})}):null}var S=h.memo(function(e){return e.children},function(e,t){return t.cache}),x=h.forwardRef(function(e,t){var n=e.popup,a=e.className,i=e.prefixCls,l=e.style,u=e.target,d=e.onVisibleChanged,p=e.open,f=e.keepDom,g=e.fresh,x=e.onClick,O=e.mask,k=e.arrow,C=e.arrowPos,T=e.align,A=e.motion,N=e.maskMotion,I=e.forceRender,R=e.getPopupContainer,_=e.autoDestroy,P=e.portal,M=e.zIndex,L=e.onMouseEnter,D=e.onMouseLeave,j=e.onPointerEnter,F=e.ready,B=e.offsetX,Z=e.offsetY,U=e.offsetR,z=e.offsetB,H=e.onAlign,G=e.onPrepare,W=e.stretch,$=e.targetWidth,V=e.targetHeight,q="function"==typeof n?n():n,Y=p||f,K=(null==R?void 0:R.length)>0,X=h.useState(!R||!K),Q=(0,o.Z)(X,2),J=Q[0],ee=Q[1];if((0,m.Z)(function(){!J&&K&&u&&ee(!0)},[J,K,u]),!J)return null;var et="auto",en={left:"-1000vw",top:"-1000vh",right:et,bottom:et};if(F||!p){var er,eo=T.points,ea=T.dynamicInset||(null===(er=T._experimental)||void 0===er?void 0:er.dynamicInset),ei=ea&&"r"===eo[0][1],el=ea&&"b"===eo[0][0];ei?(en.right=U,en.left=et):(en.left=B,en.right=et),el?(en.bottom=z,en.top=et):(en.top=Z,en.bottom=et)}var es={};return W&&(W.includes("height")&&V?es.height=V:W.includes("minHeight")&&V&&(es.minHeight=V),W.includes("width")&&$?es.width=$:W.includes("minWidth")&&$&&(es.minWidth=$)),p||(es.pointerEvents="none"),h.createElement(P,{open:I||Y,getContainer:R&&function(){return 
R(u)},autoDestroy:_},h.createElement(w,{prefixCls:i,open:p,zIndex:M,mask:O,motion:N}),h.createElement(c.Z,{onResize:H,disabled:!p},function(e){return h.createElement(v.ZP,(0,b.Z)({motionAppear:!0,motionEnter:!0,motionLeave:!0,removeOnLeave:!1,forceRender:I,leavedClassName:"".concat(i,"-hidden")},A,{onAppearPrepare:G,onEnterPrepare:G,visible:p,onVisibleChanged:function(e){var t;null==A||null===(t=A.onVisibleChanged)||void 0===t||t.call(A,e),d(e)}}),function(n,o){var c=n.className,u=n.style,d=s()(i,c,a);return h.createElement("div",{ref:(0,y.sQ)(e,t,o),className:d,style:(0,r.Z)((0,r.Z)((0,r.Z)((0,r.Z)({"--arrow-x":"".concat(C.x||0,"px"),"--arrow-y":"".concat(C.y||0,"px")},en),es),u),{},{boxSizing:"border-box",zIndex:M},l),onMouseEnter:L,onMouseLeave:D,onPointerEnter:j,onClick:x},k&&h.createElement(E,{prefixCls:i,arrow:k,arrowPos:C,align:T}),h.createElement(S,{cache:!p&&!g},q))})}))}),O=h.forwardRef(function(e,t){var n=e.children,r=e.getTriggerDOMNode,o=(0,y.Yr)(n),a=h.useCallback(function(e){(0,y.mH)(t,r?r(e):e)},[r]),i=(0,y.x1)(a,n.ref);return o?h.cloneElement(n,{ref:i}):n}),k=h.createContext(null);function C(e){return e?Array.isArray(e)?e:[e]:[]}var T=n(73193);function A(e,t,n,r){return t||(n?{motionName:"".concat(e,"-").concat(n)}:r?{motionName:r}:null)}function N(e){return e.ownerDocument.defaultView}function I(e){for(var t=[],n=null==e?void 0:e.parentElement,r=["hidden","scroll","clip","auto"];n;){var o=N(n).getComputedStyle(n);[o.overflowX,o.overflowY,o.overflow].some(function(e){return r.includes(e)})&&t.push(n),n=n.parentElement}return t}function R(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1;return Number.isNaN(e)?t:e}function _(e){return R(parseFloat(e),0)}function P(e,t){var n=(0,r.Z)({},e);return(t||[]).forEach(function(e){if(!(e instanceof HTMLBodyElement||e instanceof HTMLHtmlElement)){var t=N(e).getComputedStyle(e),r=t.overflow,o=t.overflowClipMargin,a=t.borderTopWidth,i=t.borderBottomWidth,l=t.borderLeftWidth,s=t.borderRightWidth,c=e.getBoundingClientRect(),u=e.offsetHeight,d=e.clientHeight,p=e.offsetWidth,f=e.clientWidth,m=_(a),g=_(i),h=_(l),b=_(s),v=R(Math.round(c.width/p*1e3)/1e3),y=R(Math.round(c.height/u*1e3)/1e3),E=m*y,w=h*v,S=0,x=0;if("clip"===r){var O=_(o);S=O*v,x=O*y}var k=c.x+w-S,C=c.y+E-x,T=k+c.width+2*S-w-b*v-(p-f-h-b)*v,A=C+c.height+2*x-E-g*y-(u-d-m-g)*y;n.left=Math.max(n.left,k),n.top=Math.max(n.top,C),n.right=Math.min(n.right,T),n.bottom=Math.min(n.bottom,A)}}),n}function M(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0,n="".concat(t),r=n.match(/^(.*)\%$/);return r?parseFloat(r[1])/100*e:parseFloat(n)}function L(e,t){var n=(0,o.Z)(t||[],2),r=n[0],a=n[1];return[M(e.width,r),M(e.height,a)]}function D(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return[e[0],e[1]]}function j(e,t){var n,r=t[0],o=t[1];return n="t"===r?e.y:"b"===r?e.y+e.height:e.y+e.height/2,{x:"l"===o?e.x:"r"===o?e.x+e.width:e.x+e.width/2,y:n}}function F(e,t){var n={t:"b",b:"t",l:"r",r:"l"};return e.map(function(e,r){return r===t?n[e]||"c":e}).join("")}var B=n(63787);n(53850);var 
Z=n(19223),U=["prefixCls","children","action","showAction","hideAction","popupVisible","defaultPopupVisible","onPopupVisibleChange","afterPopupVisibleChange","mouseEnterDelay","mouseLeaveDelay","focusDelay","blurDelay","mask","maskClosable","getPopupContainer","forceRender","autoDestroy","destroyPopupOnHide","popup","popupClassName","popupStyle","popupPlacement","builtinPlacements","popupAlign","zIndex","stretch","getPopupClassNameFromAlign","fresh","alignPoint","onPopupClick","onPopupAlign","arrow","popupMotion","maskMotion","popupTransitionName","popupAnimation","maskTransitionName","maskAnimation","className","getTriggerDOMNode"],z=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:i.Z;return h.forwardRef(function(t,n){var i,l,b,v,y,E,w,S,_,M,z,H,G,W,$,V,q,Y=t.prefixCls,K=void 0===Y?"rc-trigger-popup":Y,X=t.children,Q=t.action,J=t.showAction,ee=t.hideAction,et=t.popupVisible,en=t.defaultPopupVisible,er=t.onPopupVisibleChange,eo=t.afterPopupVisibleChange,ea=t.mouseEnterDelay,ei=t.mouseLeaveDelay,el=void 0===ei?.1:ei,es=t.focusDelay,ec=t.blurDelay,eu=t.mask,ed=t.maskClosable,ep=t.getPopupContainer,ef=t.forceRender,em=t.autoDestroy,eg=t.destroyPopupOnHide,eh=t.popup,eb=t.popupClassName,ev=t.popupStyle,ey=t.popupPlacement,eE=t.builtinPlacements,ew=void 0===eE?{}:eE,eS=t.popupAlign,ex=t.zIndex,eO=t.stretch,ek=t.getPopupClassNameFromAlign,eC=t.fresh,eT=t.alignPoint,eA=t.onPopupClick,eN=t.onPopupAlign,eI=t.arrow,eR=t.popupMotion,e_=t.maskMotion,eP=t.popupTransitionName,eM=t.popupAnimation,eL=t.maskTransitionName,eD=t.maskAnimation,ej=t.className,eF=t.getTriggerDOMNode,eB=(0,a.Z)(t,U),eZ=h.useState(!1),eU=(0,o.Z)(eZ,2),ez=eU[0],eH=eU[1];(0,m.Z)(function(){eH((0,g.Z)())},[]);var eG=h.useRef({}),eW=h.useContext(k),e$=h.useMemo(function(){return{registerSubPopup:function(e,t){eG.current[e]=t,null==eW||eW.registerSubPopup(e,t)}}},[eW]),eV=(0,f.Z)(),eq=h.useState(null),eY=(0,o.Z)(eq,2),eK=eY[0],eX=eY[1],eQ=(0,p.Z)(function(e){(0,u.S)(e)&&eK!==e&&eX(e),null==eW||eW.registerSubPopup(eV,e)}),eJ=h.useState(null),e0=(0,o.Z)(eJ,2),e1=e0[0],e2=e0[1],e4=h.useRef(null),e3=(0,p.Z)(function(e){(0,u.S)(e)&&e1!==e&&(e2(e),e4.current=e)}),e6=h.Children.only(X),e5=(null==e6?void 0:e6.props)||{},e8={},e9=(0,p.Z)(function(e){var t,n;return(null==e1?void 0:e1.contains(e))||(null===(t=(0,d.A)(e1))||void 0===t?void 0:t.host)===e||e===e1||(null==eK?void 0:eK.contains(e))||(null===(n=(0,d.A)(eK))||void 0===n?void 0:n.host)===e||e===eK||Object.values(eG.current).some(function(t){return(null==t?void 0:t.contains(e))||e===t})}),e7=A(K,eR,eM,eP),te=A(K,e_,eD,eL),tt=h.useState(en||!1),tn=(0,o.Z)(tt,2),tr=tn[0],to=tn[1],ta=null!=et?et:tr,ti=(0,p.Z)(function(e){void 0===et&&to(e)});(0,m.Z)(function(){to(et||!1)},[et]);var tl=h.useRef(ta);tl.current=ta;var ts=h.useRef([]);ts.current=[];var tc=(0,p.Z)(function(e){var t;ti(e),(null!==(t=ts.current[ts.current.length-1])&&void 0!==t?t:ta)!==e&&(ts.current.push(e),null==er||er(e))}),tu=h.useRef(),td=function(){clearTimeout(tu.current)},tp=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0;td(),0===t?tc(e):tu.current=setTimeout(function(){tc(e)},1e3*t)};h.useEffect(function(){return td},[]);var tf=h.useState(!1),tm=(0,o.Z)(tf,2),tg=tm[0],th=tm[1];(0,m.Z)(function(e){(!e||ta)&&th(!0)},[ta]);var 
[Minified Next.js/webpack chunk output (machine-generated vendor bundle from the dashboard UI build). The text was additionally corrupted in extraction — every `<identifier …>` span, including JSX tag names inside strings and `<`-comparisons in the minified code, was stripped — so the original JavaScript is not reconstructable. Recoverable contents, identified from surviving module ids and displayName strings: a popup-alignment hook (rc-trigger-style align, scroll, and resize handling), small SVG icon components, Recharts "Area" and "Line" chart classes with animation and clip-path logic, generated "AreaChart" and "BarChart" chart factories, Tremor chart components ("AreaChart", "BarChart", "Legend", chart tooltip, no-data "Flex" panel), Tremor "Badge", "Icon", and "Button" components, a react-transition-group Transition implementation, Headless UI "Popover" (Button/Overlay/Panel/Group), and Tremor "DateRangePicker" date-range helpers built on date-fns.]
13H4v-2z"}))}},eA=(0,el.fn)("BadgeDelta");i.forwardRef((e,t)=>{let{deltaType:n=ew.wu.Increase,isIncreasePositive:r=!0,size:o=ew.u8.SM,tooltip:l,children:s,className:c}=e,u=(0,a._T)(e,["deltaType","isIncreasePositive","size","tooltip","children","className"]),d=eT[n],p=(0,el.Fo)(n,r),f=s?eO:ex,{tooltipProps:m,getReferenceProps:g}=(0,eE.l)();return i.createElement("span",Object.assign({ref:(0,el.lq)([t,m.refs.setReference]),className:(0,q.q)(eA("root"),"w-max flex-shrink-0 inline-flex justify-center items-center cursor-default rounded-tremor-full bg-opacity-20 dark:bg-opacity-25",eC[p].bgColor,eC[p].textColor,f[o].paddingX,f[o].paddingY,f[o].fontSize,c)},g,u),i.createElement(eE.Z,Object.assign({text:l},m)),i.createElement(d,{className:(0,q.q)(eA("icon"),"shrink-0",s?(0,q.q)("-ml-1 mr-1.5"):ek[o].height,ek[o].width)}),s?i.createElement("p",{className:(0,q.q)(eA("text"),"text-sm whitespace-nowrap")},s):null)}).displayName="BadgeDelta";var eN=n(61244);let eI=e=>{var{onClick:t,icon:n}=e,r=(0,a._T)(e,["onClick","icon"]);return i.createElement("button",Object.assign({type:"button",className:(0,q.q)("flex items-center justify-center p-1 h-7 w-7 outline-none focus:ring-2 transition duration-100 border border-tremor-border dark:border-dark-tremor-border hover:bg-tremor-background-muted dark:hover:bg-dark-tremor-background-muted rounded-tremor-small focus:border-tremor-brand-subtle select-none dark:focus:border-dark-tremor-brand-subtle focus:ring-tremor-brand-muted dark:focus:ring-dark-tremor-brand-muted text-tremor-content-subtle dark:text-dark-tremor-content-subtle hover:text-tremor-content dark:hover:text-dark-tremor-content")},r),i.createElement(eN.Z,{onClick:t,icon:n,variant:"simple",color:"slate",size:"xs"}))};function eR(e){var{mode:t,defaultMonth:n,selected:r,onSelect:o,locale:l,disabled:s,enableYearNavigation:c,classNames:u,weekStartsOn:d=0}=e,p=(0,a._T)(e,["mode","defaultMonth","selected","onSelect","locale","disabled","enableYearNavigation","classNames","weekStartsOn"]);return i.createElement(ef._W,Object.assign({showOutsideDays:!0,mode:t,defaultMonth:n,selected:r,onSelect:o,locale:l,disabled:s,weekStartsOn:d,classNames:Object.assign({months:"flex flex-col sm:flex-row space-y-4 sm:space-x-4 sm:space-y-0",month:"space-y-4",caption:"flex justify-center pt-2 relative items-center",caption_label:"text-tremor-default text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis font-medium",nav:"space-x-1 flex items-center",nav_button:"flex items-center justify-center p-1 h-7 w-7 outline-none focus:ring-2 transition duration-100 border border-tremor-border dark:border-dark-tremor-border hover:bg-tremor-background-muted dark:hover:bg-dark-tremor-background-muted rounded-tremor-small focus:border-tremor-brand-subtle dark:focus:border-dark-tremor-brand-subtle focus:ring-tremor-brand-muted dark:focus:ring-dark-tremor-brand-muted text-tremor-content-subtle dark:text-dark-tremor-content-subtle hover:text-tremor-content dark:hover:text-dark-tremor-content",nav_button_previous:"absolute left-1",nav_button_next:"absolute right-1",table:"w-full border-collapse space-y-1",head_row:"flex",head_cell:"w-9 font-normal text-center text-tremor-content-subtle dark:text-dark-tremor-content-subtle",row:"flex w-full mt-0.5",cell:"text-center p-0 relative focus-within:relative text-tremor-default text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis",day:"h-9 w-9 p-0 hover:bg-tremor-background-subtle dark:hover:bg-dark-tremor-background-subtle outline-tremor-brand dark:outline-dark-tremor-brand 
rounded-tremor-default",day_today:"font-bold",day_selected:"aria-selected:bg-tremor-background-emphasis aria-selected:text-tremor-content-inverted dark:aria-selected:bg-dark-tremor-background-emphasis dark:aria-selected:text-dark-tremor-content-inverted ",day_disabled:"text-tremor-content-subtle dark:text-dark-tremor-content-subtle disabled:hover:bg-transparent",day_outside:"text-tremor-content-subtle dark:text-dark-tremor-content-subtle"},u),components:{IconLeft:e=>{var t=(0,a._T)(e,[]);return i.createElement(em,Object.assign({className:"h-4 w-4"},t))},IconRight:e=>{var t=(0,a._T)(e,[]);return i.createElement(eg,Object.assign({className:"h-4 w-4"},t))},Caption:e=>{var t=(0,a._T)(e,[]);let{goToMonth:n,nextMonth:r,previousMonth:o,currentMonth:s}=(0,ef.HJ)();return i.createElement("div",{className:"flex justify-between items-center"},i.createElement("div",{className:"flex items-center space-x-1"},c&&i.createElement(eI,{onClick:()=>s&&n((0,ev.Z)(s,-1)),icon:eh}),i.createElement(eI,{onClick:()=>o&&n(o),icon:em})),i.createElement(ey.Z,{className:"text-tremor-default tabular-nums capitalize text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis font-medium"},(0,ea.Z)(t.displayMonth,"LLLL yyy",{locale:l})),i.createElement("div",{className:"flex items-center space-x-1"},i.createElement(eI,{onClick:()=>r&&n(r),icon:eg}),c&&i.createElement(eI,{onClick:()=>s&&n((0,ev.Z)(s,1)),icon:eb})))}}},p))}eR.displayName="DateRangePicker",n(95093);var e_=n(27166),eP=n(82985),eM=n(46457);let eL=$(),eD=i.forwardRef((e,t)=>{var n,r;let{value:o,defaultValue:l,onValueChange:s,enableSelect:c=!0,minDate:u,maxDate:d,placeholder:p="Select range",selectPlaceholder:f="Select range",disabled:m=!1,locale:g=eP.Z,enableClear:h=!0,displayFormat:b,children:v,className:y,enableYearNavigation:E=!1,weekStartsOn:w=0,disabledDates:S}=e,x=(0,a._T)(e,["value","defaultValue","onValueChange","enableSelect","minDate","maxDate","placeholder","selectPlaceholder","disabled","locale","enableClear","displayFormat","children","className","enableYearNavigation","weekStartsOn","disabledDates"]),[O,k]=(0,eM.Z)(l,o),[C,T]=(0,i.useState)(!1),[A,N]=(0,i.useState)(!1),I=(0,i.useMemo)(()=>{let e=[];return u&&e.push({before:u}),d&&e.push({after:d}),[...e,...null!=S?S:[]]},[u,d,S]),R=(0,i.useMemo)(()=>{let e=new Map;return v?i.Children.forEach(v,t=>{var n;e.set(t.props.value,{text:null!==(n=(0,Y.qg)(t))&&void 0!==n?n:t.props.value,from:t.props.from,to:t.props.to})}):ed.forEach(t=>{e.set(t.value,{text:t.text,from:t.from,to:eL})}),e},[v]),_=(0,i.useMemo)(()=>{if(v)return(0,Y.sl)(v);let e=new Map;return ed.forEach(t=>e.set(t.value,t.text)),e},[v]),P=(null==O?void 0:O.selectValue)||"",M=ec(null==O?void 0:O.from,u,P,R),L=eu(null==O?void 0:O.to,d,P,R),D=M||L?ep(M,L,g,b):p,j=(0,V.Z)(null!==(r=null!==(n=null!=L?L:M)&&void 0!==n?n:d)&&void 0!==r?r:eL),F=h&&!m;return i.createElement("div",Object.assign({ref:t,className:(0,q.q)("w-full min-w-[10rem] relative flex justify-between text-tremor-default max-w-sm shadow-tremor-input dark:shadow-dark-tremor-input rounded-tremor-default",y)},x),i.createElement(Z,{as:"div",className:(0,q.q)("w-full",c?"rounded-l-tremor-default":"rounded-tremor-default",C&&"ring-2 ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted z-10")},i.createElement("div",{className:"relative w-full"},i.createElement(Z.Button,{onFocus:()=>T(!0),onBlur:()=>T(!1),disabled:m,className:(0,q.q)("w-full outline-none text-left whitespace-nowrap truncate focus:ring-2 transition duration-100 rounded-l-tremor-default flex flex-nowrap border 
pl-3 py-2","rounded-l-tremor-default border-tremor-border text-tremor-content-emphasis focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:text-dark-tremor-content-emphasis dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",c?"rounded-l-tremor-default":"rounded-tremor-default",F?"pr-8":"pr-4",(0,Y.um)((0,Y.Uh)(M||L),m))},i.createElement(H,{className:(0,q.q)(es("calendarIcon"),"flex-none shrink-0 h-5 w-5 -ml-0.5 mr-2","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle"),"aria-hidden":"true"}),i.createElement("p",{className:"truncate"},D)),F&&M?i.createElement("button",{type:"button",className:(0,q.q)("absolute outline-none inset-y-0 right-0 flex items-center transition duration-100 mr-4"),onClick:e=>{e.preventDefault(),null==s||s({}),k({})}},i.createElement(G.Z,{className:(0,q.q)(es("clearIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null),i.createElement(U.u,{className:"absolute z-10 min-w-min left-0",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},i.createElement(Z.Panel,{focus:!0,className:(0,q.q)("divide-y overflow-y-auto outline-none rounded-tremor-default p-3 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},i.createElement(eR,Object.assign({mode:"range",showOutsideDays:!0,defaultMonth:j,selected:{from:M,to:L},onSelect:e=>{null==s||s({from:null==e?void 0:e.from,to:null==e?void 0:e.to}),k({from:null==e?void 0:e.from,to:null==e?void 0:e.to})},locale:g,disabled:I,enableYearNavigation:E,classNames:{day_range_middle:(0,q.q)("!rounded-none aria-selected:!bg-tremor-background-subtle aria-selected:dark:!bg-dark-tremor-background-subtle aria-selected:!text-tremor-content aria-selected:dark:!bg-dark-tremor-background-subtle"),day_range_start:"rounded-r-none rounded-l-tremor-small aria-selected:text-tremor-brand-inverted dark:aria-selected:text-dark-tremor-brand-inverted",day_range_end:"rounded-l-none rounded-r-tremor-small aria-selected:text-tremor-brand-inverted dark:aria-selected:text-dark-tremor-brand-inverted"},weekStartsOn:w},e))))),c&&i.createElement(z.R,{as:"div",className:(0,q.q)("w-48 -ml-px rounded-r-tremor-default",A&&"ring-2 ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted z-10"),value:P,onChange:e=>{let{from:t,to:n}=R.get(e),r=null!=n?n:eL;null==s||s({from:t,to:r,selectValue:e}),k({from:t,to:r,selectValue:e})},disabled:m},e=>{var t;let{value:n}=e;return i.createElement(i.Fragment,null,i.createElement(z.R.Button,{onFocus:()=>N(!0),onBlur:()=>N(!1),className:(0,q.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-r-tremor-default transition duration-100 border px-4 py-2","border-tremor-border shadow-tremor-input text-tremor-content-emphasis focus:border-tremor-brand-subtle","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:text-dark-tremor-content-emphasis dark:focus:border-dark-tremor-brand-subtle",(0,Y.um)((0,Y.Uh)(n),m))},n&&null!==(t=_.get(n))&&void 0!==t?t:f),i.createElement(U.u,{className:"absolute z-10 w-full inset-x-0 right-0",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 
-translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},i.createElement(z.R.Options,{className:(0,q.q)("divide-y overflow-y-auto outline-none border my-1","shadow-tremor-dropdown bg-tremor-background border-tremor-border divide-tremor-border rounded-tremor-default","dark:shadow-dark-tremor-dropdown dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border")},null!=v?v:ed.map(e=>i.createElement(e_.Z,{key:e.value,value:e.value},e.text)))))}))});eD.displayName="DateRangePicker"},47047:function(e,t,n){n.d(t,{Z:function(){return b}});var r=n(69703),o=n(64090);n(50027),n(18174),n(21871);var a=n(41213),i=n(46457),l=n(54518);let s=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 20 20",fill:"currentColor"}),o.createElement("path",{fillRule:"evenodd",d:"M8 4a4 4 0 100 8 4 4 0 000-8zM2 8a6 6 0 1110.89 3.476l4.817 4.817a1 1 0 01-1.414 1.414l-4.816-4.816A6 6 0 012 8z",clipRule:"evenodd"}))};var c=n(8903),u=n(25163),d=n(70129);let p=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",width:"100%",height:"100%",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2",strokeLinecap:"round",strokeLinejoin:"round"},t),o.createElement("line",{x1:"18",y1:"6",x2:"6",y2:"18"}),o.createElement("line",{x1:"6",y1:"6",x2:"18",y2:"18"}))};var f=n(99250),m=n(65492),g=n(91753);let h=(0,m.fn)("MultiSelect"),b=o.forwardRef((e,t)=>{let{defaultValue:n,value:m,onValueChange:b,placeholder:v="Select...",placeholderSearch:y="Search",disabled:E=!1,icon:w,children:S,className:x}=e,O=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","placeholderSearch","disabled","icon","children","className"]),[k,C]=(0,i.Z)(n,m),{reactElementChildren:T,optionsAvailable:A}=(0,o.useMemo)(()=>{let e=o.Children.toArray(S).filter(o.isValidElement);return{reactElementChildren:e,optionsAvailable:(0,g.n0)("",e)}},[S]),[N,I]=(0,o.useState)(""),R=(null!=k?k:[]).length>0,_=(0,o.useMemo)(()=>N?(0,g.n0)(N,T):A,[N,T,A]),P=()=>{I("")};return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:k,value:k,onChange:e=>{null==b||b(e),C(e)},disabled:E,className:(0,f.q)("w-full min-w-[10rem] relative text-tremor-default",x)},O,{multiple:!0}),e=>{let{value:t}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,f.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-1.5","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",w?"p-10 -ml-0.5":"pl-3",(0,g.um)(t.length>0,E))},w&&o.createElement("span",{className:(0,f.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(w,{className:(0,f.q)(h("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("div",{className:"h-6 flex items-center"},t.length>0?o.createElement("div",{className:"flex flex-nowrap overflow-x-scroll [&::-webkit-scrollbar]:hidden [scrollbar-width:none] gap-x-1 mr-5 -ml-1.5 relative"},A.filter(e=>t.includes(e.props.value)).map((e,n)=>{var r;return 
o.createElement("div",{key:n,className:(0,f.q)("max-w-[100px] lg:max-w-[200px] flex justify-center items-center pl-2 pr-1.5 py-1 font-medium","rounded-tremor-small","bg-tremor-background-muted dark:bg-dark-tremor-background-muted","bg-tremor-background-subtle dark:bg-dark-tremor-background-subtle","text-tremor-content-default dark:text-dark-tremor-content-default","text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis")},o.createElement("div",{className:"text-xs truncate "},null!==(r=e.props.children)&&void 0!==r?r:e.props.value),o.createElement("div",{onClick:n=>{n.preventDefault();let r=t.filter(t=>t!==e.props.value);null==b||b(r),C(r)}},o.createElement(p,{className:(0,f.q)(h("clearIconItem"),"cursor-pointer rounded-tremor-full w-3.5 h-3.5 ml-2","text-tremor-content-subtle hover:text-tremor-content","dark:text-dark-tremor-content-subtle dark:hover:text-tremor-content")})))})):o.createElement("span",null,v)),o.createElement("span",{className:(0,f.q)("absolute inset-y-0 right-0 flex items-center mr-2.5")},o.createElement(l.Z,{className:(0,f.q)(h("arrowDownIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),R&&!E?o.createElement("button",{type:"button",className:(0,f.q)("absolute inset-y-0 right-0 flex items-center mr-8"),onClick:e=>{e.preventDefault(),C([]),null==b||b([])}},o.createElement(c.Z,{className:(0,f.q)(h("clearIconAllItems"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,f.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},o.createElement("div",{className:(0,f.q)("flex items-center w-full px-2.5","bg-tremor-background-muted","dark:bg-dark-tremor-background-muted")},o.createElement("span",null,o.createElement(s,{className:(0,f.q)("flex-none w-4 h-4 mr-2","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("input",{name:"search",type:"input",autoComplete:"off",placeholder:y,className:(0,f.q)("w-full focus:outline-none focus:ring-none bg-transparent text-tremor-default py-2","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis"),onKeyDown:e=>{"Space"===e.code&&""!==e.target.value&&e.stopPropagation()},onChange:e=>I(e.target.value),value:N})),o.createElement(a.Z.Provider,Object.assign({},{onBlur:{handleResetSearch:P}},{value:{selectedValue:t}}),_))))})});b.displayName="MultiSelect"},76628:function(e,t,n){n.d(t,{Z:function(){return u}});var r=n(69703);n(50027),n(18174),n(21871);var o=n(41213),a=n(64090),i=n(99250),l=n(65492),s=n(25163);let c=(0,l.fn)("MultiSelectItem"),u=a.forwardRef((e,t)=>{let{value:n,className:u,children:d}=e,p=(0,r._T)(e,["value","className","children"]),{selectedValue:f}=(0,a.useContext)(o.Z),m=(0,l.NZ)(n,f);return a.createElement(s.R.Option,Object.assign({className:(0,i.q)(c("root"),"flex justify-start items-center cursor-default text-tremor-default p-2.5","ui-active:bg-tremor-background-muted 
ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",u),ref:t,key:n,value:n},p),a.createElement("input",{type:"checkbox",className:(0,i.q)(c("checkbox"),"flex-none focus:ring-none focus:outline-none cursor-pointer mr-2.5","accent-tremor-brand","dark:accent-dark-tremor-brand"),checked:m,readOnly:!0}),a.createElement("span",{className:"whitespace-nowrap truncate"},null!=d?d:n))});u.displayName="MultiSelectItem"},95093:function(e,t,n){n.d(t,{Z:function(){return m}});var r=n(69703),o=n(64090),a=n(54518),i=n(8903),l=n(99250),s=n(65492),c=n(91753),u=n(25163),d=n(70129),p=n(46457);let f=(0,s.fn)("Select"),m=o.forwardRef((e,t)=>{let{defaultValue:n,value:s,onValueChange:m,placeholder:g="Select...",disabled:h=!1,icon:b,enableClear:v=!0,children:y,className:E}=e,w=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","disabled","icon","enableClear","children","className"]),[S,x]=(0,p.Z)(n,s),O=(0,o.useMemo)(()=>{let e=o.Children.toArray(y).filter(o.isValidElement);return(0,c.sl)(e)},[y]);return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:S,value:S,onChange:e=>{null==m||m(e),x(e)},disabled:h,className:(0,l.q)("w-full min-w-[10rem] relative text-tremor-default",E)},w),e=>{var t;let{value:n}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,l.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-2","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",b?"p-10 -ml-0.5":"pl-3",(0,c.um)((0,c.Uh)(n),h))},b&&o.createElement("span",{className:(0,l.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(b,{className:(0,l.q)(f("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("span",{className:"w-[90%] block truncate"},n&&null!==(t=O.get(n))&&void 0!==t?t:g),o.createElement("span",{className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-3")},o.createElement(a.Z,{className:(0,l.q)(f("arrowDownIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),v&&S?o.createElement("button",{type:"button",className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-8"),onClick:e=>{e.preventDefault(),x(""),null==m||m("")}},o.createElement(i.Z,{className:(0,l.q)(f("clearIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,l.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border 
dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},y)))})});m.displayName="Select"},27166:function(e,t,n){n.d(t,{Z:function(){return s}});var r=n(69703),o=n(64090),a=n(25163),i=n(99250);let l=(0,n(65492).fn)("SelectItem"),s=o.forwardRef((e,t)=>{let{value:n,icon:s,className:c,children:u}=e,d=(0,r._T)(e,["value","icon","className","children"]);return o.createElement(a.R.Option,Object.assign({className:(0,i.q)(l("root"),"flex justify-start items-center cursor-default text-tremor-default px-2.5 py-2.5","ui-active:bg-tremor-background-muted ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong ui-selected:bg-tremor-background-muted text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",c),ref:t,key:n,value:n},d),s&&o.createElement(s,{className:(0,i.q)(l("icon"),"flex-none w-5 h-5 mr-1.5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}),o.createElement("span",{className:"whitespace-nowrap truncate"},null!=u?u:n))});s.displayName="SelectItem"},12224:function(e,t,n){n.d(t,{Z:function(){return N}});var r=n(69703),o=n(64090),a=n(83891),i=n(20044),l=n(10641),s=n(92381),c=n(71454),u=n(36601),d=n(37700),p=n(84152),f=n(34797),m=n(18318),g=n(71014),h=n(67409),b=n(39790);let v=(0,o.createContext)(null),y=Object.assign((0,m.yV)(function(e,t){let n=(0,s.M)(),{id:r="headlessui-label-".concat(n),passive:a=!1,...i}=e,l=function e(){let t=(0,o.useContext)(v);if(null===t){let t=Error("You used a