diff --git a/.circleci/config.yml b/.circleci/config.yml
index 1ef8c0e33b..2727cd221b 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -58,6 +58,8 @@ jobs:
          pip install python-multipart
          pip install google-cloud-aiplatform
          pip install prometheus-client==0.20.0
+          pip install "pydantic==2.7.1"
+          pip install "diskcache==5.6.1"
      - save_cache:
          paths:
            - ./venv
@@ -198,6 +200,7 @@ jobs:
          -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
          -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
          -e AWS_REGION_NAME=$AWS_REGION_NAME \
+          -e AUTO_INFER_REGION=True \
          -e OPENAI_API_KEY=$OPENAI_API_KEY \
          -e LANGFUSE_PROJECT1_PUBLIC=$LANGFUSE_PROJECT1_PUBLIC \
          -e LANGFUSE_PROJECT2_PUBLIC=$LANGFUSE_PROJECT2_PUBLIC \
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000000..f0ced6bedb
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,10 @@
+# Add the commit hash of any commit you want to ignore in `git blame` here.
+# One commit hash per line.
+#
+# The GitHub Blame UI will use this file automatically!
+#
+# Run this command to always ignore formatting commits in `git blame`
+# git config blame.ignoreRevsFile .git-blame-ignore-revs
+
+# Update pydantic code to fix warnings (GH-3600)
+876840e9957bc7e9f7d6a2b58c4d7c53dad16481
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 51c5789717..b7a1643686 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,6 +1,3 @@
-
-
-
 ## Title
 
@@ -18,7 +15,6 @@
 🐛 Bug Fix
 🧹 Refactoring
 📖 Documentation
-💻 Development Environment
 🚄 Infrastructure
 ✅ Test
 
@@ -26,22 +22,8 @@
 
-## Testing
+## [REQUIRED] Testing - Attach a screenshot of any new tests passing locally
 
+If UI changes, send a screenshot/GIF of working UI fixes
 
-## Notes
-
-
-
-
-
-## Pre-Submission Checklist (optional but appreciated):
-
-- [ ] I have included relevant documentation updates (stored in /docs/my-website)
-
-## OS Tests (optional but appreciated):
-
-- [ ] Tested on Windows
-- [ ] Tested on MacOS
-- [ ] Tested on Linux
diff --git a/cookbook/liteLLM_clarifai_Demo.ipynb b/cookbook/liteLLM_clarifai_Demo.ipynb
new file mode 100644
index 0000000000..40ef2fcf93
--- /dev/null
+++ b/cookbook/liteLLM_clarifai_Demo.ipynb
@@ -0,0 +1,187 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# LiteLLM Clarifai \n",
+    "This notebook walks you through how to use the LiteLLM integration with Clarifai to call LLMs hosted on Clarifai and get responses in the OpenAI output format."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Pre-Requisites"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#install necessary packages\n",
+    "!pip install litellm\n",
+    "!pip install clarifai"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To obtain a Clarifai Personal Access Token, follow the steps in the [link](https://docs.clarifai.com/clarifai-basics/authentication/personal-access-tokens/)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## Set Clarifai Credentials\n",
+    "import os\n",
+    "os.environ[\"CLARIFAI_API_KEY\"]= \"YOUR_CLARIFAI_PAT\" # Clarifai PAT"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Mistral-large"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import litellm\n",
+    "\n",
+    "litellm.set_verbose=False"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Mistral large response : ModelResponse(id='chatcmpl-6eed494d-7ae2-4870-b9c2-6a64d50a6151', choices=[Choices(finish_reason='stop', index=1, message=Message(content=\"In the grand tapestry of time, where tales unfold,\\nLies the chronicle of ages, a sight to behold.\\nA tale of empires rising, and kings of old,\\nOf civilizations lost, and stories untold.\\n\\nOnce upon a yesterday, in a time so vast,\\nHumans took their first steps, casting shadows in the past.\\nFrom the cradle of mankind, a journey they embarked,\\nThrough stone and bronze and iron, their skills they sharpened and marked.\\n\\nEgyptians built pyramids, reaching for the skies,\\nWhile Greeks sought wisdom, truth, in philosophies that lie.\\nRoman legions marched, their empire to expand,\\nAnd in the East, the Silk Road joined the world, hand in hand.\\n\\nThe Middle Ages came, with knights in shining armor,\\nFeudal lords and serfs, a time of both clamor and calm order.\\nThen Renaissance bloomed, like a flower in the sun,\\nA rebirth of art and science, a new age had begun.\\n\\nAcross the vast oceans, explorers sailed with courage bold,\\nDiscovering new lands, stories of adventure, untold.\\nIndustrial Revolution churned, progress in its wake,\\nMachines and factories, a whole new world to make.\\n\\nTwo World Wars raged, a testament to man's strife,\\nYet from the ashes rose hope, a renewed will for life.\\nInto the modern era, technology took flight,\\nConnecting every corner, bathed in digital light.\\n\\nHistory, a symphony, a melody of time,\\nA testament to human will, resilience so sublime.\\nIn every page, a lesson, in every tale, a guide,\\nFor understanding our past, shapes our future's tide.\", role='assistant'))], created=1713896412, model='https://api.clarifai.com/v2/users/mistralai/apps/completion/models/mistral-large/outputs', object='chat.completion', system_fingerprint=None, usage=Usage(prompt_tokens=13, completion_tokens=338, total_tokens=351))\n"
+     ]
+    }
+   ],
+   "source": [
+    "from litellm import completion\n",
+    "\n",
+    "messages = [{\"role\": \"user\",\"content\": \"\"\"Write a poem about history?\"\"\"}]\n",
+    "response=completion(\n",
+    "    model=\"clarifai/mistralai.completion.mistral-large\",\n",
+    "    messages=messages,\n",
+    ")\n",
+    "\n",
+    "print(f\"Mistral large response : {response}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Claude-2.1 "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Claude-2.1 response : ModelResponse(id='chatcmpl-d126c919-4db4-4aa3-ac8f-7edea41e0b93', choices=[Choices(finish_reason='stop', index=1, message=Message(content=\" Here's a poem I wrote about history:\\n\\nThe Tides of Time\\n\\nThe tides of time ebb and flow,\\nCarrying stories of long ago.\\nFigures and events come into light,\\nShaping the future with all their might.\\n\\nKingdoms rise, empires fall, \\nLeaving traces that echo down every hall.\\nRevolutions bring change with a fiery glow,\\nToppling structures from long ago.\\n\\nExplorers traverse each ocean and land,\\nSeeking treasures they don't understand.\\nWhile artists and writers try to make their mark,\\nHoping their works shine bright in the dark.\\n\\nThe cycle repeats again and again,\\nAs humanity struggles to learn from its pain.\\nThough the players may change on history's stage,\\nThe themes stay the same from age to age.\\n\\nWar and peace, life and death,\\nLove and strife with every breath.\\nThe tides of time continue their dance,\\nAs we join in, by luck or by chance.\\n\\nSo we study the past to light the way forward, \\nHeeding warnings from stories told and heard.\\nThe future unfolds from this unending flow -\\nWhere the tides of time ultimately go.\", role='assistant'))], created=1713896579, model='https://api.clarifai.com/v2/users/anthropic/apps/completion/models/claude-2_1/outputs', object='chat.completion', system_fingerprint=None, usage=Usage(prompt_tokens=12, completion_tokens=232, total_tokens=244))\n"
+     ]
+    }
+   ],
+   "source": [
+    "from litellm import completion\n",
+    "\n",
+    "messages = [{\"role\": \"user\",\"content\": \"\"\"Write a poem about history?\"\"\"}]\n",
+    "response=completion(\n",
+    "    model=\"clarifai/anthropic.completion.claude-2_1\",\n",
+    "    messages=messages,\n",
+    ")\n",
+    "\n",
+    "print(f\"Claude-2.1 response : {response}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### OpenAI GPT-4 (Streaming)\n",
+    "Though Clarifai doesn't support streaming, you can still pass `stream=True` and get the response back in LiteLLM's standard streaming response format"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "ModelResponse(id='chatcmpl-40ae19af-3bf0-4eb4-99f2-33aec3ba84af', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=\"In the quiet corners of time's grand hall,\\nLies the tale of rise and fall.\\nFrom ancient ruins to modern sprawl,\\nHistory, the greatest story of them all.\\n\\nEmpires have risen, empires have decayed,\\nThrough the eons, memories have stayed.\\nIn the book of time, history is laid,\\nA tapestry of events, meticulously displayed.\\n\\nThe pyramids of Egypt, standing tall,\\nThe Roman Empire's mighty sprawl.\\nFrom Alexander's conquest, to the Berlin Wall,\\nHistory, a silent witness to it all.\\n\\nIn the shadow of the past we tread,\\nWhere once kings and prophets led.\\nTheir stories in our hearts are spread,\\nEchoes of their words, in our minds are read.\\n\\nBattles fought and victories won,\\nActs of courage under the sun.\\nTales of love, of deeds done,\\nIn history's grand book, they all run.\\n\\nHeroes born, legends made,\\nIn the annals of time, they'll never fade.\\nTheir triumphs and failures all displayed,\\nIn the eternal march of history's parade.\\n\\nThe ink of the past is forever dry,\\nBut its lessons, we cannot deny.\\nIn its stories, truths lie,\\nIn its wisdom, we rely.\\n\\nHistory, a mirror to our past,\\nA guide for the future vast.\\nThrough its lens, we're ever cast,\\nIn the drama of life, forever vast.\", role='assistant', function_call=None, tool_calls=None), logprobs=None)], created=1714744515, model='https://api.clarifai.com/v2/users/openai/apps/chat-completion/models/GPT-4/outputs', object='chat.completion.chunk', system_fingerprint=None)\n",
+      "ModelResponse(id='chatcmpl-40ae19af-3bf0-4eb4-99f2-33aec3ba84af', choices=[StreamingChoices(finish_reason='stop', index=0, delta=Delta(content=None, role=None, function_call=None, tool_calls=None), logprobs=None)], created=1714744515, model='https://api.clarifai.com/v2/users/openai/apps/chat-completion/models/GPT-4/outputs', object='chat.completion.chunk', system_fingerprint=None)\n"
+     ]
+    }
+   ],
+   "source": [
+    "from litellm import completion\n",
+    "\n",
+    "messages = [{\"role\": \"user\",\"content\": \"\"\"Write a poem about history?\"\"\"}]\n",
+    "response = completion(\n",
+    "    model=\"clarifai/openai.chat-completion.GPT-4\",\n",
+    "    messages=messages,\n",
+    "    stream=True,\n",
+    "    api_key = \"YOUR_CLARIFAI_PAT\")\n",
+    "\n",
+    "for chunk in response:\n",
+    "    print(chunk)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/my-website/docs/caching/redis_cache.md b/docs/my-website/docs/caching/all_caches.md
similarity index 80%
rename from docs/my-website/docs/caching/redis_cache.md
rename to docs/my-website/docs/caching/all_caches.md
index b00a118c12..eb309f9b8b 100644
--- a/docs/my-website/docs/caching/redis_cache.md
+++ b/docs/my-website/docs/caching/all_caches.md
@@ -1,7 +1,7 @@
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
-# Caching - In-Memory, Redis, s3, Redis Semantic Cache
+# Caching - In-Memory, Redis, s3, Redis Semantic Cache, Disk
 
 [**See Code**](https://github.com/BerriAI/litellm/blob/main/litellm/caching.py)
 
@@ -11,7 +11,7 @@ Need to use Caching on LiteLLM Proxy Server? Doc here: [Caching Proxy Server](ht
 
 :::
 
-## Initialize Cache - In Memory, Redis, s3 Bucket, Redis Semantic Cache
+## Initialize Cache - In Memory, Redis, s3 Bucket, Redis Semantic, Disk Cache
 
@@ -159,7 +159,7 @@ litellm.cache = Cache()
 # Make completion calls
 response1 = completion(
     model="gpt-3.5-turbo",
-    messages=[{"role": "user", "content": "Tell me a joke."}]
+    messages=[{"role": "user", "content": "Tell me a joke."}],
     caching=True
 )
 response2 = completion(
@@ -174,6 +174,43 @@ response2 = completion(
 
+
+
+### Quick Start
+
+Install diskcache:
+
+```shell
+pip install diskcache
+```
+
+Then you can use the disk cache as follows.
+
+```python
+import litellm
+from litellm import completion
+from litellm.caching import Cache
+litellm.cache = Cache(type="disk")
+
+# Make completion calls
+response1 = completion(
+    model="gpt-3.5-turbo",
+    messages=[{"role": "user", "content": "Tell me a joke."}],
+    caching=True
+)
+response2 = completion(
+    model="gpt-3.5-turbo",
+    messages=[{"role": "user", "content": "Tell me a joke."}],
+    caching=True
+)
+
+# response1 == response2, response 1 is cached
+
+```
+
+If you run the code twice, the second run will serve response1 from the cache file that was written during the first run.
+
 
@@ -191,13 +228,13 @@ Advanced Params
 
 ```python
 litellm.enable_cache(
-    type: Optional[Literal["local", "redis"]] = "local",
+    type: Optional[Literal["local", "redis", "s3", "disk"]] = "local",
     host: Optional[str] = None,
     port: Optional[str] = None,
     password: Optional[str] = None,
     supported_call_types: Optional[
-        List[Literal["completion", "acompletion", "embedding", "aembedding"]]
-    ] = ["completion", "acompletion", "embedding", "aembedding"],
+        List[Literal["completion", "acompletion", "embedding", "aembedding", "atranscription", "transcription"]]
+    ] = ["completion", "acompletion", "embedding", "aembedding", "atranscription", "transcription"],
     **kwargs,
 )
 ```
@@ -215,13 +252,13 @@ Update the Cache params
 
 ```python
 litellm.update_cache(
-    type: Optional[Literal["local", "redis"]] = "local",
+    type: Optional[Literal["local", "redis", "s3", "disk"]] = "local",
     host: Optional[str] = None,
     port: Optional[str] = None,
     password: Optional[str] = None,
     supported_call_types: Optional[
-        List[Literal["completion", "acompletion", "embedding", "aembedding"]]
-    ] = ["completion", "acompletion", "embedding", "aembedding"],
+        List[Literal["completion", "acompletion", "embedding", "aembedding", "atranscription", "transcription"]]
+    ] = ["completion", "acompletion", "embedding", "aembedding", "atranscription", "transcription"],
     **kwargs,
 )
 ```
@@ -276,22 +313,29 @@ cache.get_cache = get_cache
 ```python
 def __init__(
     self,
-    type: Optional[Literal["local", "redis", "s3"]] = "local",
+    type: Optional[Literal["local", "redis", "redis-semantic", "s3", "disk"]] = "local",
     supported_call_types: Optional[
-        List[Literal["completion", "acompletion", "embedding", "aembedding"]]
-    ] = ["completion", "acompletion", "embedding", "aembedding"], # A list of litellm call types to cache for. Defaults to caching for all litellm call types.
-
+        List[Literal["completion", "acompletion", "embedding", "aembedding", "atranscription", "transcription"]]
+    ] = ["completion", "acompletion", "embedding", "aembedding", "atranscription", "transcription"],
+    ttl: Optional[float] = None,
+    default_in_memory_ttl: Optional[float] = None,
+    # redis cache params
     host: Optional[str] = None,
     port: Optional[str] = None,
     password: Optional[str] = None,
-
+    namespace: Optional[str] = None,
+    default_in_redis_ttl: Optional[float] = None,
+    similarity_threshold: Optional[float] = None,
+    redis_semantic_cache_use_async=False,
+    redis_semantic_cache_embedding_model="text-embedding-ada-002",
+    redis_flush_size=None,
     # s3 Bucket, boto3 configuration
     s3_bucket_name: Optional[str] = None,
     s3_region_name: Optional[str] = None,
     s3_api_version: Optional[str] = None,
-    s3_path: Optional[str] = None, # if you wish to save to a spefic path
+    s3_path: Optional[str] = None, # if you wish to save to a specific path
     s3_use_ssl: Optional[bool] = True,
     s3_verify: Optional[Union[bool, str]] = None,
     s3_endpoint_url: Optional[str] = None,
@@ -299,7 +343,11 @@ def __init__(
     s3_aws_secret_access_key: Optional[str] = None,
     s3_aws_session_token: Optional[str] = None,
     s3_config: Optional[Any] = None,
-    **kwargs,
+
+    # disk cache params
+    disk_cache_dir=None,
+
+    **kwargs
 ):
 ```
diff --git a/docs/my-website/docs/caching/local_caching.md b/docs/my-website/docs/caching/local_caching.md
index d0e26e4bf9..81c4edcb82 100644
--- a/docs/my-website/docs/caching/local_caching.md
+++ b/docs/my-website/docs/caching/local_caching.md
@@ -40,7 +40,7 @@ cache = Cache()
 
 cache.add_cache(cache_key="test-key", result="1234")
 
-cache.get_cache(cache_key="test-key)
+cache.get_cache(cache_key="test-key")
 ```
 
 ## Caching with Streaming
diff --git a/docs/my-website/docs/completion/batching.md b/docs/my-website/docs/completion/batching.md
index 05683b3ddb..09f59f743d 100644
--- a/docs/my-website/docs/completion/batching.md
+++ b/docs/my-website/docs/completion/batching.md
@@ -4,6 +4,12 @@ LiteLLM allows you to:
 * Send 1 completion call to many models: Return Fastest Response
 * Send 1 completion call to many models: Return All Responses
 
+:::info
+
+Trying to do batch completion on LiteLLM Proxy ? Go here: https://docs.litellm.ai/docs/proxy/user_keys#beta-batch-completions---pass-model-as-list
+
+:::
+
 ## Send multiple completion calls to 1 model
 
 In the batch_completion method, you provide a list of `messages` where each sub-list of messages is passed to `litellm.completion()`, allowing you to process multiple prompts efficiently in a single API call.
diff --git a/docs/my-website/docs/completion/input.md b/docs/my-website/docs/completion/input.md
index 451deaac47..ba01dd9d87 100644
--- a/docs/my-website/docs/completion/input.md
+++ b/docs/my-website/docs/completion/input.md
@@ -37,11 +37,12 @@ print(response)
 # ["max_tokens", "tools", "tool_choice", "stream"]
 
 This is a list of openai params we translate across providers.
-This list is constantly being updated.
+Use `litellm.get_supported_openai_params()` for an updated list of params for each model + provider
 
 | Provider | temperature | max_tokens | top_p | stream | stop | n | presence_penalty | frequency_penalty | functions | function_call | logit_bias | user | response_format | seed | tools | tool_choice | logprobs | top_logprobs | extra_headers |
 |---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|--|
-|Anthropic| ✅ | ✅ | ✅ | ✅ | ✅ | | | | | |
+|Anthropic| ✅ | ✅ | ✅ | ✅ | ✅ | | | | | | | | ✅ | ✅ | ✅ | ✅ |
 |OpenAI| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ | ✅ |
 |Azure OpenAI| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ |✅ | ✅ | | | ✅ |
 |Replicate | ✅ | ✅ | ✅ | ✅ | ✅ | | | | | |
diff --git a/docs/my-website/docs/exception_mapping.md b/docs/my-website/docs/exception_mapping.md
index 2345e9f838..5e6006ebee 100644
--- a/docs/my-website/docs/exception_mapping.md
+++ b/docs/my-website/docs/exception_mapping.md
@@ -106,11 +106,12 @@ To see how it's implemented - [check out the code](https://github.com/BerriAI/li
 
 ## Custom mapping list
 
-Base case - we return the original exception.
+Base case - we return the `litellm.APIConnectionError` exception (inherits from openai's APIConnectionError exception).
 
 | custom_llm_provider | Timeout | ContextWindowExceededError | BadRequestError | NotFoundError | ContentPolicyViolationError | AuthenticationError | APIError | RateLimitError | ServiceUnavailableError | PermissionDeniedError | UnprocessableEntityError |
 |----------------------------|---------|----------------------------|------------------|---------------|-----------------------------|---------------------|----------|----------------|-------------------------|-----------------------|-------------------------|
 | openai | ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | |
+| watsonx | | | | | | | | ✓ | | | |
 | text-completion-openai | ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | |
 | custom_openai | ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | |
 | openai_compatible_providers| ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | |
diff --git a/docs/my-website/docs/observability/lago.md b/docs/my-website/docs/observability/lago.md
new file mode 100644
index 0000000000..337a2b553e
--- /dev/null
+++ b/docs/my-website/docs/observability/lago.md
@@ -0,0 +1,173 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Lago - Usage Based Billing
+
+[Lago](https://www.getlago.com/) offers a self-hosted or cloud-based metering and usage-based billing solution.
+
+
+## Quick Start
+Use just 1 line of code to instantly log your responses **across all providers** with Lago
+
+Get your Lago [API Key](https://docs.getlago.com/guide/self-hosted/docker#find-your-api-key)
+
+```python
+litellm.callbacks = ["lago"] # logs cost + usage of successful calls to lago
+```
+
+
+
+
+
+```python
+# pip install lago
+import litellm
+import os
+
+os.environ["LAGO_API_BASE"] = "" # http://0.0.0.0:3000
+os.environ["LAGO_API_KEY"] = ""
+os.environ["LAGO_API_EVENT_CODE"] = "" # The billable metric's code - https://docs.getlago.com/guide/events/ingesting-usage#define-a-billable-metric
+
+# LLM API Keys
+os.environ['OPENAI_API_KEY']=""
+
+# set lago as a callback, litellm will send the data to lago
+litellm.success_callback = ["lago"]
+
+# openai call
+response = litellm.completion(
+  model="gpt-3.5-turbo",
+  messages=[
+    {"role": "user", "content": "Hi 👋 - i'm openai"}
+  ],
+  user="your_customer_id" # 👈 SET YOUR CUSTOMER ID HERE
+)
+```
+
+
+
+
+1. Add to Config.yaml
+```yaml
+model_list:
+- litellm_params:
+    api_base: https://openai-function-calling-workers.tasslexyz.workers.dev/
+    api_key: my-fake-key
+    model: openai/my-fake-model
+  model_name: fake-openai-endpoint
+
+litellm_settings:
+  callbacks: ["lago"] # 👈 KEY CHANGE
+```
+
+2. Start Proxy
+
+```
+litellm --config /path/to/config.yaml
+```
+
+3. Test it!
+
+
+
+
+```bash
+curl --location 'http://0.0.0.0:4000/chat/completions' \
+--header 'Content-Type: application/json' \
+--data ' {
+      "model": "fake-openai-endpoint",
+      "messages": [
+        {
+          "role": "user",
+          "content": "what llm are you"
+        }
+      ],
+      "user": "your-customer-id" # 👈 SET YOUR CUSTOMER ID
+    }
+'
+```
+
+
+
+```python
+import openai
+client = openai.OpenAI(
+    api_key="anything",
+    base_url="http://0.0.0.0:4000"
+)
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
+    {
+        "role": "user",
+        "content": "this is a test request, write a short poem"
+    }
+], user="my_customer_id") # 👈 whatever your customer id is
+
+print(response)
+```
+
+
+
+```python
+from langchain.chat_models import ChatOpenAI
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    SystemMessagePromptTemplate,
+)
+from langchain.schema import HumanMessage, SystemMessage
+import os
+
+os.environ["OPENAI_API_KEY"] = "anything"
+
+chat = ChatOpenAI(
+    openai_api_base="http://0.0.0.0:4000",
+    model = "gpt-3.5-turbo",
+    temperature=0.1,
+    extra_body={
+        "user": "my_customer_id" # 👈 whatever your customer id is
+    }
+)
+
+messages = [
+    SystemMessage(
+        content="You are a helpful assistant that im using to make a test request to."
+    ),
+    HumanMessage(
+        content="test from litellm. tell me why it's amazing in 1 sentence"
+    ),
+]
+response = chat(messages)
+
+print(response)
+```
+
+
+
+
+
+
+
+
+## Advanced - Lago Logging object
+
+This is what LiteLLM will log to Lago
+
+```
+{
+    "event": {
+      "transaction_id": "",
+      "external_customer_id": , # passed via `user` param in /chat/completion call - https://platform.openai.com/docs/api-reference/chat/create
+      "code": os.getenv("LAGO_API_EVENT_CODE"),
+      "properties": {
+          "input_tokens": ,
+          "output_tokens": ,
+          "model": ,
+          "response_cost": , # 👈 LITELLM CALCULATED RESPONSE COST - https://github.com/BerriAI/litellm/blob/d43f75150a65f91f60dc2c0c9462ce3ffc713c1f/litellm/utils.py#L1473
+      }
+    }
+}
+```
\ No newline at end of file
diff --git a/docs/my-website/docs/observability/langfuse_integration.md b/docs/my-website/docs/observability/langfuse_integration.md
index ebf20b6335..6dd5377ea7 100644
--- a/docs/my-website/docs/observability/langfuse_integration.md
+++ b/docs/my-website/docs/observability/langfuse_integration.md
@@ -136,6 +136,7 @@ response = completion(
       "existing_trace_id": "trace-id22",
       "trace_metadata": {"key": "updated_trace_value"}, # The new value to use for the langfuse Trace Metadata
       "update_trace_keys": ["input", "output", "trace_metadata"], # Updates the trace input & output to be this generations input & output also updates the Trace Metadata to match the passed in value
+      "debug_langfuse": True, # Will log the exact metadata sent to litellm for the trace/generation as `metadata_passed_to_litellm`
   },
 )
 
@@ -147,7 +148,7 @@ print(response)
 
 #### Trace Specific Parameters
 
-* `trace_id` - Identifier for the trace, must use `existing_trace_id` instead or in conjunction with `trace_id` if this is an existing trace, auto-generated by default
+* `trace_id` - Identifier for the trace, must use `existing_trace_id` instead of `trace_id` if this is an existing trace, auto-generated by default
 * `trace_name` - Name of the trace, auto-generated by default
 * `session_id` - Session identifier for the trace, defaults to `None`
 * `trace_version` - Version for the trace, defaults to value for `version`
@@ -212,8 +213,20 @@ chat(messages)
 
 ## Redacting Messages, Response Content from Langfuse Logging
 
+### Redact Messages and Responses from all Langfuse Logging
+
 Set `litellm.turn_off_message_logging=True` This will prevent the messages and responses from being logged to langfuse, but request metadata will still be logged.
 
+### Redact Messages and Responses from specific Langfuse Logging
+
+In the metadata typically passed for text completion or embedding calls, you can set specific keys to mask the messages and responses for this call.
+
+Setting `mask_input` to `True` will mask the input from being logged for this call
+
+Setting `mask_output` to `True` will mask the output from being logged for this call.
+
+Be aware that if you are continuing an existing trace, and you set `update_trace_keys` to include either `input` or `output` and you set the corresponding `mask_input` or `mask_output`, then that trace will have its existing input and/or output replaced with a redacted message.
+
 ## Troubleshooting & Errors
 ### Data not getting logged to Langfuse ?
 - Ensure you're on the latest version of langfuse `pip install langfuse -U`. The latest version allows litellm to log JSON input/outputs to langfuse
diff --git a/docs/my-website/docs/observability/openmeter.md b/docs/my-website/docs/observability/openmeter.md
index 64d9c39d21..2f53568757 100644
--- a/docs/my-website/docs/observability/openmeter.md
+++ b/docs/my-website/docs/observability/openmeter.md
@@ -20,7 +20,7 @@ Use just 2 lines of code, to instantly log your responses **across all providers
 Get your OpenMeter API Key from https://openmeter.cloud/meters
 
 ```python
-litellm.success_callback = ["openmeter"] # logs cost + usage of successful calls to openmeter
+litellm.callbacks = ["openmeter"] # logs cost + usage of successful calls to openmeter
 ```
 
@@ -28,7 +28,7 @@
 
 ```python
-# pip install langfuse
+# pip install openmeter
 import litellm
 import os
 
@@ -39,8 +39,8 @@ os.environ["OPENMETER_API_KEY"] = ""
 # LLM API Keys
 os.environ['OPENAI_API_KEY']=""
 
-# set langfuse as a callback, litellm will send the data to langfuse
-litellm.success_callback = ["openmeter"]
+# set openmeter as a callback, litellm will send the data to openmeter
+litellm.callbacks = ["openmeter"]
 
 # openai call
 response = litellm.completion(
@@ -64,7 +64,7 @@ model_list:
   model_name: fake-openai-endpoint
 
 litellm_settings:
-  success_callback: ["openmeter"] # 👈 KEY CHANGE
+  callbacks: ["openmeter"] # 👈 KEY CHANGE
 ```
 
 2. Start Proxy
diff --git a/docs/my-website/docs/providers/clarifai.md b/docs/my-website/docs/providers/clarifai.md
new file mode 100644
index 0000000000..acc8c54bef
--- /dev/null
+++ b/docs/my-website/docs/providers/clarifai.md
@@ -0,0 +1,177 @@
+
+# Clarifai
+Anthropic, OpenAI, Mistral, Llama and Gemini LLMs are supported on Clarifai.
+
+## Pre-Requisites
+
+`pip install clarifai`
+
+`pip install litellm`
+
+## Required Environment Variables
+To obtain your Clarifai Personal Access Token, follow this [link](https://docs.clarifai.com/clarifai-basics/authentication/personal-access-tokens/). Optionally, the PAT can also be passed into the `completion` function.
+
+```python
+os.environ["CLARIFAI_API_KEY"] = "YOUR_CLARIFAI_PAT" # CLARIFAI_PAT
+```
+
+## Usage
+
+```python
+import os
+from litellm import completion
+
+os.environ["CLARIFAI_API_KEY"] = ""
+
+response = completion(
+  model="clarifai/mistralai.completion.mistral-large",
+  messages=[{ "content": "Tell me a joke about physics?","role": "user"}]
+)
+```
+
+**Output**
+```json
+{
+    "id": "chatcmpl-572701ee-9ab2-411c-ac75-46c1ba18e781",
+    "choices": [
+      {
+        "finish_reason": "stop",
+        "index": 1,
+        "message": {
+          "content": "Sure, here's a physics joke for you:\n\nWhy can't you trust an atom?\n\nBecause they make up everything!",
+          "role": "assistant"
+        }
+      }
+    ],
+    "created": 1714410197,
+    "model": "https://api.clarifai.com/v2/users/mistralai/apps/completion/models/mistral-large/outputs",
+    "object": "chat.completion",
+    "system_fingerprint": null,
+    "usage": {
+      "prompt_tokens": 14,
+      "completion_tokens": 24,
+      "total_tokens": 38
+    }
+  }
+```
+
+## Clarifai models
+liteLLM supports non-streaming requests to all models on [Clarifai community](https://clarifai.com/explore/models?filterData=%5B%7B%22field%22%3A%22use_cases%22%2C%22value%22%3A%5B%22llm%22%5D%7D%5D&page=1&perPage=24)
+
+Example Usage - Note: liteLLM supports all models deployed on Clarifai
+
+## Llama LLMs
+| Model Name | Function Call |
+|---------------------------|---------------------------------|
+| clarifai/meta.Llama-2.llama2-7b-chat | `completion('clarifai/meta.Llama-2.llama2-7b-chat', messages)` |
+| clarifai/meta.Llama-2.llama2-13b-chat | `completion('clarifai/meta.Llama-2.llama2-13b-chat', messages)` |
+| clarifai/meta.Llama-2.llama2-70b-chat | `completion('clarifai/meta.Llama-2.llama2-70b-chat', messages)` |
+| clarifai/meta.Llama-2.codeLlama-70b-Python | `completion('clarifai/meta.Llama-2.codeLlama-70b-Python', messages)`|
+| clarifai/meta.Llama-2.codeLlama-70b-Instruct | `completion('clarifai/meta.Llama-2.codeLlama-70b-Instruct', messages)` |
+
+## Mistral LLMs
+| Model Name | Function Call |
+|---------------------------------------------|------------------------------------------------------------------------|
+| clarifai/mistralai.completion.mixtral-8x22B | `completion('clarifai/mistralai.completion.mixtral-8x22B', messages)` |
+| clarifai/mistralai.completion.mistral-large | `completion('clarifai/mistralai.completion.mistral-large', messages)` |
+| clarifai/mistralai.completion.mistral-medium | `completion('clarifai/mistralai.completion.mistral-medium', messages)` |
+| clarifai/mistralai.completion.mistral-small | `completion('clarifai/mistralai.completion.mistral-small', messages)` |
+| clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1 | `completion('clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1', messages)` |
+| clarifai/mistralai.completion.mistral-7B-OpenOrca | `completion('clarifai/mistralai.completion.mistral-7B-OpenOrca', messages)` |
+| clarifai/mistralai.completion.openHermes-2-mistral-7B | `completion('clarifai/mistralai.completion.openHermes-2-mistral-7B', messages)` |
+
+
+## Jurassic LLMs
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/ai21.complete.Jurassic2-Grande | `completion('clarifai/ai21.complete.Jurassic2-Grande', messages)` |
+| clarifai/ai21.complete.Jurassic2-Grande-Instruct | `completion('clarifai/ai21.complete.Jurassic2-Grande-Instruct', messages)` |
+| clarifai/ai21.complete.Jurassic2-Jumbo-Instruct | `completion('clarifai/ai21.complete.Jurassic2-Jumbo-Instruct', messages)` |
+| clarifai/ai21.complete.Jurassic2-Jumbo | `completion('clarifai/ai21.complete.Jurassic2-Jumbo', messages)` |
+| clarifai/ai21.complete.Jurassic2-Large | `completion('clarifai/ai21.complete.Jurassic2-Large', messages)` |
+
+## Wizard LLMs
+
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/wizardlm.generate.wizardCoder-Python-34B | `completion('clarifai/wizardlm.generate.wizardCoder-Python-34B', messages)` |
+| clarifai/wizardlm.generate.wizardLM-70B | `completion('clarifai/wizardlm.generate.wizardLM-70B', messages)` |
+| clarifai/wizardlm.generate.wizardLM-13B | `completion('clarifai/wizardlm.generate.wizardLM-13B', messages)` |
+| clarifai/wizardlm.generate.wizardCoder-15B | `completion('clarifai/wizardlm.generate.wizardCoder-15B', messages)` |
+
+## Anthropic models
+
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/anthropic.completion.claude-v1 | `completion('clarifai/anthropic.completion.claude-v1', messages)` |
+| clarifai/anthropic.completion.claude-instant-1_2 | `completion('clarifai/anthropic.completion.claude-instant-1_2', messages)` |
+| clarifai/anthropic.completion.claude-instant | `completion('clarifai/anthropic.completion.claude-instant', messages)` |
+| clarifai/anthropic.completion.claude-v2 | `completion('clarifai/anthropic.completion.claude-v2', messages)` |
+| clarifai/anthropic.completion.claude-2_1 | `completion('clarifai/anthropic.completion.claude-2_1', messages)` |
+| clarifai/anthropic.completion.claude-3-opus | `completion('clarifai/anthropic.completion.claude-3-opus', messages)` |
+| clarifai/anthropic.completion.claude-3-sonnet | `completion('clarifai/anthropic.completion.claude-3-sonnet', messages)` |
+
+## OpenAI GPT LLMs
+
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/openai.chat-completion.GPT-4 | `completion('clarifai/openai.chat-completion.GPT-4', messages)` |
+| clarifai/openai.chat-completion.GPT-3_5-turbo | `completion('clarifai/openai.chat-completion.GPT-3_5-turbo', messages)` |
+| clarifai/openai.chat-completion.gpt-4-turbo | `completion('clarifai/openai.chat-completion.gpt-4-turbo', messages)` |
+| clarifai/openai.completion.gpt-3_5-turbo-instruct | `completion('clarifai/openai.completion.gpt-3_5-turbo-instruct', messages)` |
+
+## GCP LLMs
+
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/gcp.generate.gemini-1_5-pro | `completion('clarifai/gcp.generate.gemini-1_5-pro', messages)` |
+| clarifai/gcp.generate.imagen-2 | `completion('clarifai/gcp.generate.imagen-2', messages)` |
+| clarifai/gcp.generate.code-gecko | `completion('clarifai/gcp.generate.code-gecko', messages)` |
+| clarifai/gcp.generate.code-bison | `completion('clarifai/gcp.generate.code-bison', messages)` |
+| clarifai/gcp.generate.text-bison | `completion('clarifai/gcp.generate.text-bison', messages)` |
+| clarifai/gcp.generate.gemma-2b-it | `completion('clarifai/gcp.generate.gemma-2b-it', messages)` |
+| clarifai/gcp.generate.gemma-7b-it | `completion('clarifai/gcp.generate.gemma-7b-it', messages)` |
+| clarifai/gcp.generate.gemini-pro | `completion('clarifai/gcp.generate.gemini-pro', messages)` |
+| clarifai/gcp.generate.gemma-1_1-7b-it | `completion('clarifai/gcp.generate.gemma-1_1-7b-it', messages)` |
+
+## Cohere LLMs
+| Model Name | Function Call |
+|-----------------------------------------------|---------------------------------------------------------------------|
+| clarifai/cohere.generate.cohere-generate-command | `completion('clarifai/cohere.generate.cohere-generate-command', messages)` |
+| clarifai/cohere.generate.command-r-plus | `completion('clarifai/cohere.generate.command-r-plus', messages)` |
+
+## Databricks LLMs
+
+| Model Name | Function Call |
+|---------------------------------------------------|---------------------------------------------------------------------|
+| clarifai/databricks.drbx.dbrx-instruct | `completion('clarifai/databricks.drbx.dbrx-instruct', messages)` |
+| clarifai/databricks.Dolly-v2.dolly-v2-12b | `completion('clarifai/databricks.Dolly-v2.dolly-v2-12b', messages)`|
+
+## Microsoft LLMs
+
+| Model Name | Function Call |
+|---------------------------------------------------|---------------------------------------------------------------------|
+| clarifai/microsoft.text-generation.phi-2 | `completion('clarifai/microsoft.text-generation.phi-2', messages)` |
+| clarifai/microsoft.text-generation.phi-1_5 | `completion('clarifai/microsoft.text-generation.phi-1_5', messages)`|
+
+## Salesforce models
+
+| Model Name | Function Call |
+|-----------------------------------------------------------|-------------------------------------------------------------------------------|
+| clarifai/salesforce.blip.general-english-image-caption-blip-2 | `completion('clarifai/salesforce.blip.general-english-image-caption-blip-2', messages)` |
+| clarifai/salesforce.xgen.xgen-7b-8k-instruct | `completion('clarifai/salesforce.xgen.xgen-7b-8k-instruct', messages)` |
+
+
+## Other Top performing LLMs
+
+| Model Name | Function Call |
+|---------------------------------------------------|---------------------------------------------------------------------|
+| clarifai/deci.decilm.deciLM-7B-instruct | `completion('clarifai/deci.decilm.deciLM-7B-instruct', messages)` |
+| clarifai/upstage.solar.solar-10_7b-instruct | `completion('clarifai/upstage.solar.solar-10_7b-instruct', messages)` |
+| clarifai/openchat.openchat.openchat-3_5-1210 | `completion('clarifai/openchat.openchat.openchat-3_5-1210', messages)` |
+| clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B | `completion('clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B', messages)` |
+| clarifai/fblgit.una-cybertron.una-cybertron-7b-v2 | `completion('clarifai/fblgit.una-cybertron.una-cybertron-7b-v2', messages)` |
+| clarifai/tiiuae.falcon.falcon-40b-instruct | `completion('clarifai/tiiuae.falcon.falcon-40b-instruct', messages)` |
+| clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat | `completion('clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat', messages)` |
+| clarifai/bigcode.code.StarCoder | `completion('clarifai/bigcode.code.StarCoder', messages)` |
+| clarifai/mosaicml.mpt.mpt-7b-instruct | `completion('clarifai/mosaicml.mpt.mpt-7b-instruct', messages)` |
diff --git a/docs/my-website/docs/providers/huggingface.md b/docs/my-website/docs/providers/huggingface.md
index f8ebadfcfa..35befd3e20 100644
--- a/docs/my-website/docs/providers/huggingface.md
+++ b/docs/my-website/docs/providers/huggingface.md
@@ -21,6 +21,11 @@ This is done by adding the "huggingface/" prefix to `model`, example `completion
 
+By default, LiteLLM will assume a huggingface call follows the TGI format.
+
+
+
+
 ```python
 import os
 from litellm import completion
@@ -40,9 +45,58 @@ response = completion(
 print(response)
 ```
 
+
+
+1. Add models to your config.yaml
+
+  ```yaml
+  model_list:
+    - model_name: wizard-coder
+      litellm_params:
+        model: huggingface/WizardLM/WizardCoder-Python-34B-V1.0
+        api_key: os.environ/HUGGINGFACE_API_KEY
+        api_base: "https://my-endpoint.endpoints.huggingface.cloud"
+  ```
+
+
+
+2. Start the proxy
+
+  ```bash
+  $ litellm --config /path/to/config.yaml --debug
+  ```
+
+3. Test it!
+
+  ```shell
+  curl --location 'http://0.0.0.0:4000/chat/completions' \
+  --header 'Authorization: Bearer sk-1234' \
+  --header 'Content-Type: application/json' \
+  --data '{
+        "model": "wizard-coder",
+        "messages": [
+          {
+            "role": "user",
+            "content": "I like you!"
+          }
+        ]
+      }'
+  ```
+
+
+
+
+Append `conversational` to the model name
+
+e.g. `huggingface/conversational/`
+
+
+
+
 ```python
 import os
 from litellm import completion
@@ -54,7 +108,7 @@ messages = [{ "content": "There's a llama in my garden 😱 What should I do?","role": "user"}]
 
 # e.g. Call 'facebook/blenderbot-400M-distill' hosted on HF Inference endpoints
 response = completion(
-    model="huggingface/facebook/blenderbot-400M-distill",
+    model="huggingface/conversational/facebook/blenderbot-400M-distill",
     messages=messages,
     api_base="https://my-endpoint.huggingface.cloud"
 )
@@ -62,7 +116,123 @@ response = completion(
 print(response)
 ```
-
+
+
+1. Add models to your config.yaml
+
+  ```yaml
+  model_list:
+    - model_name: blenderbot
+      litellm_params:
+        model: huggingface/conversational/facebook/blenderbot-400M-distill
+        api_key: os.environ/HUGGINGFACE_API_KEY
+        api_base: "https://my-endpoint.endpoints.huggingface.cloud"
+  ```
+
+
+
+2. Start the proxy
+
+  ```bash
+  $ litellm --config /path/to/config.yaml --debug
+  ```
+
+3. Test it!
+
+  ```shell
+  curl --location 'http://0.0.0.0:4000/chat/completions' \
+  --header 'Authorization: Bearer sk-1234' \
+  --header 'Content-Type: application/json' \
+  --data '{
+        "model": "blenderbot",
+        "messages": [
+          {
+            "role": "user",
+            "content": "I like you!"
+          }
+        ]
+      }'
+  ```
+
+
+
+
+
+
+
+Append `text-classification` to the model name
+
+e.g. `huggingface/text-classification/`
+
+
+
+
+```python
+import os
+from litellm import completion
+
+# [OPTIONAL] set env var
+os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
+
+messages = [{ "content": "I like you, I love you!","role": "user"}]
+
+# e.g. Call 'shahrukhx01/question-vs-statement-classifier' hosted on HF Inference endpoints
+response = completion(
+    model="huggingface/text-classification/shahrukhx01/question-vs-statement-classifier",
+    messages=messages,
+    api_base="https://my-endpoint.endpoints.huggingface.cloud",
+)
+
+print(response)
+```
+
+
+
+1. Add models to your config.yaml
+
+  ```yaml
+  model_list:
+    - model_name: bert-classifier
+      litellm_params:
+        model: huggingface/text-classification/shahrukhx01/question-vs-statement-classifier
+        api_key: os.environ/HUGGINGFACE_API_KEY
+        api_base: "https://my-endpoint.endpoints.huggingface.cloud"
+  ```
+
+
+
+2. Start the proxy
+
+  ```bash
+  $ litellm --config /path/to/config.yaml --debug
+  ```
+
+3. Test it!
+
+  ```shell
+  curl --location 'http://0.0.0.0:4000/chat/completions' \
+  --header 'Authorization: Bearer sk-1234' \
+  --header 'Content-Type: application/json' \
+  --data '{
+        "model": "bert-classifier",
+        "messages": [
+          {
+            "role": "user",
+            "content": "I like you!"
+          }
+        ]
+      }'
+  ```
+
+
+
+
+
+
+
+Append `text-generation` to the model name
+
+e.g. `huggingface/text-generation/`
 
 ```python
 import os
 from litellm import completion
@@ -75,7 +245,7 @@ messages = [{ "content": "There's a llama in my garden 😱 What should I do?","role": "user"}]
 
 # e.g. Call 'roneneldan/TinyStories-3M' hosted on HF Inference endpoints
 response = completion(
-    model="huggingface/roneneldan/TinyStories-3M",
+    model="huggingface/text-generation/roneneldan/TinyStories-3M",
     messages=messages,
     api_base="https://p69xlsj6rpno5drq.us-east-1.aws.endpoints.huggingface.cloud",
 )
diff --git a/docs/my-website/docs/providers/ollama.md b/docs/my-website/docs/providers/ollama.md
index 1c913c08c8..c1c8fc57c8 100644
--- a/docs/my-website/docs/providers/ollama.md
+++ b/docs/my-website/docs/providers/ollama.md
@@ -101,13 +101,19 @@ Ollama supported models: https://github.com/ollama/ollama
 
 | Model Name | Function Call |
 |----------------------|-----------------------------------------------------------------------------------
-| Mistral | `completion(model='ollama/mistral', messages, api_base="http://localhost:11434", stream=True)` |
+| Mistral | `completion(model='ollama/mistral', messages, api_base="http://localhost:11434", stream=True)` |
+| Mistral-7B-Instruct-v0.1 | `completion(model='ollama/mistral-7B-Instruct-v0.1', messages, api_base="http://localhost:11434", stream=False)` |
+| Mistral-7B-Instruct-v0.2 | `completion(model='ollama/mistral-7B-Instruct-v0.2', messages, api_base="http://localhost:11434", stream=False)` |
+| Mixtral-8x7B-Instruct-v0.1 | `completion(model='ollama/mixtral-8x7B-Instruct-v0.1', messages, api_base="http://localhost:11434", stream=False)` |
+| Mixtral-8x22B-Instruct-v0.1 | `completion(model='ollama/mixtral-8x22B-Instruct-v0.1', messages, api_base="http://localhost:11434", stream=False)` |
 | Llama2 7B | `completion(model='ollama/llama2', messages, api_base="http://localhost:11434", stream=True)` |
 | Llama2 13B | `completion(model='ollama/llama2:13b', messages, api_base="http://localhost:11434", stream=True)` |
 | Llama2 70B | `completion(model='ollama/llama2:70b', messages, api_base="http://localhost:11434", stream=True)` |
 | Llama2 Uncensored | `completion(model='ollama/llama2-uncensored', messages, api_base="http://localhost:11434", stream=True)` |
 | Code Llama | `completion(model='ollama/codellama', messages, api_base="http://localhost:11434", stream=True)` |
-| Llama2 Uncensored | `completion(model='ollama/llama2-uncensored', messages, api_base="http://localhost:11434", stream=True)` |
+| Llama2 Uncensored | `completion(model='ollama/llama2-uncensored', messages, api_base="http://localhost:11434", stream=True)` |
+| Meta LLaMa3 8B | `completion(model='ollama/llama3', messages, api_base="http://localhost:11434", stream=False)` |
+| Meta LLaMa3 70B | `completion(model='ollama/llama3:70b', messages, api_base="http://localhost:11434", stream=False)` |
 | Orca Mini | `completion(model='ollama/orca-mini', messages, api_base="http://localhost:11434", stream=True)` |
 | Vicuna | `completion(model='ollama/vicuna', messages, api_base="http://localhost:11434", stream=True)` |
 | Nous-Hermes | `completion(model='ollama/nous-hermes', messages, api_base="http://localhost:11434", stream=True)` |
diff --git a/docs/my-website/docs/providers/openai.md b/docs/my-website/docs/providers/openai.md
index a8fe541fc8..2f261ce178 100644
--- a/docs/my-website/docs/providers/openai.md
+++ b/docs/my-website/docs/providers/openai.md
@@ -20,7 +20,7 @@ os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 # openai call
 response = completion(
-    model = "gpt-3.5-turbo",
+    model = "gpt-4o",
    messages=[{ "content": "Hello, how are you?","role": "user"}]
 )
 ```
@@ -163,6 +163,8 @@ os.environ["OPENAI_API_BASE"] = "openai-api-base" # OPTIONAL
 
 | Model Name | Function Call |
 |-----------------------|-----------------------------------------------------------------|
+| gpt-4o | `response = completion(model="gpt-4o", messages=messages)` |
+| gpt-4o-2024-05-13 | `response = completion(model="gpt-4o-2024-05-13", messages=messages)` |
 | gpt-4-turbo | `response = completion(model="gpt-4-turbo", messages=messages)` |
 | gpt-4-turbo-preview | `response = completion(model="gpt-4-0125-preview", messages=messages)` |
 | gpt-4-0125-preview | `response = completion(model="gpt-4-0125-preview", messages=messages)` |
@@ -186,6 +188,7 @@ These also support the `OPENAI_API_BASE` environment variable, which can be used
 
 ## OpenAI Vision Models
 | Model Name | Function Call |
 |-----------------------|-----------------------------------------------------------------|
+| gpt-4o | `response = completion(model="gpt-4o", messages=messages)` |
 | gpt-4-turbo | `response = completion(model="gpt-4-turbo", messages=messages)` |
 | gpt-4-vision-preview | `response = completion(model="gpt-4-vision-preview", messages=messages)` |
 
diff --git a/docs/my-website/docs/providers/triton-inference-server.md b/docs/my-website/docs/providers/triton-inference-server.md
new file mode 100644
index 0000000000..aacc46a399
--- /dev/null
+++ b/docs/my-website/docs/providers/triton-inference-server.md
@@ -0,0 +1,95 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Triton Inference Server
+
+LiteLLM supports Embedding Models on Triton Inference Servers
+
+
+## Usage
+
+
+
+
+### Example Call
+
+Use the `triton/` prefix to route to triton server
+```python
+import litellm
+import os
+
+response = await litellm.aembedding(
+    model="triton/",
+    api_base="https://your-triton-api-base/triton/embeddings", # /embeddings endpoint you want litellm to call on your server
+    input=["good morning from litellm"],
+)
+```
+
+
+
+1. Add models to your config.yaml
+
+  ```yaml
+  model_list:
+    - model_name: my-triton-model
+      litellm_params:
+        model: triton/
+        api_base: https://your-triton-api-base/triton/embeddings
+  ```
+
+
+2. Start the proxy
+
+  ```bash
+  $ litellm --config /path/to/config.yaml --detailed_debug
+  ```
+
+3. Send Request to LiteLLM Proxy Server
+
+
+
+
+  ```python
+  import openai
+  from openai import OpenAI
+
+  # set base_url to your proxy server
+  # set api_key to send to proxy server
+  client = OpenAI(api_key="", base_url="http://0.0.0.0:4000")
+
+  response = client.embeddings.create(
+      input=["hello from litellm"],
+      model="my-triton-model"
+  )
+
+  print(response)
+
+  ```
+
+
+
+
+  `--header` is optional, only required if you're using litellm proxy with Virtual Keys
+
+  ```shell
+  curl --location 'http://0.0.0.0:4000/embeddings' \
+  --header 'Content-Type: application/json' \
+  --header 'Authorization: Bearer sk-1234' \
+  --data ' {
+  "model": "my-triton-model",
+  "input": ["write a litellm poem"]
+  }'
+
+  ```
+
+
+
+
+
+
+
diff --git a/docs/my-website/docs/providers/vertex.md b/docs/my-website/docs/providers/vertex.md
index 8d59088af5..b67eb350b4 100644
--- a/docs/my-website/docs/providers/vertex.md
+++ b/docs/my-website/docs/providers/vertex.md
@@ -364,6 +364,8 @@ response = completion(
 | Model Name | Function Call |
 |------------------|--------------------------------------|
 | gemini-1.5-pro | `completion('gemini-1.5-pro', messages)`, `completion('vertex_ai/gemini-pro', messages)` |
+| gemini-1.5-flash-preview-0514 | `completion('gemini-1.5-flash-preview-0514', messages)`, `completion('vertex_ai/gemini-1.5-flash-preview-0514', messages)` |
+| gemini-1.5-pro-preview-0514 | `completion('gemini-1.5-pro-preview-0514', messages)`, `completion('vertex_ai/gemini-1.5-pro-preview-0514', messages)` |
 
diff --git a/docs/my-website/docs/proxy/alerting.md b/docs/my-website/docs/proxy/alerting.md
index 4275e0bf06..230a3a22e9 100644
--- a/docs/my-website/docs/proxy/alerting.md
+++ b/docs/my-website/docs/proxy/alerting.md
@@ -1,13 +1,18 @@
 # 🚨 Alerting
 
 Get alerts for:
+
 - Hanging LLM api calls
-- Failed LLM api calls
 - Slow LLM api calls
-- Budget Tracking per key/user:
-  - When a User/Key crosses their Budget
-  - When a User/Key is 15% away from crossing their Budget
+- Failed LLM api calls
+- Budget Tracking per key/user
+- Spend Reports - Weekly & Monthly spend per Team, Tag
 - Failed db read/writes
+- Daily Reports:
+    - **LLM** Top 5 slowest deployments
+    - **LLM** Top 5 deployments with most failed requests
+    - **Spend** Weekly & Monthly spend per Team, Tag
+
 
 ## Quick Start
 
@@ -17,10 +22,12 @@ Set up a slack alert channel to receive alerts from proxy.
 
 Get a slack webhook url from https://api.slack.com/messaging/webhooks
 
+You can also use Discord Webhooks, see [here](#using-discord-webhooks)
 
 ### Step 2: Update config.yaml
 
-Let's save a bad key to our proxy
+- Set `SLACK_WEBHOOK_URL` in your proxy env to enable Slack alerts.
+- Just for testing purposes, let's save a bad key to our proxy.
 ```yaml
 model_list: 
     model_name: "azure-model"
     litellm_params:
         model: "azure/gpt-35-turbo"
         api_key: "my-bad-key" # 👈 bad key
 
 general_settings: 
     alerting: ["slack"]
     alerting_threshold: 300 # sends alerts if requests hang for 5min+ and responses take 5min+
 
+environment_variables:
+    SLACK_WEBHOOK_URL: "https://hooks.slack.com/services/<>/<>/<>"
+    SLACK_DAILY_REPORT_FREQUENCY: "86400" # 24 hours; Optional: defaults to 12 hours
 ```
 
-Set `SLACK_WEBHOOK_URL` in your proxy env
-
-```shell
-SLACK_WEBHOOK_URL: "https://hooks.slack.com/services/<>/<>/<>"
-```
 
 ### Step 3: Start proxy
 
 ```bash
 $ litellm --config /path/to/config.yaml
-```
\ No newline at end of file
+```
+
+## Testing Alerting is Setup Correctly
+
+Make a GET request to `/health/services`, expect to see a test slack alert in your provided webhook slack channel
+
+```shell
+curl -X GET 'http://localhost:4000/health/services?service=slack' \
+    -H 'Authorization: Bearer sk-1234'
+```
+
+
+## Extras
+
+### Using Discord Webhooks
+
+Discord provides a Slack-compatible webhook url that you can use for alerting
+
+##### Quick Start
+
+1. Get a webhook url for your discord channel
+
+2. Append `/slack` to your discord webhook - it should look like
+
+```
+"https://discord.com/api/webhooks/1240030362193760286/cTLWt5ATn1gKmcy_982rl5xmYHsrM1IWJdmCL1AyOmU9JdQXazrp8L1_PYgUtgxj8x4f/slack"
+```
+
+3. Add it to your litellm config
+
+```yaml
+model_list:
+    model_name: "azure-model"
+    litellm_params:
+        model: "azure/gpt-35-turbo"
+        api_key: "my-bad-key" # 👈 bad key
+
+general_settings:
+    alerting: ["slack"]
+    alerting_threshold: 300 # sends alerts if requests hang for 5min+ and responses take 5min+
+
+environment_variables:
+    SLACK_WEBHOOK_URL: "https://discord.com/api/webhooks/1240030362193760286/cTLWt5ATn1gKmcy_982rl5xmYHsrM1IWJdmCL1AyOmU9JdQXazrp8L1_PYgUtgxj8x4f/slack"
+```
+
+That's it! You're ready to go!
diff --git a/docs/my-website/docs/proxy/billing.md b/docs/my-website/docs/proxy/billing.md
new file mode 100644
index 0000000000..97e944f258
--- /dev/null
+++ b/docs/my-website/docs/proxy/billing.md
@@ -0,0 +1,229 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# 💰 Billing
+
+Bill users for their usage.
+
+Requirements:
+- Set up a billing plan on Lago for usage-based billing. We recommend following their Stripe tutorial - https://docs.getlago.com/templates/per-transaction/stripe#step-1-create-billable-metrics-for-transaction
+
+Steps:
+- Connect the proxy to Lago
+- Set the id you want to bill for (customers, internal users, teams)
+- Start!
+
+## 1. Connect proxy to Lago
+
+Add your Lago keys to the environment
+
+```bash
+export LAGO_API_BASE="http://localhost:3000" # self-host - https://docs.getlago.com/guide/self-hosted/docker#run-the-app
+export LAGO_API_KEY="3e29d607-de54-49aa-a019-ecf585729070" # Get key - https://docs.getlago.com/guide/self-hosted/docker#find-your-api-key
+export LAGO_API_EVENT_CODE="openai_tokens" # name of lago billing code
+```
+
+Set 'lago' as a callback on your proxy config.yaml
+
+```yaml
+...
+litellm_settings:
+  callbacks: ["lago"]
+```
+
+## 2. Set the id you want to bill for
+
+For:
+- Customers (id passed via 'user' param in /chat/completion call) = 'end_user_id'
+- Internal Users (id set when [creating keys](https://docs.litellm.ai/docs/proxy/virtual_keys#advanced---spend-tracking)) = 'user_id'
+- Teams (id set when [creating keys](https://docs.litellm.ai/docs/proxy/virtual_keys#advanced---spend-tracking)) = 'team_id'
+
+```bash
+export LAGO_API_CHARGE_BY="end_user_id" # 👈 Charge 'Customers'. Default is 'end_user_id'.
+```
+
+## 3. Start billing!
+
+
+
+
+### **Curl**
+```shell
+curl --location 'http://0.0.0.0:4000/chat/completions' \
+--header 'Content-Type: application/json' \
+--data ' {
+      "model": "gpt-3.5-turbo",
+      "messages": [
+        {
+          "role": "user",
+          "content": "what llm are you"
+        }
+      ],
+      "user": "my_customer_id" # 👈 whatever your customer id is
+    }
+'
+```
+
+### **OpenAI Python SDK**
+
+```python
+import openai
+client = openai.OpenAI(
+    api_key="anything",
+    base_url="http://0.0.0.0:4000"
+)
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
+    {
+        "role": "user",
+        "content": "this is a test request, write a short poem"
+    }
+], user="my_customer_id") # 👈 whatever your customer id is
+
+print(response)
+```
+
+### **Langchain**
+
+```python
+from langchain.chat_models import ChatOpenAI
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    SystemMessagePromptTemplate,
+)
+from langchain.schema import HumanMessage, SystemMessage
+import os
+
+os.environ["OPENAI_API_KEY"] = "anything"
+
+chat = ChatOpenAI(
+    openai_api_base="http://0.0.0.0:4000",
+    model = "gpt-3.5-turbo",
+    temperature=0.1,
+    extra_body={
+        "user": "my_customer_id" # 👈 whatever your customer id is
+    }
+)
+
+messages = [
+    SystemMessage(
+        content="You are a helpful assistant that im using to make a test request to."
+    ),
+    HumanMessage(
+        content="test from litellm. tell me why it's amazing in 1 sentence"
+    ),
+]
+response = chat(messages)
+
+print(response)
+```
+
+
+
+1. Create a key for that user
+
+```bash
+curl 'http://0.0.0.0:4000/key/generate' \
+--header 'Authorization: Bearer ' \
+--header 'Content-Type: application/json' \
+--data-raw '{"user_id": "my-unique-id"}'
+```
+
+Response Object:
+
+```bash
+{
+    "key": "sk-tXL0wt5-lOOVK9sfY2UacA",
+}
+```
+
+2. Make API Calls with that Key
+
+```python
+import openai
+client = openai.OpenAI(
+    api_key="sk-tXL0wt5-lOOVK9sfY2UacA", # 👈 Generated key
+    base_url="http://0.0.0.0:4000"
+)
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
+    {
+        "role": "user",
+        "content": "this is a test request, write a short poem"
+    }
+])
+
+print(response)
+```
+
+
+
+1. Create a key for that team
+
+```bash
+curl 'http://0.0.0.0:4000/key/generate' \
+--header 'Authorization: Bearer ' \
+--header 'Content-Type: application/json' \
+--data-raw '{"team_id": "my-unique-id"}'
+```
+
+Response Object:
+
+```bash
+{
+    "key": "sk-tXL0wt5-lOOVK9sfY2UacA",
+}
+```
+
+2. Make API Calls with that Key
+
+```python
+import openai
+client = openai.OpenAI(
+    api_key="sk-tXL0wt5-lOOVK9sfY2UacA", # 👈 Generated key
+    base_url="http://0.0.0.0:4000"
+)
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
+    {
+        "role": "user",
+        "content": "this is a test request, write a short poem"
+    }
+])
+
+print(response)
+```
+
+
+
+**See Results on Lago**
+
+
+
+
+## Advanced - Lago Logging object
+
+This is what LiteLLM will log to Lago
+
+```
+{
+    "event": {
+      "transaction_id": "",
+      "external_customer_id": , # either 'end_user_id', 'user_id', or 'team_id'. Default 'end_user_id'.
+ "code": os.getenv("LAGO_API_EVENT_CODE"), + "properties": { + "input_tokens": , + "output_tokens": , + "model": , + "response_cost": , # ๐Ÿ‘ˆ LITELLM CALCULATED RESPONSE COST - https://github.com/BerriAI/litellm/blob/d43f75150a65f91f60dc2c0c9462ce3ffc713c1f/litellm/utils.py#L1473 + } + } +} +``` diff --git a/docs/my-website/docs/proxy/cost_tracking.md b/docs/my-website/docs/proxy/cost_tracking.md index 887ec9e3ed..2aaf8116e7 100644 --- a/docs/my-website/docs/proxy/cost_tracking.md +++ b/docs/my-website/docs/proxy/cost_tracking.md @@ -1,8 +1,161 @@ -# Cost Tracking - Azure +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# ๐Ÿ’ธ Spend Tracking + +Track spend for keys, users, and teams across 100+ LLMs. + +## Getting Spend Reports - To Charge Other Teams, API Keys + +Use the `/global/spend/report` endpoint to get daily spend per team, with a breakdown of spend per API Key, Model + +### Example Request + +```shell +curl -X GET 'http://localhost:4000/global/spend/report?start_date=2024-04-01&end_date=2024-06-30' \ + -H 'Authorization: Bearer sk-1234' +``` + +### Example Response + + + + +```shell +[ + { + "group_by_day": "2024-04-30T00:00:00+00:00", + "teams": [ + { + "team_name": "Prod Team", + "total_spend": 0.0015265, + "metadata": [ # see the spend by unique(key + model) + { + "model": "gpt-4", + "spend": 0.00123, + "total_tokens": 28, + "api_key": "88dc28.." # the hashed api key + }, + { + "model": "gpt-4", + "spend": 0.00123, + "total_tokens": 28, + "api_key": "a73dc2.." # the hashed api key + }, + { + "model": "chatgpt-v-2", + "spend": 0.000214, + "total_tokens": 122, + "api_key": "898c28.." # the hashed api key + }, + { + "model": "gpt-3.5-turbo", + "spend": 0.0000825, + "total_tokens": 85, + "api_key": "84dc28.." # the hashed api key + } + ] + } + ] + } +] +``` + + + + + + +```python +import requests +url = 'http://localhost:4000/global/spend/report' +params = { + 'start_date': '2023-04-01', + 'end_date': '2024-06-30' +} + +headers = { + 'Authorization': 'Bearer sk-1234' +} + +# Make the GET request +response = requests.get(url, headers=headers, params=params) +spend_report = response.json() + +for row in spend_report: + date = row["group_by_day"] + teams = row["teams"] + for team in teams: + team_name = team["team_name"] + total_spend = team["total_spend"] + metadata = team["metadata"] + + print(f"Date: {date}") + print(f"Team: {team_name}") + print(f"Total Spend: {total_spend}") + print("Metadata: ", metadata) + print() +``` + +Output from script +```shell +# Date: 2024-05-11T00:00:00+00:00 +# Team: local_test_team +# Total Spend: 0.003675099999999999 +# Metadata: [{'model': 'gpt-3.5-turbo', 'spend': 0.003675099999999999, 'api_key': 'b94d5e0bc3a71a573917fe1335dc0c14728c7016337451af9714924ff3a729db', 'total_tokens': 3105}] + +# Date: 2024-05-13T00:00:00+00:00 +# Team: Unassigned Team +# Total Spend: 3.4e-05 +# Metadata: [{'model': 'gpt-3.5-turbo', 'spend': 3.4e-05, 'api_key': '9569d13c9777dba68096dea49b0b03e0aaf4d2b65d4030eda9e8a2733c3cd6e0', 'total_tokens': 50}] + +# Date: 2024-05-13T00:00:00+00:00 +# Team: central +# Total Spend: 0.000684 +# Metadata: [{'model': 'gpt-3.5-turbo', 'spend': 0.000684, 'api_key': '0323facdf3af551594017b9ef162434a9b9a8ca1bbd9ccbd9d6ce173b1015605', 'total_tokens': 498}] + +# Date: 2024-05-13T00:00:00+00:00 +# Team: local_test_team +# Total Spend: 0.0005715000000000001 +# Metadata: [{'model': 'gpt-3.5-turbo', 'spend': 0.0005715000000000001, 'api_key': 'b94d5e0bc3a71a573917fe1335dc0c14728c7016337451af9714924ff3a729db', 
'total_tokens': 423}]
+```
+
+
+
+
+
+
+## Reset Team, API Key Spend - MASTER KEY ONLY
+
+Use `/global/spend/reset` if you want to:
+- Reset the Spend for all API Keys, Teams. The `spend` for ALL Teams and Keys in `LiteLLM_TeamTable` and `LiteLLM_VerificationToken` will be set to `spend=0`
+
+- LiteLLM will maintain all the logs in `LiteLLM_SpendLogs` for Auditing Purposes
+
+### Request
+Only the `LITELLM_MASTER_KEY` you set can access this route
+```shell
+curl -X POST \
+	'http://localhost:4000/global/spend/reset' \
+	-H 'Authorization: Bearer sk-1234' \
+	-H 'Content-Type: application/json'
+```
+
+### Expected Response
+
+```shell
+{"message":"Spend for all API Keys and Teams reset successfully","status":"success"}
+```
+
+
+
+
+## Spend Tracking for Azure
 
 Set base model for cost tracking azure image-gen call 
 
-## Image Generation
+### Image Generation
 
 ```yaml
 model_list:
@@ -17,7 +170,7 @@ model_list:
       mode: image_generation
 ```
 
-## Chat Completions / Embeddings
+### Chat Completions / Embeddings
 
 **Problem**: Azure returns `gpt-4` in the response when `azure/gpt-4-1106-preview` is used. This leads to inaccurate cost tracking
diff --git a/docs/my-website/docs/proxy/logging.md b/docs/my-website/docs/proxy/logging.md
index 932ea4f577..538a81d4b3 100644
--- a/docs/my-website/docs/proxy/logging.md
+++ b/docs/my-website/docs/proxy/logging.md
@@ -3,7 +3,7 @@ import Tabs from '@theme/Tabs';
 
 import TabItem from '@theme/TabItem';
 
-# 🔎 Logging - Custom Callbacks, DataDog, Langfuse, s3 Bucket, Sentry, OpenTelemetry, Athina
+# 🔎 Logging - Custom Callbacks, DataDog, Langfuse, s3 Bucket, Sentry, OpenTelemetry, Athina, Azure Content-Safety
 
 Log Proxy Input, Output, Exceptions using Custom Callbacks, Langfuse, OpenTelemetry, LangFuse, DynamoDB, s3 Bucket
 
@@ -17,6 +17,7 @@ Log Proxy Input, Output, Exceptions using Custom Callbacks, Langfuse, OpenTeleme
 - [Logging to Sentry](#logging-proxy-inputoutput---sentry)
 - [Logging to Traceloop (OpenTelemetry)](#logging-proxy-inputoutput-traceloop-opentelemetry)
 - [Logging to Athina](#logging-proxy-inputoutput-athina)
+- [(BETA) Moderation with Azure Content-Safety](#moderation-with-azure-content-safety)
 
 ## Custom Callback Class [Async]
 Use this when you want to run custom callbacks in `python`
@@ -1037,3 +1038,86 @@ curl --location 'http://0.0.0.0:4000/chat/completions' \
     ]
 }'
 ```
+
+## (BETA) Moderation with Azure Content Safety
+
+[Azure Content-Safety](https://azure.microsoft.com/en-us/products/ai-services/ai-content-safety) is a Microsoft Azure service that provides content moderation APIs to detect potential offensive, harmful, or risky content in text.
+
+We will use the `--config` to set `litellm.success_callback = ["azure_content_safety"]`. This will moderate all LLM calls using Azure Content Safety.
+
+**Step 0** Deploy Azure Content Safety
+
+Deploy an Azure Content-Safety instance from the Azure Portal and get the `endpoint` and `key`.
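+
+If you use the Azure CLI, one way to look up both values after deployment (the resource name and group below are placeholders - substitute your own):
+
+```shell
+# hypothetical resource name/group - replace with your own
+az cognitiveservices account show --name my-content-safety --resource-group my-rg --query "properties.endpoint" -o tsv
+az cognitiveservices account keys list --name my-content-safety --resource-group my-rg --query "key1" -o tsv
+```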
+
+**Step 1** Set the Azure Content-Safety key
+
+```shell
+AZURE_CONTENT_SAFETY_KEY = ""
+```
+
+**Step 2**: Create a `config.yaml` file and set `litellm_settings`: `success_callback`
+```yaml
+model_list:
+ - model_name: gpt-3.5-turbo
+   litellm_params:
+     model: gpt-3.5-turbo
+litellm_settings:
+  callbacks: ["azure_content_safety"]
+  azure_content_safety_params:
+    endpoint: ""
+    key: "os.environ/AZURE_CONTENT_SAFETY_KEY"
+```
+
+**Step 3**: Start the proxy, make a test request
+
+Start proxy
+```shell
+litellm --config config.yaml --debug
+```
+
+Test Request
+```
+curl --location 'http://0.0.0.0:4000/chat/completions' \
+    --header 'Content-Type: application/json' \
+    --data ' {
+    "model": "gpt-3.5-turbo",
+    "messages": [
+        {
+        "role": "user",
+        "content": "Hi, how are you?"
+        }
+    ]
+    }'
+```
+
+An HTTP 400 error will be returned if the content is detected with a value greater than the threshold set in the `config.yaml`.
+The details of the response will describe:
+- The `source`: input text or llm generated text
+- The `category`: the category of the content that triggered the moderation
+- The `severity`: the severity from 0 to 10
+
+**Step 4**: Customizing Azure Content Safety Thresholds
+
+You can customize the thresholds for each category by setting the `thresholds` in the `config.yaml`
+
+```yaml
+model_list:
+ - model_name: gpt-3.5-turbo
+   litellm_params:
+     model: gpt-3.5-turbo
+litellm_settings:
+  callbacks: ["azure_content_safety"]
+  azure_content_safety_params:
+    endpoint: ""
+    key: "os.environ/AZURE_CONTENT_SAFETY_KEY"
+    thresholds:
+      Hate: 6
+      SelfHarm: 8
+      Sexual: 6
+      Violence: 4
+```
+
+:::info
+`thresholds` are not required by default, but you can tune the values to your needs.
+The default value is `4` for all categories.
+:::
\ No newline at end of file
diff --git a/docs/my-website/docs/proxy/prod.md b/docs/my-website/docs/proxy/prod.md
index 32cd916c98..35c8c575ba 100644
--- a/docs/my-website/docs/proxy/prod.md
+++ b/docs/my-website/docs/proxy/prod.md
@@ -64,6 +64,12 @@ router_settings:
   redis_password: os.environ/REDIS_PASSWORD
 ```
 
+## 4. Disable 'load_dotenv'
+
+Set `export LITELLM_MODE="PRODUCTION"`
+
+This disables the load_dotenv() functionality, which would otherwise automatically load your environment credentials from the local `.env`.
+
 ## Extras
 
 ### Expected Performance in Production
diff --git a/docs/my-website/docs/proxy/reliability.md b/docs/my-website/docs/proxy/reliability.md
index bd04216dd1..e39a6765fc 100644
--- a/docs/my-website/docs/proxy/reliability.md
+++ b/docs/my-website/docs/proxy/reliability.md
@@ -151,7 +151,7 @@ curl --location 'http://0.0.0.0:4000/chat/completions' \
 }'
 ```
 
-## Advanced - Context Window Fallbacks
+## Advanced - Context Window Fallbacks (Pre-Call Checks + Fallbacks)
 
 **Before call is made** check if a call is within model context window with **`enable_pre_call_checks: true`**.
 
@@ -232,16 +232,16 @@ model_list:
  - model_name: gpt-3.5-turbo-small
    litellm_params:
      model: azure/chatgpt-v-2
-      api_base: os.environ/AZURE_API_BASE
-      api_key: os.environ/AZURE_API_KEY
-      api_version: "2023-07-01-preview"
-     model_info:
-      base_model: azure/gpt-4-1106-preview # 2. 👈 (azure-only) SET BASE MODEL
+     api_base: os.environ/AZURE_API_BASE
+     api_key: os.environ/AZURE_API_KEY
+     api_version: "2023-07-01-preview"
+     model_info:
+       base_model: azure/gpt-4-1106-preview # 2. 👈 (azure-only) SET BASE MODEL
๐Ÿ‘ˆ (azure-only) SET BASE MODEL - model_name: gpt-3.5-turbo-large litellm_params: - model: gpt-3.5-turbo-1106 - api_key: os.environ/OPENAI_API_KEY + model: gpt-3.5-turbo-1106 + api_key: os.environ/OPENAI_API_KEY - model_name: claude-opus litellm_params: @@ -287,6 +287,69 @@ print(response) +## Advanced - EU-Region Filtering (Pre-Call Checks) + +**Before call is made** check if a call is within model context window with **`enable_pre_call_checks: true`**. + +Set 'region_name' of deployment. + +**Note:** LiteLLM can automatically infer region_name for Vertex AI, Bedrock, and IBM WatsonxAI based on your litellm params. For Azure, set `litellm.enable_preview = True`. + +**1. Set Config** + +```yaml +router_settings: + enable_pre_call_checks: true # 1. Enable pre-call checks + +model_list: +- model_name: gpt-3.5-turbo + litellm_params: + model: azure/chatgpt-v-2 + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: "2023-07-01-preview" + region_name: "eu" # ๐Ÿ‘ˆ SET EU-REGION + +- model_name: gpt-3.5-turbo + litellm_params: + model: gpt-3.5-turbo-1106 + api_key: os.environ/OPENAI_API_KEY + +- model_name: gemini-pro + litellm_params: + model: vertex_ai/gemini-pro-1.5 + vertex_project: adroit-crow-1234 + vertex_location: us-east1 # ๐Ÿ‘ˆ AUTOMATICALLY INFERS 'region_name' +``` + +**2. Start proxy** + +```bash +litellm --config /path/to/config.yaml + +# RUNNING on http://0.0.0.0:4000 +``` + +**3. Test it!** + +```python +import openai +client = openai.OpenAI( + api_key="anything", + base_url="http://0.0.0.0:4000" +) + +# request sent to model set on litellm proxy, `litellm --model` +response = client.chat.completions.with_raw_response.create( + model="gpt-3.5-turbo", + messages = [{"role": "user", "content": "Who was Alexander?"}] +) + +print(response) + +print(f"response.headers.get('x-litellm-model-api-base')") +``` + ## Advanced - Custom Timeouts, Stream Timeouts - Per Model For each model you can set `timeout` & `stream_timeout` under `litellm_params` ```yaml diff --git a/docs/my-website/docs/proxy/token_auth.md b/docs/my-website/docs/proxy/token_auth.md index e4772d70af..659cc6edf0 100644 --- a/docs/my-website/docs/proxy/token_auth.md +++ b/docs/my-website/docs/proxy/token_auth.md @@ -110,7 +110,7 @@ general_settings: admin_jwt_scope: "litellm-proxy-admin" ``` -## Advanced - Spend Tracking (User / Team / Org) +## Advanced - Spend Tracking (End-Users / Internal Users / Team / Org) Set the field in the jwt token, which corresponds to a litellm user / team / org. @@ -123,6 +123,7 @@ general_settings: team_id_jwt_field: "client_id" # ๐Ÿ‘ˆ CAN BE ANY FIELD user_id_jwt_field: "sub" # ๐Ÿ‘ˆ CAN BE ANY FIELD org_id_jwt_field: "org_id" # ๐Ÿ‘ˆ CAN BE ANY FIELD + end_user_id_jwt_field: "customer_id" # ๐Ÿ‘ˆ CAN BE ANY FIELD ``` Expected JWT: @@ -131,7 +132,7 @@ Expected JWT: { "client_id": "my-unique-team", "sub": "my-unique-user", - "org_id": "my-unique-org" + "org_id": "my-unique-org", } ``` diff --git a/docs/my-website/docs/proxy/user_keys.md b/docs/my-website/docs/proxy/user_keys.md index fa78b37c11..cda3a46af9 100644 --- a/docs/my-website/docs/proxy/user_keys.md +++ b/docs/my-website/docs/proxy/user_keys.md @@ -365,6 +365,188 @@ curl --location 'http://0.0.0.0:4000/moderations' \ ## Advanced +### (BETA) Batch Completions - pass multiple models + +Use this when you want to send 1 request to N Models + +#### Expected Request Format + +Pass model as a string of comma separated value of models. 
Example `"model"="llama3,gpt-3.5-turbo"` + +This same request will be sent to the following model groups on the [litellm proxy config.yaml](https://docs.litellm.ai/docs/proxy/configs) +- `model_name="llama3"` +- `model_name="gpt-3.5-turbo"` + + + + + + +```python +import openai + +client = openai.OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000") + +response = client.chat.completions.create( + model="gpt-3.5-turbo,llama3", + messages=[ + {"role": "user", "content": "this is a test request, write a short poem"} + ], +) + +print(response) +``` + + + +#### Expected Response Format + +Get a list of responses when `model` is passed as a list + +```python +[ + ChatCompletion( + id='chatcmpl-9NoYhS2G0fswot0b6QpoQgmRQMaIf', + choices=[ + Choice( + finish_reason='stop', + index=0, + logprobs=None, + message=ChatCompletionMessage( + content='In the depths of my soul, a spark ignites\nA light that shines so pure and bright\nIt dances and leaps, refusing to die\nA flame of hope that reaches the sky\n\nIt warms my heart and fills me with bliss\nA reminder that in darkness, there is light to kiss\nSo I hold onto this fire, this guiding light\nAnd let it lead me through the darkest night.', + role='assistant', + function_call=None, + tool_calls=None + ) + ) + ], + created=1715462919, + model='gpt-3.5-turbo-0125', + object='chat.completion', + system_fingerprint=None, + usage=CompletionUsage( + completion_tokens=83, + prompt_tokens=17, + total_tokens=100 + ) + ), + ChatCompletion( + id='chatcmpl-4ac3e982-da4e-486d-bddb-ed1d5cb9c03c', + choices=[ + Choice( + finish_reason='stop', + index=0, + logprobs=None, + message=ChatCompletionMessage( + content="A test request, and I'm delighted!\nHere's a short poem, just for you:\n\nMoonbeams dance upon the sea,\nA path of light, for you to see.\nThe stars up high, a twinkling show,\nA night of wonder, for all to know.\n\nThe world is quiet, save the night,\nA peaceful hush, a gentle light.\nThe world is full, of beauty rare,\nA treasure trove, beyond compare.\n\nI hope you enjoyed this little test,\nA poem born, of whimsy and jest.\nLet me know, if there's anything else!", + role='assistant', + function_call=None, + tool_calls=None + ) + ) + ], + created=1715462919, + model='groq/llama3-8b-8192', + object='chat.completion', + system_fingerprint='fp_a2c8d063cb', + usage=CompletionUsage( + completion_tokens=120, + prompt_tokens=20, + total_tokens=140 + ) + ) +] +``` + + + + + + + + + +```shell +curl --location 'http://localhost:4000/chat/completions' \ + --header 'Authorization: Bearer sk-1234' \ + --header 'Content-Type: application/json' \ + --data '{ + "model": "llama3,gpt-3.5-turbo", + "max_tokens": 10, + "user": "litellm2", + "messages": [ + { + "role": "user", + "content": "is litellm getting better" + } + ] +}' +``` + + + + +#### Expected Response Format + +Get a list of responses when `model` is passed as a list + +```json +[ + { + "id": "chatcmpl-3dbd5dd8-7c82-4ca3-bf1f-7c26f497cf2b", + "choices": [ + { + "finish_reason": "length", + "index": 0, + "message": { + "content": "The Elder Scrolls IV: Oblivion!\n\nReleased", + "role": "assistant" + } + } + ], + "created": 1715459876, + "model": "groq/llama3-8b-8192", + "object": "chat.completion", + "system_fingerprint": "fp_179b0f92c9", + "usage": { + "completion_tokens": 10, + "prompt_tokens": 12, + "total_tokens": 22 + } + }, + { + "id": "chatcmpl-9NnldUfFLmVquFHSX4yAtjCw8PGei", + "choices": [ + { + "finish_reason": "length", + "index": 0, + "message": { + "content": "TES4 could refer to The Elder 
Scrolls IV:", + "role": "assistant" + } + } + ], + "created": 1715459877, + "model": "gpt-3.5-turbo-0125", + "object": "chat.completion", + "system_fingerprint": null, + "usage": { + "completion_tokens": 10, + "prompt_tokens": 9, + "total_tokens": 19 + } + } +] +``` + + + + + + + + + ### Pass User LLM API Keys, Fallbacks Allow your end-users to pass their model list, api base, OpenAI API key (any LiteLLM supported provider) to make requests diff --git a/docs/my-website/docs/routing.md b/docs/my-website/docs/routing.md index 0b0c7713c2..5ba3221c97 100644 --- a/docs/my-website/docs/routing.md +++ b/docs/my-website/docs/routing.md @@ -653,7 +653,9 @@ from litellm import Router model_list = [{...}] router = Router(model_list=model_list, - allowed_fails=1) # cooldown model if it fails > 1 call in a minute. + allowed_fails=1, # cooldown model if it fails > 1 call in a minute. + cooldown_time=100 # cooldown the deployment for 100 seconds if it num_fails > allowed_fails + ) user_message = "Hello, whats the weather in San Francisco??" messages = [{"content": user_message, "role": "user"}] @@ -770,6 +772,8 @@ If the error is a context window exceeded error, fall back to a larger model gro Fallbacks are done in-order - ["gpt-3.5-turbo, "gpt-4", "gpt-4-32k"], will do 'gpt-3.5-turbo' first, then 'gpt-4', etc. +You can also set 'default_fallbacks', in case a specific model group is misconfigured / bad. + ```python from litellm import Router @@ -830,6 +834,7 @@ model_list = [ router = Router(model_list=model_list, fallbacks=[{"azure/gpt-3.5-turbo": ["gpt-3.5-turbo"]}], + default_fallbacks=["gpt-3.5-turbo-16k"], context_window_fallbacks=[{"azure/gpt-3.5-turbo-context-fallback": ["gpt-3.5-turbo-16k"]}, {"gpt-3.5-turbo": ["gpt-3.5-turbo-16k"]}], set_verbose=True) @@ -879,13 +884,11 @@ router = Router(model_list: Optional[list] = None, cache_responses=True) ``` -## Pre-Call Checks (Context Window) +## Pre-Call Checks (Context Window, EU-Regions) Enable pre-call checks to filter out: 1. deployments with context window limit < messages for a call. -2. deployments that have exceeded rate limits when making concurrent calls. (eg. `asyncio.gather(*[ - router.acompletion(model="gpt-3.5-turbo", messages=m) for m in list_of_messages - ])`) +2. deployments outside of eu-region @@ -900,10 +903,14 @@ router = Router(model_list=model_list, enable_pre_call_checks=True) # ๐Ÿ‘ˆ Set t **2. Set Model List** -For azure deployments, set the base model. Pick the base model from [this list](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json), all the azure models start with `azure/`. +For context window checks on azure deployments, set the base model. Pick the base model from [this list](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json), all the azure models start with `azure/`. - - +For 'eu-region' filtering, Set 'region_name' of deployment. + +**Note:** We automatically infer region_name for Vertex AI, Bedrock, and IBM WatsonxAI based on your litellm params. For Azure, set `litellm.enable_preview = True`. 
+
+
+[**See Code**](https://github.com/BerriAI/litellm/blob/d33e49411d6503cb634f9652873160cd534dec96/litellm/router.py#L2958)
 
 ```python
 model_list = [
 	{
 		"model_name": "gpt-3.5-turbo", # model group name 
 		"litellm_params": { # params for litellm completion/embedding call 
 			"model": "azure/chatgpt-v-2",
 			"api_key": os.getenv("AZURE_API_KEY"),
 			"api_version": os.getenv("AZURE_API_VERSION"),
 			"api_base": os.getenv("AZURE_API_BASE"),
-		},
-		"model_info": {
+			"region_name": "eu", # 👈 SET 'EU' REGION NAME
 			"base_model": "azure/gpt-35-turbo", # 👈 (Azure-only) SET BASE MODEL
-		}
+		},
 	},
 	{
 		"model_name": "gpt-3.5-turbo", # model group name 
 		"litellm_params": { # params for litellm completion/embedding call 
 			"model": "gpt-3.5-turbo-1106",
 			"api_key": os.getenv("OPENAI_API_KEY"),
 		},
 	},
+	{
+		"model_name": "gemini-pro",
+		"litellm_params": {
+			"model": "vertex_ai/gemini-pro-1.5",
+			"vertex_project": "adroit-crow-1234",
+			"vertex_location": "us-east1" # 👈 AUTOMATICALLY INFERS 'region_name'
+		}
+	}
 ]
 
 router = Router(model_list=model_list, enable_pre_call_checks=True) 
 ```
 
-
-
-
-```python
-model_list = [
-	{
-		"model_name": "gpt-3.5-turbo-small", # model group name 
-		"litellm_params": { # params for litellm completion/embedding call 
-			"model": "azure/chatgpt-v-2",
-			"api_key": os.getenv("AZURE_API_KEY"),
-			"api_version": os.getenv("AZURE_API_VERSION"),
-			"api_base": os.getenv("AZURE_API_BASE"),
-		},
-		"model_info": {
-			"base_model": "azure/gpt-35-turbo", # 👈 (Azure-only) SET BASE MODEL
-		}
-	},
-	{
-		"model_name": "gpt-3.5-turbo-large", # model group name 
-		"litellm_params": { # params for litellm completion/embedding call 
-			"model": "gpt-3.5-turbo-1106",
-			"api_key": os.getenv("OPENAI_API_KEY"),
-		},
-	},
-	{
-		"model_name": "claude-opus",
-		"litellm_params": { # params for litellm completion/embedding call 
-			"model": "claude-3-opus-20240229",
-			"api_key": os.getenv("ANTHROPIC_API_KEY"),
-		},
-	},
-]
-
-router = Router(model_list=model_list, enable_pre_call_checks=True, context_window_fallbacks=[{"gpt-3.5-turbo-small": ["gpt-3.5-turbo-large", "claude-opus"]}])
-```
-
-
-
-
 **3. Test it!**
 
+
+
+
+
 ```python
 """
 - Give a gpt-3.5-turbo model group with different context windows (4k vs. 16k)
@@ -983,7 +961,6 @@ router = Router(model_list=model_list, enable_pre_call_checks=True, context_wind
 from litellm import Router
 import os
 
-try:
 model_list = [
 	{
 		"model_name": "gpt-3.5-turbo", # model group name 
 		"litellm_params": { # params for litellm completion/embedding call 
 			"model": "azure/chatgpt-v-2",
 			"api_key": os.getenv("AZURE_API_KEY"),
 			"api_version": os.getenv("AZURE_API_VERSION"),
 			"api_base": os.getenv("AZURE_API_BASE"),
+			"base_model": "azure/gpt-35-turbo",
 		},
 		"model_info": {
 			"base_model": "azure/gpt-35-turbo",
@@ -1021,6 +999,59 @@ response = router.completion(
 
 print(f"response: {response}")
 ```
+
+
+```python
+"""
+- Give 2 gpt-3.5-turbo deployments, in eu + non-eu regions
+- Make a call
+- Assert it picks the eu-region model
+"""
+
+from litellm import Router
+import os
+
+model_list = [
+	{
+		"model_name": "gpt-3.5-turbo", # model group name 
+		"litellm_params": { # params for litellm completion/embedding call 
+			"model": "azure/chatgpt-v-2",
+			"api_key": os.getenv("AZURE_API_KEY"),
+			"api_version": os.getenv("AZURE_API_VERSION"),
+			"api_base": os.getenv("AZURE_API_BASE"),
+			"region_name": "eu"
+		},
+		"model_info": {
+			"id": "1"
+		}
+	},
+	{
+		"model_name": "gpt-3.5-turbo", # model group name 
+		"litellm_params": { # params for litellm completion/embedding call 
+			"model": "gpt-3.5-turbo-1106",
+			"api_key": os.getenv("OPENAI_API_KEY"),
+		},
+		"model_info": {
+			"id": "2"
+		}
+	},
+]
+
+router = Router(model_list=model_list, enable_pre_call_checks=True)
+
+response = router.completion(
+	model="gpt-3.5-turbo",
+	messages=[{"role": "user", "content": "Who was Alexander?"}],
+)
+
+print(f"response: {response}")
+
+print(f"response id: {response._hidden_params['model_id']}")
+```
+
+
+
 
 :::info
 
@@ -1283,10 +1314,11 @@ def __init__(
 	num_retries: int = 0,
 	timeout: Optional[float] = None,
 	default_litellm_params={}, # default params for Router.chat.completion.create
-	fallbacks: List = [],
+	fallbacks: Optional[List] = None,
+	default_fallbacks: Optional[List] = None,
 	allowed_fails: Optional[int] = None, # Number of times a deployment can fail before being added to cooldown
 	cooldown_time: float = 1, # (seconds) time to cooldown a deployment after failure 
-	context_window_fallbacks: List = [],
+	context_window_fallbacks: Optional[List] = None,
 	model_group_alias: Optional[dict] = {},
 	retry_after: int = 0, # (min) time to wait before retrying a failed request
 	routing_strategy: Literal[
diff --git a/docs/my-website/img/lago.jpeg b/docs/my-website/img/lago.jpeg
new file mode 100644
index 0000000000..546852f1c4
Binary files /dev/null and b/docs/my-website/img/lago.jpeg differ
diff --git a/docs/my-website/img/lago_2.png b/docs/my-website/img/lago_2.png
new file mode 100644
index 0000000000..e51a93b077
Binary files /dev/null and b/docs/my-website/img/lago_2.png differ
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index 4ce5870805..f840ed7897 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -39,7 +39,9 @@ const sidebars = {
         "proxy/demo",
         "proxy/configs",
         "proxy/reliability",
+        "proxy/cost_tracking",
         "proxy/users",
+        "proxy/billing",
         "proxy/user_keys",
         "proxy/enterprise",
         "proxy/virtual_keys",
@@ -52,7 +54,6 @@
         "proxy/team_based_routing",
         "proxy/customer_routing",
         "proxy/ui",
-        "proxy/cost_tracking",
         "proxy/token_auth",
         {
           type: "category",
@@ -134,6 +135,7 @@
         "providers/huggingface",
         "providers/watsonx",
         "providers/predibase",
+        "providers/triton-inference-server",
         "providers/ollama",
         "providers/perplexity",
         "providers/groq",
@@ -174,6 +176,7 @@ const sidebars = {
"observability/custom_callback", "observability/langfuse_integration", "observability/sentry", + "observability/lago", "observability/openmeter", "observability/promptlayer_integration", "observability/wandb_integration", @@ -188,7 +191,7 @@ const sidebars = { `observability/telemetry`, ], }, - "caching/redis_cache", + "caching/all_caches", { type: "category", label: "Tutorials", diff --git a/enterprise/enterprise_callbacks/generic_api_callback.py b/enterprise/enterprise_callbacks/generic_api_callback.py index 076c13d5ee..cf1d22e8f8 100644 --- a/enterprise/enterprise_callbacks/generic_api_callback.py +++ b/enterprise/enterprise_callbacks/generic_api_callback.py @@ -10,7 +10,6 @@ from litellm.caching import DualCache from typing import Literal, Union -dotenv.load_dotenv() # Loading env variables using dotenv import traceback @@ -19,8 +18,6 @@ import traceback import dotenv, os import requests - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/enterprise/utils.py b/enterprise/utils.py index 05bd7dac6e..90b14314c2 100644 --- a/enterprise/utils.py +++ b/enterprise/utils.py @@ -1,6 +1,7 @@ # Enterprise Proxy Util Endpoints from litellm._logging import verbose_logger import collections +from datetime import datetime async def get_spend_by_tags(start_date=None, end_date=None, prisma_client=None): @@ -18,26 +19,33 @@ async def get_spend_by_tags(start_date=None, end_date=None, prisma_client=None): return response -async def ui_get_spend_by_tags(start_date=None, end_date=None, prisma_client=None): - response = await prisma_client.db.query_raw( - """ +async def ui_get_spend_by_tags(start_date: str, end_date: str, prisma_client): + + sql_query = """ SELECT jsonb_array_elements_text(request_tags) AS individual_request_tag, DATE(s."startTime") AS spend_date, COUNT(*) AS log_count, SUM(spend) AS total_spend FROM "LiteLLM_SpendLogs" s - WHERE s."startTime" >= current_date - interval '30 days' + WHERE + DATE(s."startTime") >= $1::date + AND DATE(s."startTime") <= $2::date GROUP BY individual_request_tag, spend_date - ORDER BY spend_date; - """ + ORDER BY spend_date + LIMIT 100; + """ + response = await prisma_client.db.query_raw( + sql_query, + start_date, + end_date, ) # print("tags - spend") # print(response) # Bar Chart 1 - Spend per tag - Top 10 tags by spend - total_spend_per_tag = collections.defaultdict(float) - total_requests_per_tag = collections.defaultdict(int) + total_spend_per_tag: collections.defaultdict = collections.defaultdict(float) + total_requests_per_tag: collections.defaultdict = collections.defaultdict(int) for row in response: tag_name = row["individual_request_tag"] tag_spend = row["total_spend"] @@ -49,15 +57,18 @@ async def ui_get_spend_by_tags(start_date=None, end_date=None, prisma_client=Non # convert to ui format ui_tags = [] for tag in sorted_tags: + current_spend = tag[1] + if current_spend is not None and isinstance(current_spend, float): + current_spend = round(current_spend, 4) ui_tags.append( { "name": tag[0], - "value": tag[1], + "spend": current_spend, "log_count": total_requests_per_tag[tag[0]], } ) - return {"top_10_tags": ui_tags} + return {"spend_per_tag": ui_tags} async def view_spend_logs_from_clickhouse( diff --git a/litellm/__init__.py b/litellm/__init__.py index 8d4666a3b0..ac2b420d71 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -1,5 +1,6 @@ ### Hide pydantic namespace conflict warnings globally ### import warnings + warnings.filterwarnings("ignore", 
message=".*conflict with protected namespace.*") ### INIT VARIABLES ### import threading, requests, os @@ -14,7 +15,9 @@ from litellm.proxy._types import ( import httpx import dotenv -dotenv.load_dotenv() +litellm_mode = os.getenv("LITELLM_MODE", "DEV") # "PRODUCTION", "DEV" +if litellm_mode == "DEV": + dotenv.load_dotenv() ############################################# if set_verbose == True: _turn_on_debug() @@ -24,8 +27,8 @@ input_callback: List[Union[str, Callable]] = [] success_callback: List[Union[str, Callable]] = [] failure_callback: List[Union[str, Callable]] = [] service_callback: List[Union[str, Callable]] = [] -callbacks: List[Callable] = [] -_custom_logger_compatible_callbacks: list = ["openmeter"] +_custom_logger_compatible_callbacks_literal = Literal["lago", "openmeter"] +callbacks: List[Union[Callable, _custom_logger_compatible_callbacks_literal]] = [] _langfuse_default_tags: Optional[ List[ Literal[ @@ -70,6 +73,7 @@ azure_key: Optional[str] = None anthropic_key: Optional[str] = None replicate_key: Optional[str] = None cohere_key: Optional[str] = None +clarifai_key: Optional[str] = None maritalk_key: Optional[str] = None ai21_key: Optional[str] = None ollama_key: Optional[str] = None @@ -100,6 +104,9 @@ blocked_user_list: Optional[Union[str, List]] = None banned_keywords_list: Optional[Union[str, List]] = None llm_guard_mode: Literal["all", "key-specific", "request-specific"] = "all" ################## +### PREVIEW FEATURES ### +enable_preview_features: bool = False +################## logging: bool = True caching: bool = ( False # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648 @@ -214,6 +221,7 @@ max_end_user_budget: Optional[float] = None #### RELIABILITY #### request_timeout: Optional[float] = 6000 num_retries: Optional[int] = None # per model endpoint +default_fallbacks: Optional[List] = None fallbacks: Optional[List] = None context_window_fallbacks: Optional[List] = None allowed_fails: int = 0 @@ -400,6 +408,73 @@ replicate_models: List = [ "replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad", ] +clarifai_models: List = [ + "clarifai/meta.Llama-3.Llama-3-8B-Instruct", + "clarifai/gcp.generate.gemma-1_1-7b-it", + "clarifai/mistralai.completion.mixtral-8x22B", + "clarifai/cohere.generate.command-r-plus", + "clarifai/databricks.drbx.dbrx-instruct", + "clarifai/mistralai.completion.mistral-large", + "clarifai/mistralai.completion.mistral-medium", + "clarifai/mistralai.completion.mistral-small", + "clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1", + "clarifai/gcp.generate.gemma-2b-it", + "clarifai/gcp.generate.gemma-7b-it", + "clarifai/deci.decilm.deciLM-7B-instruct", + "clarifai/mistralai.completion.mistral-7B-Instruct", + "clarifai/gcp.generate.gemini-pro", + "clarifai/anthropic.completion.claude-v1", + "clarifai/anthropic.completion.claude-instant-1_2", + "clarifai/anthropic.completion.claude-instant", + "clarifai/anthropic.completion.claude-v2", + "clarifai/anthropic.completion.claude-2_1", + "clarifai/meta.Llama-2.codeLlama-70b-Python", + "clarifai/meta.Llama-2.codeLlama-70b-Instruct", + "clarifai/openai.completion.gpt-3_5-turbo-instruct", + "clarifai/meta.Llama-2.llama2-7b-chat", + "clarifai/meta.Llama-2.llama2-13b-chat", + "clarifai/meta.Llama-2.llama2-70b-chat", + "clarifai/openai.chat-completion.gpt-4-turbo", + "clarifai/microsoft.text-generation.phi-2", + "clarifai/meta.Llama-2.llama2-7b-chat-vllm", + "clarifai/upstage.solar.solar-10_7b-instruct", + 
"clarifai/openchat.openchat.openchat-3_5-1210", + "clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B", + "clarifai/gcp.generate.text-bison", + "clarifai/meta.Llama-2.llamaGuard-7b", + "clarifai/fblgit.una-cybertron.una-cybertron-7b-v2", + "clarifai/openai.chat-completion.GPT-4", + "clarifai/openai.chat-completion.GPT-3_5-turbo", + "clarifai/ai21.complete.Jurassic2-Grande", + "clarifai/ai21.complete.Jurassic2-Grande-Instruct", + "clarifai/ai21.complete.Jurassic2-Jumbo-Instruct", + "clarifai/ai21.complete.Jurassic2-Jumbo", + "clarifai/ai21.complete.Jurassic2-Large", + "clarifai/cohere.generate.cohere-generate-command", + "clarifai/wizardlm.generate.wizardCoder-Python-34B", + "clarifai/wizardlm.generate.wizardLM-70B", + "clarifai/tiiuae.falcon.falcon-40b-instruct", + "clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat", + "clarifai/gcp.generate.code-gecko", + "clarifai/gcp.generate.code-bison", + "clarifai/mistralai.completion.mistral-7B-OpenOrca", + "clarifai/mistralai.completion.openHermes-2-mistral-7B", + "clarifai/wizardlm.generate.wizardLM-13B", + "clarifai/huggingface-research.zephyr.zephyr-7B-alpha", + "clarifai/wizardlm.generate.wizardCoder-15B", + "clarifai/microsoft.text-generation.phi-1_5", + "clarifai/databricks.Dolly-v2.dolly-v2-12b", + "clarifai/bigcode.code.StarCoder", + "clarifai/salesforce.xgen.xgen-7b-8k-instruct", + "clarifai/mosaicml.mpt.mpt-7b-instruct", + "clarifai/anthropic.completion.claude-3-opus", + "clarifai/anthropic.completion.claude-3-sonnet", + "clarifai/gcp.generate.gemini-1_5-pro", + "clarifai/gcp.generate.imagen-2", + "clarifai/salesforce.blip.general-english-image-caption-blip-2", +] + + huggingface_models: List = [ "meta-llama/Llama-2-7b-hf", "meta-llama/Llama-2-7b-chat-hf", @@ -505,6 +580,7 @@ provider_list: List = [ "text-completion-openai", "cohere", "cohere_chat", + "clarifai", "anthropic", "replicate", "huggingface", @@ -537,6 +613,7 @@ provider_list: List = [ "xinference", "fireworks_ai", "watsonx", + "triton", "predibase", "custom", # custom apis ] @@ -654,6 +731,7 @@ from .llms.predibase import PredibaseConfig from .llms.anthropic_text import AnthropicTextConfig from .llms.replicate import ReplicateConfig from .llms.cohere import CohereConfig +from .llms.clarifai import ClarifaiConfig from .llms.ai21 import AI21Config from .llms.together_ai import TogetherAIConfig from .llms.cloudflare import CloudflareConfig @@ -668,6 +746,7 @@ from .llms.sagemaker import SagemakerConfig from .llms.ollama import OllamaConfig from .llms.ollama_chat import OllamaChatConfig from .llms.maritalk import MaritTalkConfig +from .llms.bedrock_httpx import AmazonCohereChatConfig from .llms.bedrock import ( AmazonTitanConfig, AmazonAI21Config, @@ -679,7 +758,7 @@ from .llms.bedrock import ( AmazonMistralConfig, AmazonBedrockGlobalConfig, ) -from .llms.openai import OpenAIConfig, OpenAITextCompletionConfig +from .llms.openai import OpenAIConfig, OpenAITextCompletionConfig, MistralConfig from .llms.azure import AzureOpenAIConfig, AzureOpenAIError from .llms.watsonx import IBMWatsonXAIConfig from .main import * # type: ignore diff --git a/litellm/caching.py b/litellm/caching.py index 83cfe060b1..8c9157e539 100644 --- a/litellm/caching.py +++ b/litellm/caching.py @@ -373,11 +373,12 @@ class RedisCache(BaseCache): print_verbose( f"Set ASYNC Redis Cache PIPELINE: key: {cache_key}\nValue {cache_value}\nttl={ttl}" ) + json_cache_value = json.dumps(cache_value) # Set the value with a TTL if it's provided. 
if ttl is not None: - pipe.setex(cache_key, ttl, json.dumps(cache_value)) + pipe.setex(cache_key, ttl, json_cache_value) else: - pipe.set(cache_key, json.dumps(cache_value)) + pipe.set(cache_key, json_cache_value) # Execute the pipeline and return the results. results = await pipe.execute() @@ -810,9 +811,7 @@ class RedisSemanticCache(BaseCache): # get the prompt messages = kwargs["messages"] - prompt = "" - for message in messages: - prompt += message["content"] + prompt = "".join(message["content"] for message in messages) # create an embedding for prompt embedding_response = litellm.embedding( @@ -847,9 +846,7 @@ class RedisSemanticCache(BaseCache): # get the messages messages = kwargs["messages"] - prompt = "" - for message in messages: - prompt += message["content"] + prompt = "".join(message["content"] for message in messages) # convert to embedding embedding_response = litellm.embedding( @@ -909,9 +906,7 @@ class RedisSemanticCache(BaseCache): # get the prompt messages = kwargs["messages"] - prompt = "" - for message in messages: - prompt += message["content"] + prompt = "".join(message["content"] for message in messages) # create an embedding for prompt router_model_names = ( [m["model_name"] for m in llm_model_list] @@ -964,9 +959,7 @@ class RedisSemanticCache(BaseCache): # get the messages messages = kwargs["messages"] - prompt = "" - for message in messages: - prompt += message["content"] + prompt = "".join(message["content"] for message in messages) router_model_names = ( [m["model_name"] for m in llm_model_list] @@ -1448,7 +1441,7 @@ class DualCache(BaseCache): class Cache: def __init__( self, - type: Optional[Literal["local", "redis", "redis-semantic", "s3"]] = "local", + type: Optional[Literal["local", "redis", "redis-semantic", "s3", "disk"]] = "local", host: Optional[str] = None, port: Optional[str] = None, password: Optional[str] = None, @@ -1491,13 +1484,14 @@ class Cache: redis_semantic_cache_use_async=False, redis_semantic_cache_embedding_model="text-embedding-ada-002", redis_flush_size=None, + disk_cache_dir=None, **kwargs, ): """ Initializes the cache based on the given type. Args: - type (str, optional): The type of cache to initialize. Can be "local", "redis", "redis-semantic", or "s3". Defaults to "local". + type (str, optional): The type of cache to initialize. Can be "local", "redis", "redis-semantic", "s3" or "disk". Defaults to "local". host (str, optional): The host address for the Redis cache. Required if type is "redis". port (int, optional): The port number for the Redis cache. Required if type is "redis". password (str, optional): The password for the Redis cache. Required if type is "redis". 
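For reference, a minimal sketch of how the new disk cache below would be wired up from user code (`type="disk"` and `disk_cache_dir` come from this diff; the model call itself is illustrative):

```python
import litellm
from litellm.caching import Cache

# persist LLM responses on local disk via diskcache; the dir is optional
litellm.cache = Cache(type="disk", disk_cache_dir="/tmp/litellm-disk-cache")

# a second identical call should be served from the disk cache
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
    caching=True,
)
```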
@@ -1543,6 +1537,8 @@ class Cache:
                 s3_path=s3_path,
                 **kwargs,
             )
+        elif type == "disk":
+            self.cache = DiskCache(disk_cache_dir=disk_cache_dir)
         if "cache" not in litellm.input_callback:
             litellm.input_callback.append("cache")
         if "cache" not in litellm.success_callback:
@@ -1914,8 +1910,86 @@ class Cache:
         await self.cache.disconnect()
 
 
+class DiskCache(BaseCache):
+    def __init__(self, disk_cache_dir: Optional[str] = None):
+        import diskcache as dc
+
+        # if users don't provide one, use the default litellm cache directory
+        if disk_cache_dir is None:
+            self.disk_cache = dc.Cache(".litellm_cache")
+        else:
+            self.disk_cache = dc.Cache(disk_cache_dir)
+
+    def set_cache(self, key, value, **kwargs):
+        print_verbose("DiskCache: set_cache")
+        if "ttl" in kwargs:
+            self.disk_cache.set(key, value, expire=kwargs["ttl"])
+        else:
+            self.disk_cache.set(key, value)
+
+    async def async_set_cache(self, key, value, **kwargs):
+        self.set_cache(key=key, value=value, **kwargs)
+
+    async def async_set_cache_pipeline(self, cache_list, ttl=None):
+        for cache_key, cache_value in cache_list:
+            if ttl is not None:
+                self.set_cache(key=cache_key, value=cache_value, ttl=ttl)
+            else:
+                self.set_cache(key=cache_key, value=cache_value)
+
+    def get_cache(self, key, **kwargs):
+        original_cached_response = self.disk_cache.get(key)
+        if original_cached_response:
+            try:
+                cached_response = json.loads(original_cached_response)
+            except:
+                cached_response = original_cached_response
+            return cached_response
+        return None
+
+    def batch_get_cache(self, keys: list, **kwargs):
+        return_val = []
+        for k in keys:
+            val = self.get_cache(key=k, **kwargs)
+            return_val.append(val)
+        return return_val
+
+    def increment_cache(self, key, value: int, **kwargs) -> int:
+        # get the current value (default 0), add, and persist
+        init_value = self.get_cache(key=key) or 0
+        value = init_value + value
+        self.set_cache(key, value, **kwargs)
+        return value
+
+    async def async_get_cache(self, key, **kwargs):
+        return self.get_cache(key=key, **kwargs)
+
+    async def async_batch_get_cache(self, keys: list, **kwargs):
+        return_val = []
+        for k in keys:
+            val = self.get_cache(key=k, **kwargs)
+            return_val.append(val)
+        return return_val
+
+    async def async_increment(self, key, value: int, **kwargs) -> int:
+        # get the current value (default 0), add, and persist
+        init_value = await self.async_get_cache(key=key) or 0
+        value = init_value + value
+        await self.async_set_cache(key, value, **kwargs)
+        return value
+
+    def flush_cache(self):
+        self.disk_cache.clear()
+
+    async def disconnect(self):
+        pass
+
+    def delete_cache(self, key):
+        self.disk_cache.pop(key)
+
+
 def enable_cache(
-    type: Optional[Literal["local", "redis", "s3"]] = "local",
+    type: Optional[Literal["local", "redis", "s3", "disk"]] = "local",
     host: Optional[str] = None,
     port: Optional[str] = None,
     password: Optional[str] = None,
@@ -1944,7 +2018,7 @@ def enable_cache(
     Enable cache with the specified configuration.
 
     Args:
-        type (Optional[Literal["local", "redis"]]): The type of cache to enable. Defaults to "local".
+        type (Optional[Literal["local", "redis", "s3", "disk"]]): The type of cache to enable. Defaults to "local".
         host (Optional[str]): The host address of the cache server. Defaults to None.
         port (Optional[str]): The port number of the cache server. Defaults to None.
         password (Optional[str]): The password for the cache server. Defaults to None.
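Similarly, a hypothetical one-liner through the `enable_cache` helper updated in the hunks above (sketch only; uses just the `type` parameter shown in this diff):

```python
import litellm

# convenience path - switches litellm's global cache to the disk backend
litellm.enable_cache(type="disk")
```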
@@ -1980,7 +2054,7 @@ def enable_cache( def update_cache( - type: Optional[Literal["local", "redis"]] = "local", + type: Optional[Literal["local", "redis", "s3", "disk"]] = "local", host: Optional[str] = None, port: Optional[str] = None, password: Optional[str] = None, @@ -2009,7 +2083,7 @@ def update_cache( Update the cache for LiteLLM. Args: - type (Optional[Literal["local", "redis"]]): The type of cache. Defaults to "local". + type (Optional[Literal["local", "redis", "s3", "disk"]]): The type of cache. Defaults to "local". host (Optional[str]): The host of the cache. Defaults to None. port (Optional[str]): The port of the cache. Defaults to None. password (Optional[str]): The password for the cache. Defaults to None. diff --git a/litellm/exceptions.py b/litellm/exceptions.py index d8b0a7c55a..5eb66743b5 100644 --- a/litellm/exceptions.py +++ b/litellm/exceptions.py @@ -9,55 +9,64 @@ ## LiteLLM versions of the OpenAI Exception Types -from openai import ( - AuthenticationError, - BadRequestError, - NotFoundError, - RateLimitError, - APIStatusError, - OpenAIError, - APIError, - APITimeoutError, - APIConnectionError, - APIResponseValidationError, - UnprocessableEntityError, - PermissionDeniedError, -) +import openai import httpx from typing import Optional -class AuthenticationError(AuthenticationError): # type: ignore - def __init__(self, message, llm_provider, model, response: httpx.Response): +class AuthenticationError(openai.AuthenticationError): # type: ignore + def __init__( + self, + message, + llm_provider, + model, + response: httpx.Response, + litellm_debug_info: Optional[str] = None, + ): self.status_code = 401 self.message = message self.llm_provider = llm_provider self.model = model + self.litellm_debug_info = litellm_debug_info super().__init__( self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs # raise when invalid models passed, example gpt-8 -class NotFoundError(NotFoundError): # type: ignore - def __init__(self, message, model, llm_provider, response: httpx.Response): +class NotFoundError(openai.NotFoundError): # type: ignore + def __init__( + self, + message, + model, + llm_provider, + response: httpx.Response, + litellm_debug_info: Optional[str] = None, + ): self.status_code = 404 self.message = message self.model = model self.llm_provider = llm_provider + self.litellm_debug_info = litellm_debug_info super().__init__( self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs -class BadRequestError(BadRequestError): # type: ignore +class BadRequestError(openai.BadRequestError): # type: ignore def __init__( - self, message, model, llm_provider, response: Optional[httpx.Response] = None + self, + message, + model, + llm_provider, + response: Optional[httpx.Response] = None, + litellm_debug_info: Optional[str] = None, ): self.status_code = 400 self.message = message self.model = model self.llm_provider = llm_provider + self.litellm_debug_info = litellm_debug_info response = response or httpx.Response( status_code=self.status_code, request=httpx.Request( @@ -69,19 +78,29 @@ class BadRequestError(BadRequestError): # type: ignore ) # Call the base class constructor with the parameters it needs -class UnprocessableEntityError(UnprocessableEntityError): # type: ignore - def __init__(self, message, model, llm_provider, response: httpx.Response): +class UnprocessableEntityError(openai.UnprocessableEntityError): # type: ignore + def __init__( + self, + message, + model, + 
llm_provider, + response: httpx.Response, + litellm_debug_info: Optional[str] = None, + ): self.status_code = 422 self.message = message self.model = model self.llm_provider = llm_provider + self.litellm_debug_info = litellm_debug_info super().__init__( self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs -class Timeout(APITimeoutError): # type: ignore - def __init__(self, message, model, llm_provider): +class Timeout(openai.APITimeoutError): # type: ignore + def __init__( + self, message, model, llm_provider, litellm_debug_info: Optional[str] = None + ): request = httpx.Request(method="POST", url="https://api.openai.com/v1") super().__init__( request=request @@ -90,29 +109,46 @@ class Timeout(APITimeoutError): # type: ignore self.message = message self.model = model self.llm_provider = llm_provider + self.litellm_debug_info = litellm_debug_info # custom function to convert to str def __str__(self): return str(self.message) -class PermissionDeniedError(PermissionDeniedError): # type:ignore - def __init__(self, message, llm_provider, model, response: httpx.Response): +class PermissionDeniedError(openai.PermissionDeniedError): # type:ignore + def __init__( + self, + message, + llm_provider, + model, + response: httpx.Response, + litellm_debug_info: Optional[str] = None, + ): self.status_code = 403 self.message = message self.llm_provider = llm_provider self.model = model + self.litellm_debug_info = litellm_debug_info super().__init__( self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs -class RateLimitError(RateLimitError): # type: ignore - def __init__(self, message, llm_provider, model, response: httpx.Response): +class RateLimitError(openai.RateLimitError): # type: ignore + def __init__( + self, + message, + llm_provider, + model, + response: httpx.Response, + litellm_debug_info: Optional[str] = None, + ): self.status_code = 429 self.message = message self.llm_provider = llm_provider self.modle = model + self.litellm_debug_info = litellm_debug_info super().__init__( self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs @@ -120,11 +156,19 @@ class RateLimitError(RateLimitError): # type: ignore # sub class of rate limit error - meant to give more granularity for error handling context window exceeded errors class ContextWindowExceededError(BadRequestError): # type: ignore - def __init__(self, message, model, llm_provider, response: httpx.Response): + def __init__( + self, + message, + model, + llm_provider, + response: httpx.Response, + litellm_debug_info: Optional[str] = None, + ): self.status_code = 400 self.message = message self.model = model self.llm_provider = llm_provider + self.litellm_debug_info = litellm_debug_info super().__init__( message=self.message, model=self.model, # type: ignore @@ -135,11 +179,19 @@ class ContextWindowExceededError(BadRequestError): # type: ignore class ContentPolicyViolationError(BadRequestError): # type: ignore # Error code: 400 - {'error': {'code': 'content_policy_violation', 'message': 'Your request was rejected as a result of our safety system. Image descriptions generated from your prompt may contain text that is not allowed by our safety system. 
If you believe this was done in error, your request may succeed if retried, or by adjusting your prompt.', 'param': None, 'type': 'invalid_request_error'}} - def __init__(self, message, model, llm_provider, response: httpx.Response): + def __init__( + self, + message, + model, + llm_provider, + response: httpx.Response, + litellm_debug_info: Optional[str] = None, + ): self.status_code = 400 self.message = message self.model = model self.llm_provider = llm_provider + self.litellm_debug_info = litellm_debug_info super().__init__( message=self.message, model=self.model, # type: ignore @@ -148,51 +200,77 @@ class ContentPolicyViolationError(BadRequestError): # type: ignore ) # Call the base class constructor with the parameters it needs -class ServiceUnavailableError(APIStatusError): # type: ignore - def __init__(self, message, llm_provider, model, response: httpx.Response): +class ServiceUnavailableError(openai.APIStatusError): # type: ignore + def __init__( + self, + message, + llm_provider, + model, + response: httpx.Response, + litellm_debug_info: Optional[str] = None, + ): self.status_code = 503 self.message = message self.llm_provider = llm_provider self.model = model + self.litellm_debug_info = litellm_debug_info super().__init__( self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs # raise this when the API returns an invalid response object - https://github.com/openai/openai-python/blob/1be14ee34a0f8e42d3f9aa5451aa4cb161f1781f/openai/api_requestor.py#L401 -class APIError(APIError): # type: ignore +class APIError(openai.APIError): # type: ignore def __init__( - self, status_code, message, llm_provider, model, request: httpx.Request + self, + status_code, + message, + llm_provider, + model, + request: httpx.Request, + litellm_debug_info: Optional[str] = None, ): self.status_code = status_code self.message = message self.llm_provider = llm_provider self.model = model + self.litellm_debug_info = litellm_debug_info super().__init__(self.message, request=request, body=None) # type: ignore # raised if an invalid request (not get, delete, put, post) is made -class APIConnectionError(APIConnectionError): # type: ignore - def __init__(self, message, llm_provider, model, request: httpx.Request): +class APIConnectionError(openai.APIConnectionError): # type: ignore + def __init__( + self, + message, + llm_provider, + model, + request: httpx.Request, + litellm_debug_info: Optional[str] = None, + ): self.message = message self.llm_provider = llm_provider self.model = model self.status_code = 500 + self.litellm_debug_info = litellm_debug_info super().__init__(message=self.message, request=request) # raised if an invalid request (not get, delete, put, post) is made -class APIResponseValidationError(APIResponseValidationError): # type: ignore - def __init__(self, message, llm_provider, model): +class APIResponseValidationError(openai.APIResponseValidationError): # type: ignore + def __init__( + self, message, llm_provider, model, litellm_debug_info: Optional[str] = None + ): self.message = message self.llm_provider = llm_provider self.model = model request = httpx.Request(method="POST", url="https://api.openai.com/v1") response = httpx.Response(status_code=500, request=request) + self.litellm_debug_info = litellm_debug_info super().__init__(response=response, body=None, message=message) -class OpenAIError(OpenAIError): # type: ignore +class OpenAIError(openai.OpenAIError): # type: ignore def __init__(self, original_exception): self.status_code = 
original_exception.http_status super().__init__( @@ -214,7 +292,7 @@ class BudgetExceededError(Exception): ## DEPRECATED ## -class InvalidRequestError(BadRequestError): # type: ignore +class InvalidRequestError(openai.BadRequestError): # type: ignore def __init__(self, message, model, llm_provider): self.status_code = 400 self.message = message diff --git a/litellm/integrations/aispend.py b/litellm/integrations/aispend.py index a893f8923b..2fe8ea0dfa 100644 --- a/litellm/integrations/aispend.py +++ b/litellm/integrations/aispend.py @@ -1,8 +1,6 @@ #### What this does #### # On success + failure, log events to aispend.io import dotenv, os - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime diff --git a/litellm/integrations/berrispend.py b/litellm/integrations/berrispend.py index 1f0ae4581f..7d30b706c8 100644 --- a/litellm/integrations/berrispend.py +++ b/litellm/integrations/berrispend.py @@ -3,7 +3,6 @@ import dotenv, os import requests # type: ignore -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime diff --git a/litellm/integrations/clickhouse.py b/litellm/integrations/clickhouse.py index 7d1fb37d94..0c38b86267 100644 --- a/litellm/integrations/clickhouse.py +++ b/litellm/integrations/clickhouse.py @@ -8,8 +8,6 @@ from litellm.proxy._types import UserAPIKeyAuth from litellm.caching import DualCache from typing import Literal, Union - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback @@ -18,8 +16,6 @@ import traceback import dotenv, os import requests - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/custom_logger.py b/litellm/integrations/custom_logger.py index 8a3e0f4673..d508825922 100644 --- a/litellm/integrations/custom_logger.py +++ b/litellm/integrations/custom_logger.py @@ -6,8 +6,6 @@ from litellm.proxy._types import UserAPIKeyAuth from litellm.caching import DualCache from typing import Literal, Union, Optional - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback diff --git a/litellm/integrations/datadog.py b/litellm/integrations/datadog.py index d969341fc4..6d5e08faff 100644 --- a/litellm/integrations/datadog.py +++ b/litellm/integrations/datadog.py @@ -3,8 +3,6 @@ import dotenv, os import requests # type: ignore - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/dynamodb.py b/litellm/integrations/dynamodb.py index b5462ee7fa..21ccabe4b7 100644 --- a/litellm/integrations/dynamodb.py +++ b/litellm/integrations/dynamodb.py @@ -3,8 +3,6 @@ import dotenv, os import requests # type: ignore - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/helicone.py b/litellm/integrations/helicone.py index c8c1075419..85e73258ea 100644 --- a/litellm/integrations/helicone.py +++ b/litellm/integrations/helicone.py @@ -3,8 +3,6 @@ import dotenv, os import requests # type: ignore import litellm - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback diff --git a/litellm/integrations/lago.py b/litellm/integrations/lago.py new file mode 100644 index 0000000000..e6d38f530c --- /dev/null +++ b/litellm/integrations/lago.py @@ -0,0 +1,179 @@ +# What is this? 
+## On Success events log cost to Lago - https://github.com/BerriAI/litellm/issues/3639 + +import dotenv, os, json +import litellm +import traceback, httpx +from litellm.integrations.custom_logger import CustomLogger +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler +import uuid +from typing import Optional, Literal + + +def get_utc_datetime(): + import datetime as dt + from datetime import datetime + + if hasattr(dt, "UTC"): + return datetime.now(dt.UTC) # type: ignore + else: + return datetime.utcnow() # type: ignore + + +class LagoLogger(CustomLogger): + def __init__(self) -> None: + super().__init__() + self.validate_environment() + self.async_http_handler = AsyncHTTPHandler() + self.sync_http_handler = HTTPHandler() + + def validate_environment(self): + """ + Expects + LAGO_API_BASE, + LAGO_API_KEY, + LAGO_API_EVENT_CODE, + + Optional: + LAGO_API_CHARGE_BY + + in the environment + """ + missing_keys = [] + if os.getenv("LAGO_API_KEY", None) is None: + missing_keys.append("LAGO_API_KEY") + + if os.getenv("LAGO_API_BASE", None) is None: + missing_keys.append("LAGO_API_BASE") + + if os.getenv("LAGO_API_EVENT_CODE", None) is None: + missing_keys.append("LAGO_API_EVENT_CODE") + + if len(missing_keys) > 0: + raise Exception("Missing keys={} in environment.".format(missing_keys)) + + def _common_logic(self, kwargs: dict, response_obj) -> dict: + call_id = response_obj.get("id", kwargs.get("litellm_call_id")) + dt = get_utc_datetime().isoformat() + cost = kwargs.get("response_cost", None) + model = kwargs.get("model") + usage = {} + + if ( + isinstance(response_obj, litellm.ModelResponse) + or isinstance(response_obj, litellm.EmbeddingResponse) + ) and hasattr(response_obj, "usage"): + usage = { + "prompt_tokens": response_obj["usage"].get("prompt_tokens", 0), + "completion_tokens": response_obj["usage"].get("completion_tokens", 0), + "total_tokens": response_obj["usage"].get("total_tokens"), + } + + litellm_params = kwargs.get("litellm_params", {}) or {} + proxy_server_request = litellm_params.get("proxy_server_request") or {} + end_user_id = proxy_server_request.get("body", {}).get("user", None) + user_id = litellm_params["metadata"].get("user_api_key_user_id", None) + team_id = litellm_params["metadata"].get("user_api_key_team_id", None) + org_id = litellm_params["metadata"].get("user_api_key_org_id", None) + + charge_by: Literal["end_user_id", "team_id", "user_id"] = "end_user_id" + external_customer_id: Optional[str] = None + + if os.getenv("LAGO_API_CHARGE_BY", None) is not None and isinstance( + os.environ["LAGO_API_CHARGE_BY"], str + ): + if os.environ["LAGO_API_CHARGE_BY"] in [ + "end_user_id", + "user_id", + "team_id", + ]: + charge_by = os.environ["LAGO_API_CHARGE_BY"] # type: ignore + else: + raise Exception("invalid LAGO_API_CHARGE_BY set") + + if charge_by == "end_user_id": + external_customer_id = end_user_id + elif charge_by == "team_id": + external_customer_id = team_id + elif charge_by == "user_id": + external_customer_id = user_id + + if external_customer_id is None: + raise Exception("External Customer ID is not set") + + return { + "event": { + "transaction_id": str(uuid.uuid4()), + "external_customer_id": external_customer_id, + "code": os.getenv("LAGO_API_EVENT_CODE"), + "properties": {"model": model, "response_cost": cost, **usage}, + } + } + + def log_success_event(self, kwargs, response_obj, start_time, end_time): + _url = os.getenv("LAGO_API_BASE") + assert _url is not None and isinstance( + _url, str + ), "LAGO_API_BASE missing or 
not set correctly. LAGO_API_BASE={}".format(_url) + if _url.endswith("/"): + _url += "api/v1/events" + else: + _url += "/api/v1/events" + + api_key = os.getenv("LAGO_API_KEY") + + _data = self._common_logic(kwargs=kwargs, response_obj=response_obj) + _headers = { + "Content-Type": "application/json", + "Authorization": "Bearer {}".format(api_key), + } + + try: + response = self.sync_http_handler.post( + url=_url, + data=json.dumps(_data), + headers=_headers, + ) + + response.raise_for_status() + except Exception as e: + if hasattr(response, "text"): + litellm.print_verbose(f"\nError Message: {response.text}") + raise e + + async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): + try: + _url = os.getenv("LAGO_API_BASE") + assert _url is not None and isinstance( + _url, str + ), "LAGO_API_BASE missing or not set correctly. LAGO_API_BASE={}".format( + _url + ) + if _url.endswith("/"): + _url += "api/v1/events" + else: + _url += "/api/v1/events" + + api_key = os.getenv("LAGO_API_KEY") + + _data = self._common_logic(kwargs=kwargs, response_obj=response_obj) + _headers = { + "Content-Type": "application/json", + "Authorization": "Bearer {}".format(api_key), + } + except Exception as e: + raise e + + response: Optional[httpx.Response] = None + try: + response = await self.async_http_handler.post( + url=_url, + data=json.dumps(_data), + headers=_headers, + ) + + response.raise_for_status() + except Exception as e: + if response is not None and hasattr(response, "text"): + litellm.print_verbose(f"\nError Message: {response.text}") + raise e diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index caf5437b24..f4a581eb9f 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -1,8 +1,6 @@ #### What this does #### # On success, logs events to Langfuse -import dotenv, os - -dotenv.load_dotenv() # Loading env variables using dotenv +import os import copy import traceback from packaging.version import Version @@ -262,7 +260,23 @@ class LangFuseLogger: try: tags = [] - metadata = copy.deepcopy(metadata) # Avoid modifying the original metadata + try: + metadata = copy.deepcopy( + metadata + ) # Avoid modifying the original metadata + except: + new_metadata = {} + for key, value in metadata.items(): + if ( + isinstance(value, list) + or isinstance(value, dict) + or isinstance(value, str) + or isinstance(value, int) + or isinstance(value, float) + ): + new_metadata[key] = copy.deepcopy(value) + metadata = new_metadata + supports_tags = Version(langfuse.version.__version__) >= Version("2.6.3") supports_prompt = Version(langfuse.version.__version__) >= Version("2.7.3") supports_costs = Version(langfuse.version.__version__) >= Version("2.7.3") @@ -307,6 +321,9 @@ class LangFuseLogger: trace_id = clean_metadata.pop("trace_id", None) existing_trace_id = clean_metadata.pop("existing_trace_id", None) update_trace_keys = clean_metadata.pop("update_trace_keys", []) + debug = clean_metadata.pop("debug_langfuse", None) + mask_input = clean_metadata.pop("mask_input", False) + mask_output = clean_metadata.pop("mask_output", False) if trace_name is None and existing_trace_id is None: # just log `litellm-{call_type}` as the trace name @@ -334,18 +351,19 @@ class LangFuseLogger: # Special keys that are found in the function arguments and not the metadata if "input" in update_trace_keys: - trace_params["input"] = input + trace_params["input"] = input if not mask_input else "redacted-by-litellm" if "output" in update_trace_keys: - 
trace_params["output"] = output + trace_params["output"] = output if not mask_output else "redacted-by-litellm" else: # don't overwrite an existing trace trace_params = { "id": trace_id, "name": trace_name, "session_id": session_id, - "input": input, + "input": input if not mask_input else "redacted-by-litellm", "version": clean_metadata.pop( "trace_version", clean_metadata.get("version", None) ), # If provided just version, it will applied to the trace as well, if applied a trace version it will take precedence + "user_id": user_id, } for key in list( filter(lambda key: key.startswith("trace_"), clean_metadata.keys()) @@ -357,7 +375,14 @@ class LangFuseLogger: if level == "ERROR": trace_params["status_message"] = output else: - trace_params["output"] = output + trace_params["output"] = output if not mask_output else "redacted-by-litellm" + + if debug == True or (isinstance(debug, str) and debug.lower() == "true"): + if "metadata" in trace_params: + # log the raw_metadata in the trace + trace_params["metadata"]["metadata_passed_to_litellm"] = metadata + else: + trace_params["metadata"] = {"metadata_passed_to_litellm": metadata} cost = kwargs.get("response_cost", None) print_verbose(f"trace: {cost}") @@ -409,7 +434,6 @@ class LangFuseLogger: "url": url, "headers": clean_headers, } - trace = self.Langfuse.trace(**trace_params) generation_id = None @@ -441,8 +465,8 @@ class LangFuseLogger: "end_time": end_time, "model": kwargs["model"], "model_parameters": optional_params, - "input": input, - "output": output, + "input": input if not mask_input else "redacted-by-litellm", + "output": output if not mask_output else "redacted-by-litellm", "usage": usage, "metadata": clean_metadata, "level": level, @@ -450,7 +474,29 @@ class LangFuseLogger: } if supports_prompt: - generation_params["prompt"] = clean_metadata.pop("prompt", None) + user_prompt = clean_metadata.pop("prompt", None) + if user_prompt is None: + pass + elif isinstance(user_prompt, dict): + from langfuse.model import ( + TextPromptClient, + ChatPromptClient, + Prompt_Text, + Prompt_Chat, + ) + + if user_prompt.get("type", "") == "chat": + _prompt_chat = Prompt_Chat(**user_prompt) + generation_params["prompt"] = ChatPromptClient( + prompt=_prompt_chat + ) + elif user_prompt.get("type", "") == "text": + _prompt_text = Prompt_Text(**user_prompt) + generation_params["prompt"] = TextPromptClient( + prompt=_prompt_text + ) + else: + generation_params["prompt"] = user_prompt if output is not None and isinstance(output, str) and level == "ERROR": generation_params["status_message"] = output diff --git a/litellm/integrations/langsmith.py b/litellm/integrations/langsmith.py index 8a0fb38522..92e4402155 100644 --- a/litellm/integrations/langsmith.py +++ b/litellm/integrations/langsmith.py @@ -3,8 +3,6 @@ import dotenv, os # type: ignore import requests # type: ignore from datetime import datetime - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import asyncio import types diff --git a/litellm/integrations/lunary.py b/litellm/integrations/lunary.py index 6ddf2ca599..2e16e44a14 100644 --- a/litellm/integrations/lunary.py +++ b/litellm/integrations/lunary.py @@ -2,14 +2,10 @@ # On success + failure, log events to lunary.ai from datetime import datetime, timezone import traceback -import dotenv import importlib -import sys import packaging -dotenv.load_dotenv() - # convert to {completion: xx, tokens: xx} def parse_usage(usage): @@ -18,13 +14,33 @@ def parse_usage(usage): "prompt": usage["prompt_tokens"] if "prompt_tokens" 
in usage else 0, } +def parse_tool_calls(tool_calls): + if tool_calls is None: + return None + + def clean_tool_call(tool_call): + + serialized = { + "type": tool_call.type, + "id": tool_call.id, + "function": { + "name": tool_call.function.name, + "arguments": tool_call.function.arguments, + } + } + + return serialized + + return [clean_tool_call(tool_call) for tool_call in tool_calls] + def parse_messages(input): + if input is None: return None def clean_message(message): - # if is strin, return as is + # if is string, return as is if isinstance(message, str): return message @@ -38,9 +54,7 @@ def parse_messages(input): # Only add tool_calls and function_call to res if they are set if message.get("tool_calls"): - serialized["tool_calls"] = message.get("tool_calls") - if message.get("function_call"): - serialized["function_call"] = message.get("function_call") + serialized["tool_calls"] = parse_tool_calls(message.get("tool_calls")) return serialized @@ -62,14 +76,16 @@ class LunaryLogger: version = importlib.metadata.version("lunary") # if version < 0.1.43 then raise ImportError if packaging.version.Version(version) < packaging.version.Version("0.1.43"): - print( + print( # noqa "Lunary version outdated. Required: >= 0.1.43. Upgrade via 'pip install lunary --upgrade'" ) raise ImportError self.lunary_client = lunary except ImportError: - print("Lunary not installed. Please install it using 'pip install lunary'") + print( # noqa + "Lunary not installed. Please install it using 'pip install lunary'" + ) # noqa raise ImportError def log_event( @@ -93,8 +109,13 @@ class LunaryLogger: print_verbose(f"Lunary Logging - Logging request for model {model}") litellm_params = kwargs.get("litellm_params", {}) + optional_params = kwargs.get("optional_params", {}) metadata = litellm_params.get("metadata", {}) or {} + if optional_params: + # merge into extra + extra = {**extra, **optional_params} + tags = litellm_params.pop("tags", None) or [] if extra: @@ -104,7 +125,7 @@ class LunaryLogger: # keep only serializable types for param, value in extra.items(): - if not isinstance(value, (str, int, bool, float)): + if not isinstance(value, (str, int, bool, float)) and param != "tools": try: extra[param] = str(value) except: @@ -140,7 +161,7 @@ class LunaryLogger: metadata=metadata, runtime="litellm", tags=tags, - extra=extra, + params=extra, ) self.lunary_client.track_event( diff --git a/litellm/integrations/openmeter.py b/litellm/integrations/openmeter.py index a454739d54..2c470d6f49 100644 --- a/litellm/integrations/openmeter.py +++ b/litellm/integrations/openmeter.py @@ -3,8 +3,6 @@ import dotenv, os, json import litellm - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback from litellm.integrations.custom_logger import CustomLogger from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler diff --git a/litellm/integrations/prometheus.py b/litellm/integrations/prometheus.py index 577946ce18..6fbc6ca4ce 100644 --- a/litellm/integrations/prometheus.py +++ b/litellm/integrations/prometheus.py @@ -4,8 +4,6 @@ import dotenv, os import requests # type: ignore - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/prometheus_services.py b/litellm/integrations/prometheus_services.py index d276bb85ba..8fce8930de 100644 --- a/litellm/integrations/prometheus_services.py +++ b/litellm/integrations/prometheus_services.py @@ -5,8 +5,6 @@ import dotenv, os import 
requests # type: ignore - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/prompt_layer.py b/litellm/integrations/prompt_layer.py index ce610e1ef1..531ed75fe0 100644 --- a/litellm/integrations/prompt_layer.py +++ b/litellm/integrations/prompt_layer.py @@ -3,8 +3,6 @@ import dotenv, os import requests # type: ignore from pydantic import BaseModel - -dotenv.load_dotenv() # Loading env variables using dotenv import traceback diff --git a/litellm/integrations/s3.py b/litellm/integrations/s3.py index d31b158402..d131e44f0e 100644 --- a/litellm/integrations/s3.py +++ b/litellm/integrations/s3.py @@ -1,9 +1,7 @@ #### What this does #### # On success + failure, log events to Supabase -import dotenv, os - -dotenv.load_dotenv() # Loading env variables using dotenv +import os import traceback import datetime, subprocess, sys import litellm, uuid diff --git a/litellm/integrations/slack_alerting.py b/litellm/integrations/slack_alerting.py index 07c3585f08..40258af6b8 100644 --- a/litellm/integrations/slack_alerting.py +++ b/litellm/integrations/slack_alerting.py @@ -2,8 +2,6 @@ # Class for sending Slack Alerts # import dotenv, os from litellm.proxy._types import UserAPIKeyAuth - -dotenv.load_dotenv() # Loading env variables using dotenv from litellm._logging import verbose_logger, verbose_proxy_logger import litellm, threading from typing import List, Literal, Any, Union, Optional, Dict @@ -14,7 +12,7 @@ from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler import datetime from pydantic import BaseModel from enum import Enum -from datetime import datetime as dt, timedelta +from datetime import datetime as dt, timedelta, timezone from litellm.integrations.custom_logger import CustomLogger import random @@ -33,7 +31,10 @@ class LiteLLMBase(BaseModel): class SlackAlertingArgs(LiteLLMBase): - daily_report_frequency: int = 12 * 60 * 60 # 12 hours + default_daily_report_frequency: int = 12 * 60 * 60 # 12 hours + daily_report_frequency: int = int( + os.getenv("SLACK_DAILY_REPORT_FREQUENCY", default_daily_report_frequency) + ) report_check_interval: int = 5 * 60 # 5 minutes @@ -78,16 +79,14 @@ class SlackAlerting(CustomLogger): internal_usage_cache: Optional[DualCache] = None, alerting_threshold: float = 300, # threshold for slow / hanging llm responses (in seconds) alerting: Optional[List] = [], - alert_types: Optional[ - List[ - Literal[ - "llm_exceptions", - "llm_too_slow", - "llm_requests_hanging", - "budget_alerts", - "db_exceptions", - "daily_reports", - ] + alert_types: List[ + Literal[ + "llm_exceptions", + "llm_too_slow", + "llm_requests_hanging", + "budget_alerts", + "db_exceptions", + "daily_reports", ] ] = [ "llm_exceptions", @@ -242,6 +241,8 @@ class SlackAlerting(CustomLogger): end_time=end_time, ) ) + if litellm.turn_off_message_logging: + messages = "Message not logged. `litellm.turn_off_message_logging=True`." 
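+            # Redact the message content from the slow-response alert when the
+            # user has disabled message logging globally, e.g. via
+            # `litellm.turn_off_message_logging = True`; only request metadata
+            # (model, api base) is then included in the Slack message.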
request_info = f"\nRequest Model: `{model}`\nAPI Base: `{api_base}`\nMessages: `{messages}`" slow_message = f"`Responses are slow - {round(time_difference_float,2)}s response time > Alerting threshold: {self.alerting_threshold}s`" if time_difference_float > self.alerting_threshold: @@ -348,8 +349,9 @@ class SlackAlerting(CustomLogger): all_none = True for val in combined_metrics_values: - if val is not None: + if val is not None and val > 0: all_none = False + break if all_none: return False @@ -367,12 +369,15 @@ class SlackAlerting(CustomLogger): for value in failed_request_values ] - ## Get the indices of top 5 keys with the highest numerical values (ignoring None values) + ## Get the indices of top 5 keys with the highest numerical values (ignoring None and 0 values) top_5_failed = sorted( range(len(replaced_failed_values)), key=lambda i: replaced_failed_values[i], reverse=True, )[:5] + top_5_failed = [ + index for index in top_5_failed if replaced_failed_values[index] > 0 + ] # find top 5 slowest # Replace None values with a placeholder value (-1 in this case) @@ -382,17 +387,22 @@ class SlackAlerting(CustomLogger): for value in latency_values ] - # Get the indices of top 5 values with the highest numerical values (ignoring None values) + # Get the indices of top 5 values with the highest numerical values (ignoring None and 0 values) top_5_slowest = sorted( range(len(replaced_slowest_values)), key=lambda i: replaced_slowest_values[i], reverse=True, )[:5] + top_5_slowest = [ + index for index in top_5_slowest if replaced_slowest_values[index] > 0 + ] # format alert -> return the litellm model name + api base message = f"\n\nHere are today's key metrics ๐Ÿ“ˆ: \n\n" - message += "\n\n*โ—๏ธ Top 5 Deployments with Most Failed Requests:*\n\n" + message += "\n\n*โ—๏ธ Top Deployments with Most Failed Requests:*\n\n" + if not top_5_failed: + message += "\tNone\n" for i in range(len(top_5_failed)): key = failed_request_keys[top_5_failed[i]].split(":")[0] _deployment = router.get_model_info(key) @@ -412,7 +422,9 @@ class SlackAlerting(CustomLogger): value = replaced_failed_values[top_5_failed[i]] message += f"\t{i+1}. Deployment: `{deployment_name}`, Failed Requests: `{value}`, API Base: `{api_base}`\n" - message += "\n\n*๐Ÿ˜… Top 5 Slowest Deployments:*\n\n" + message += "\n\n*๐Ÿ˜… Top Slowest Deployments:*\n\n" + if not top_5_slowest: + message += "\tNone\n" for i in range(len(top_5_slowest)): key = latency_keys[top_5_slowest[i]].split(":")[0] _deployment = router.get_model_info(key) @@ -464,6 +476,11 @@ class SlackAlerting(CustomLogger): messages = messages[:100] except: messages = "" + + if litellm.turn_off_message_logging: + messages = ( + "Message not logged. `litellm.turn_off_message_logging=True`." 
+ ) request_info = f"\nRequest Model: `{model}`\nMessages: `{messages}`" else: request_info = "" @@ -814,14 +831,6 @@ Model Info: updated_at=litellm.utils.get_utc_datetime(), ) ) - if "llm_exceptions" in self.alert_types: - original_exception = kwargs.get("exception", None) - - await self.send_alert( - message="LLM API Failure - " + str(original_exception), - level="High", - alert_type="llm_exceptions", - ) async def _run_scheduler_helper(self, llm_router) -> bool: """ @@ -844,15 +853,22 @@ Model Info: value=_current_time, ) else: - # check if current time - interval >= time last sent - delta = current_time - timedelta( - seconds=self.alerting_args.daily_report_frequency - ) - + # Check if current time - interval >= time last sent + delta_naive = timedelta(seconds=self.alerting_args.daily_report_frequency) if isinstance(report_sent, str): report_sent = dt.fromisoformat(report_sent) - if delta >= report_sent: + # Ensure report_sent is an aware datetime object + if report_sent.tzinfo is None: + report_sent = report_sent.replace(tzinfo=timezone.utc) + + # Calculate delta as an aware datetime object with the same timezone as report_sent + delta = report_sent - delta_naive + + current_time_utc = current_time.astimezone(timezone.utc) + delta_utc = delta.astimezone(timezone.utc) + + if current_time_utc >= delta_utc: # Sneak in the reporting logic here await self.send_daily_reports(router=llm_router) # Also, don't forget to update the report_sent time after sending the report! @@ -885,3 +901,99 @@ Model Info: ) # shuffle to prevent collisions await asyncio.sleep(interval) return + + async def send_weekly_spend_report(self): + """ """ + try: + from litellm.proxy.proxy_server import _get_spend_report_for_time_range + + todays_date = datetime.datetime.now().date() + week_before = todays_date - datetime.timedelta(days=7) + + weekly_spend_per_team, weekly_spend_per_tag = ( + await _get_spend_report_for_time_range( + start_date=week_before.strftime("%Y-%m-%d"), + end_date=todays_date.strftime("%Y-%m-%d"), + ) + ) + + _weekly_spend_message = f"*๐Ÿ’ธ Weekly Spend Report for `{week_before.strftime('%m-%d-%Y')} - {todays_date.strftime('%m-%d-%Y')}` *\n" + + if weekly_spend_per_team is not None: + _weekly_spend_message += "\n*Team Spend Report:*\n" + for spend in weekly_spend_per_team: + _team_spend = spend["total_spend"] + _team_spend = float(_team_spend) + # round to 4 decimal places + _team_spend = round(_team_spend, 4) + _weekly_spend_message += ( + f"Team: `{spend['team_alias']}` | Spend: `${_team_spend}`\n" + ) + + if weekly_spend_per_tag is not None: + _weekly_spend_message += "\n*Tag Spend Report:*\n" + for spend in weekly_spend_per_tag: + _tag_spend = spend["total_spend"] + _tag_spend = float(_tag_spend) + # round to 4 decimal places + _tag_spend = round(_tag_spend, 4) + _weekly_spend_message += f"Tag: `{spend['individual_request_tag']}` | Spend: `${_tag_spend}`\n" + + await self.send_alert( + message=_weekly_spend_message, + level="Low", + alert_type="daily_reports", + ) + except Exception as e: + verbose_proxy_logger.error("Error sending weekly spend report", e) + + async def send_monthly_spend_report(self): + """ """ + try: + from calendar import monthrange + + from litellm.proxy.proxy_server import _get_spend_report_for_time_range + + todays_date = datetime.datetime.now().date() + first_day_of_month = todays_date.replace(day=1) + _, last_day_of_month = monthrange(todays_date.year, todays_date.month) + last_day_of_month = first_day_of_month + datetime.timedelta( + days=last_day_of_month - 1 + ) + 
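+            # `monthrange(year, month)` returns (weekday_of_first_day, days_in_month),
+            # so `last_day_of_month` first holds the day count and is then re-bound
+            # to the actual date of the month's final day
+            # (e.g. 2024-05-01 + timedelta(days=30) -> 2024-05-31).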
+            monthly_spend_per_team, monthly_spend_per_tag = (
+                await _get_spend_report_for_time_range(
+                    start_date=first_day_of_month.strftime("%Y-%m-%d"),
+                    end_date=last_day_of_month.strftime("%Y-%m-%d"),
+                )
+            )
+
+            _spend_message = f"*๐Ÿ’ธ Monthly Spend Report for `{first_day_of_month.strftime('%m-%d-%Y')} - {last_day_of_month.strftime('%m-%d-%Y')}` *\n"
+
+            if monthly_spend_per_team is not None:
+                _spend_message += "\n*Team Spend Report:*\n"
+                for spend in monthly_spend_per_team:
+                    _team_spend = spend["total_spend"]
+                    _team_spend = float(_team_spend)
+                    # round to 4 decimal places
+                    _team_spend = round(_team_spend, 4)
+                    _spend_message += (
+                        f"Team: `{spend['team_alias']}` | Spend: `${_team_spend}`\n"
+                    )
+
+            if monthly_spend_per_tag is not None:
+                _spend_message += "\n*Tag Spend Report:*\n"
+                for spend in monthly_spend_per_tag:
+                    _tag_spend = spend["total_spend"]
+                    _tag_spend = float(_tag_spend)
+                    # round to 4 decimal places
+                    _tag_spend = round(_tag_spend, 4)
+                    _spend_message += f"Tag: `{spend['individual_request_tag']}` | Spend: `${_tag_spend}`\n"
+
+            await self.send_alert(
+                message=_spend_message,
+                level="Low",
+                alert_type="daily_reports",
+            )
+        except Exception as e:
+            verbose_proxy_logger.error("Error sending monthly spend report: %s", e)
diff --git a/litellm/integrations/supabase.py b/litellm/integrations/supabase.py
index 58beba8a3d..4e6bf517f3 100644
--- a/litellm/integrations/supabase.py
+++ b/litellm/integrations/supabase.py
@@ -3,8 +3,6 @@
 import dotenv, os
 import requests # type: ignore
-
-dotenv.load_dotenv() # Loading env variables using dotenv
 import traceback
 import datetime, subprocess, sys
 import litellm
diff --git a/litellm/integrations/weights_biases.py b/litellm/integrations/weights_biases.py
index 53e6070a5b..a56233b22f 100644
--- a/litellm/integrations/weights_biases.py
+++ b/litellm/integrations/weights_biases.py
@@ -21,11 +21,11 @@ try:
         # contains a (known) object attribute
         object: Literal["chat.completion", "edit", "text_completion"]

-        def __getitem__(self, key: K) -> V:
-            ... # pragma: no cover
+        def __getitem__(self, key: K) -> V: ... # noqa

-        def get(self, key: K, default: Optional[V] = None) -> Optional[V]:
-            ... # pragma: no cover
+        def get( # noqa
+            self, key: K, default: Optional[V] = None
+        ) -> Optional[V]: ...
# pragma: no cover class OpenAIRequestResponseResolver: def __call__( @@ -173,12 +173,11 @@ except: #### What this does #### # On success, logs events to Langfuse -import dotenv, os +import os import requests import requests from datetime import datetime -dotenv.load_dotenv() # Loading env variables using dotenv import traceback diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py index 818c4ecb3a..97a473a2ee 100644 --- a/litellm/llms/anthropic.py +++ b/litellm/llms/anthropic.py @@ -3,7 +3,7 @@ import json from enum import Enum import requests, copy # type: ignore import time -from typing import Callable, Optional, List +from typing import Callable, Optional, List, Union from litellm.utils import ModelResponse, Usage, map_finish_reason, CustomStreamWrapper import litellm from .prompt_templates.factory import prompt_factory, custom_prompt @@ -151,19 +151,135 @@ class AnthropicChatCompletion(BaseLLM): def __init__(self) -> None: super().__init__() + def process_streaming_response( + self, + model: str, + response: Union[requests.Response, httpx.Response], + model_response: ModelResponse, + stream: bool, + logging_obj: litellm.utils.Logging, + optional_params: dict, + api_key: str, + data: Union[dict, str], + messages: List, + print_verbose, + encoding, + ) -> CustomStreamWrapper: + """ + Return stream object for tool-calling + streaming + """ + ## LOGGING + logging_obj.post_call( + input=messages, + api_key=api_key, + original_response=response.text, + additional_args={"complete_input_dict": data}, + ) + print_verbose(f"raw model_response: {response.text}") + ## RESPONSE OBJECT + try: + completion_response = response.json() + except: + raise AnthropicError( + message=response.text, status_code=response.status_code + ) + text_content = "" + tool_calls = [] + for content in completion_response["content"]: + if content["type"] == "text": + text_content += content["text"] + ## TOOL CALLING + elif content["type"] == "tool_use": + tool_calls.append( + { + "id": content["id"], + "type": "function", + "function": { + "name": content["name"], + "arguments": json.dumps(content["input"]), + }, + } + ) + if "error" in completion_response: + raise AnthropicError( + message=str(completion_response["error"]), + status_code=response.status_code, + ) + _message = litellm.Message( + tool_calls=tool_calls, + content=text_content or None, + ) + model_response.choices[0].message = _message # type: ignore + model_response._hidden_params["original_response"] = completion_response[ + "content" + ] # allow user to access raw anthropic tool calling response + + model_response.choices[0].finish_reason = map_finish_reason( + completion_response["stop_reason"] + ) + + print_verbose("INSIDE ANTHROPIC STREAMING TOOL CALLING CONDITION BLOCK") + # return an iterator + streaming_model_response = ModelResponse(stream=True) + streaming_model_response.choices[0].finish_reason = model_response.choices[ # type: ignore + 0 + ].finish_reason + # streaming_model_response.choices = [litellm.utils.StreamingChoices()] + streaming_choice = litellm.utils.StreamingChoices() + streaming_choice.index = model_response.choices[0].index + _tool_calls = [] + print_verbose( + f"type of model_response.choices[0]: {type(model_response.choices[0])}" + ) + print_verbose(f"type of streaming_choice: {type(streaming_choice)}") + if isinstance(model_response.choices[0], litellm.Choices): + if getattr( + model_response.choices[0].message, "tool_calls", None + ) is not None and isinstance( + model_response.choices[0].message.tool_calls, list 
+ ): + for tool_call in model_response.choices[0].message.tool_calls: + _tool_call = {**tool_call.dict(), "index": 0} + _tool_calls.append(_tool_call) + delta_obj = litellm.utils.Delta( + content=getattr(model_response.choices[0].message, "content", None), + role=model_response.choices[0].message.role, + tool_calls=_tool_calls, + ) + streaming_choice.delta = delta_obj + streaming_model_response.choices = [streaming_choice] + completion_stream = ModelResponseIterator( + model_response=streaming_model_response + ) + print_verbose( + "Returns anthropic CustomStreamWrapper with 'cached_response' streaming object" + ) + return CustomStreamWrapper( + completion_stream=completion_stream, + model=model, + custom_llm_provider="cached_response", + logging_obj=logging_obj, + ) + else: + raise AnthropicError( + status_code=422, + message="Unprocessable response object - {}".format(response.text), + ) + def process_response( self, - model, - response, - model_response, - _is_function_call, - stream, - logging_obj, - api_key, - data, - messages, + model: str, + response: Union[requests.Response, httpx.Response], + model_response: ModelResponse, + stream: bool, + logging_obj: litellm.utils.Logging, + optional_params: dict, + api_key: str, + data: Union[dict, str], + messages: List, print_verbose, - ): + encoding, + ) -> ModelResponse: ## LOGGING logging_obj.post_call( input=messages, @@ -216,51 +332,6 @@ class AnthropicChatCompletion(BaseLLM): completion_response["stop_reason"] ) - print_verbose(f"_is_function_call: {_is_function_call}; stream: {stream}") - if _is_function_call and stream: - print_verbose("INSIDE ANTHROPIC STREAMING TOOL CALLING CONDITION BLOCK") - # return an iterator - streaming_model_response = ModelResponse(stream=True) - streaming_model_response.choices[0].finish_reason = model_response.choices[ - 0 - ].finish_reason - # streaming_model_response.choices = [litellm.utils.StreamingChoices()] - streaming_choice = litellm.utils.StreamingChoices() - streaming_choice.index = model_response.choices[0].index - _tool_calls = [] - print_verbose( - f"type of model_response.choices[0]: {type(model_response.choices[0])}" - ) - print_verbose(f"type of streaming_choice: {type(streaming_choice)}") - if isinstance(model_response.choices[0], litellm.Choices): - if getattr( - model_response.choices[0].message, "tool_calls", None - ) is not None and isinstance( - model_response.choices[0].message.tool_calls, list - ): - for tool_call in model_response.choices[0].message.tool_calls: - _tool_call = {**tool_call.dict(), "index": 0} - _tool_calls.append(_tool_call) - delta_obj = litellm.utils.Delta( - content=getattr(model_response.choices[0].message, "content", None), - role=model_response.choices[0].message.role, - tool_calls=_tool_calls, - ) - streaming_choice.delta = delta_obj - streaming_model_response.choices = [streaming_choice] - completion_stream = ModelResponseIterator( - model_response=streaming_model_response - ) - print_verbose( - "Returns anthropic CustomStreamWrapper with 'cached_response' streaming object" - ) - return CustomStreamWrapper( - completion_stream=completion_stream, - model=model, - custom_llm_provider="cached_response", - logging_obj=logging_obj, - ) - ## CALCULATING USAGE prompt_tokens = completion_response["usage"]["input_tokens"] completion_tokens = completion_response["usage"]["output_tokens"] @@ -273,7 +344,7 @@ class AnthropicChatCompletion(BaseLLM): completion_tokens=completion_tokens, total_tokens=total_tokens, ) - model_response.usage = usage + setattr(model_response, 
"usage", usage) # type: ignore return model_response async def acompletion_stream_function( @@ -289,7 +360,7 @@ class AnthropicChatCompletion(BaseLLM): logging_obj, stream, _is_function_call, - data=None, + data: dict, optional_params=None, litellm_params=None, logger_fn=None, @@ -331,29 +402,44 @@ class AnthropicChatCompletion(BaseLLM): logging_obj, stream, _is_function_call, - data=None, - optional_params=None, + data: dict, + optional_params: dict, litellm_params=None, logger_fn=None, headers={}, - ): + ) -> Union[ModelResponse, CustomStreamWrapper]: self.async_handler = AsyncHTTPHandler( timeout=httpx.Timeout(timeout=600.0, connect=5.0) ) response = await self.async_handler.post( api_base, headers=headers, data=json.dumps(data) ) + if stream and _is_function_call: + return self.process_streaming_response( + model=model, + response=response, + model_response=model_response, + stream=stream, + logging_obj=logging_obj, + api_key=api_key, + data=data, + messages=messages, + print_verbose=print_verbose, + optional_params=optional_params, + encoding=encoding, + ) return self.process_response( model=model, response=response, model_response=model_response, - _is_function_call=_is_function_call, stream=stream, logging_obj=logging_obj, api_key=api_key, data=data, messages=messages, print_verbose=print_verbose, + optional_params=optional_params, + encoding=encoding, ) def completion( @@ -367,7 +453,7 @@ class AnthropicChatCompletion(BaseLLM): encoding, api_key, logging_obj, - optional_params=None, + optional_params: dict, acompletion=None, litellm_params=None, logger_fn=None, @@ -526,17 +612,33 @@ class AnthropicChatCompletion(BaseLLM): raise AnthropicError( status_code=response.status_code, message=response.text ) + + if stream and _is_function_call: + return self.process_streaming_response( + model=model, + response=response, + model_response=model_response, + stream=stream, + logging_obj=logging_obj, + api_key=api_key, + data=data, + messages=messages, + print_verbose=print_verbose, + optional_params=optional_params, + encoding=encoding, + ) return self.process_response( model=model, response=response, model_response=model_response, - _is_function_call=_is_function_call, stream=stream, logging_obj=logging_obj, api_key=api_key, data=data, messages=messages, print_verbose=print_verbose, + optional_params=optional_params, + encoding=encoding, ) def embedding(self): diff --git a/litellm/llms/anthropic_text.py b/litellm/llms/anthropic_text.py index cef31c2693..0093d9f353 100644 --- a/litellm/llms/anthropic_text.py +++ b/litellm/llms/anthropic_text.py @@ -100,7 +100,7 @@ class AnthropicTextCompletion(BaseLLM): def __init__(self) -> None: super().__init__() - def process_response( + def _process_response( self, model_response: ModelResponse, response, encoding, prompt: str, model: str ): ## RESPONSE OBJECT @@ -171,7 +171,7 @@ class AnthropicTextCompletion(BaseLLM): additional_args={"complete_input_dict": data}, ) - response = self.process_response( + response = self._process_response( model_response=model_response, response=response, encoding=encoding, @@ -330,7 +330,7 @@ class AnthropicTextCompletion(BaseLLM): ) print_verbose(f"raw model_response: {response.text}") - response = self.process_response( + response = self._process_response( model_response=model_response, response=response, encoding=encoding, diff --git a/litellm/llms/azure.py b/litellm/llms/azure.py index f416d14377..02fe4a08f2 100644 --- a/litellm/llms/azure.py +++ b/litellm/llms/azure.py @@ -8,14 +8,16 @@ from litellm.utils import ( 
CustomStreamWrapper, convert_to_model_response_object, TranscriptionResponse, + get_secret, ) -from typing import Callable, Optional, BinaryIO +from typing import Callable, Optional, BinaryIO, List from litellm import OpenAIConfig import litellm, json import httpx # type: ignore from .custom_httpx.azure_dall_e_2 import CustomHTTPTransport, AsyncCustomHTTPTransport from openai import AzureOpenAI, AsyncAzureOpenAI import uuid +import os class AzureOpenAIError(Exception): @@ -105,6 +107,12 @@ class AzureOpenAIConfig(OpenAIConfig): optional_params["azure_ad_token"] = value return optional_params + def get_eu_regions(self) -> List[str]: + """ + Source: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-and-gpt-4-turbo-model-availability + """ + return ["europe", "sweden", "switzerland", "france", "uk"] + def select_azure_base_url_or_endpoint(azure_client_params: dict): # azure_client_params = { @@ -126,6 +134,51 @@ def select_azure_base_url_or_endpoint(azure_client_params: dict): return azure_client_params +def get_azure_ad_token_from_oidc(azure_ad_token: str): + azure_client_id = os.getenv("AZURE_CLIENT_ID", None) + azure_tenant = os.getenv("AZURE_TENANT_ID", None) + + if azure_client_id is None or azure_tenant is None: + raise AzureOpenAIError( + status_code=422, + message="AZURE_CLIENT_ID and AZURE_TENANT_ID must be set", + ) + + oidc_token = get_secret(azure_ad_token) + + if oidc_token is None: + raise AzureOpenAIError( + status_code=401, + message="OIDC token could not be retrieved from secret manager.", + ) + + req_token = httpx.post( + f"https://login.microsoftonline.com/{azure_tenant}/oauth2/v2.0/token", + data={ + "client_id": azure_client_id, + "grant_type": "client_credentials", + "scope": "https://cognitiveservices.azure.com/.default", + "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer", + "client_assertion": oidc_token, + }, + ) + + if req_token.status_code != 200: + raise AzureOpenAIError( + status_code=req_token.status_code, + message=req_token.text, + ) + + possible_azure_ad_token = req_token.json().get("access_token", None) + + if possible_azure_ad_token is None: + raise AzureOpenAIError( + status_code=422, message="Azure AD Token not returned" + ) + + return possible_azure_ad_token + + class AzureChatCompletion(BaseLLM): def __init__(self) -> None: super().__init__() @@ -137,6 +190,8 @@ class AzureChatCompletion(BaseLLM): if api_key is not None: headers["api-key"] = api_key elif azure_ad_token is not None: + if azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) headers["Authorization"] = f"Bearer {azure_ad_token}" return headers @@ -189,6 +244,9 @@ class AzureChatCompletion(BaseLLM): if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: + if azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) + azure_client_params["azure_ad_token"] = azure_ad_token if acompletion is True: @@ -276,6 +334,8 @@ class AzureChatCompletion(BaseLLM): if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: + if azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AzureOpenAI(**azure_client_params) @@ -351,6 +411,8 @@ class AzureChatCompletion(BaseLLM): if api_key is not None: azure_client_params["api_key"] = api_key elif 
azure_ad_token is not None: + if azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token # setting Azure client @@ -422,6 +484,8 @@ class AzureChatCompletion(BaseLLM): if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: + if azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AzureOpenAI(**azure_client_params) @@ -478,6 +542,8 @@ class AzureChatCompletion(BaseLLM): if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: + if azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if client is None: azure_client = AsyncAzureOpenAI(**azure_client_params) @@ -599,6 +665,8 @@ class AzureChatCompletion(BaseLLM): if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: + if azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token ## LOGGING @@ -755,6 +823,8 @@ class AzureChatCompletion(BaseLLM): if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: + if azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if aimg_generation == True: @@ -833,6 +903,8 @@ class AzureChatCompletion(BaseLLM): if api_key is not None: azure_client_params["api_key"] = api_key elif azure_ad_token is not None: + if azure_ad_token.startswith("oidc/"): + azure_ad_token = get_azure_ad_token_from_oidc(azure_ad_token) azure_client_params["azure_ad_token"] = azure_ad_token if max_retries is not None: diff --git a/litellm/llms/base.py b/litellm/llms/base.py index 62b8069f06..d940d94714 100644 --- a/litellm/llms/base.py +++ b/litellm/llms/base.py @@ -1,12 +1,32 @@ ## This is a template base class to be used for adding new LLM providers via API calls import litellm -import httpx -from typing import Optional +import httpx, requests +from typing import Optional, Union +from litellm.utils import Logging class BaseLLM: _client_session: Optional[httpx.Client] = None + def process_response( + self, + model: str, + response: Union[requests.Response, httpx.Response], + model_response: litellm.utils.ModelResponse, + stream: bool, + logging_obj: Logging, + optional_params: dict, + api_key: str, + data: Union[dict, str], + messages: list, + print_verbose, + encoding, + ) -> litellm.utils.ModelResponse: + """ + Helper function to process the response across sync + async completion calls + """ + return model_response + def create_client_session(self): if litellm.client_session: _client_session = litellm.client_session diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py index 08433ba18b..4314032e76 100644 --- a/litellm/llms/bedrock.py +++ b/litellm/llms/bedrock.py @@ -52,6 +52,16 @@ class AmazonBedrockGlobalConfig: optional_params[mapped_params[param]] = value return optional_params + def get_eu_regions(self) -> List[str]: + """ + Source: https://www.aws-services.info/bedrock.html + """ + return [ + "eu-west-1", + "eu-west-3", + "eu-central-1", + ] + class AmazonTitanConfig: """ @@ -551,6 +561,7 @@ def init_bedrock_client( 
aws_session_name: Optional[str] = None, aws_profile_name: Optional[str] = None, aws_role_name: Optional[str] = None, + aws_web_identity_token: Optional[str] = None, extra_headers: Optional[dict] = None, timeout: Optional[Union[float, httpx.Timeout]] = None, ): @@ -567,6 +578,7 @@ def init_bedrock_client( aws_session_name, aws_profile_name, aws_role_name, + aws_web_identity_token, ] # Iterate over parameters and update if needed @@ -582,6 +594,7 @@ def init_bedrock_client( aws_session_name, aws_profile_name, aws_role_name, + aws_web_identity_token, ) = params_to_check ### SET REGION NAME @@ -620,7 +633,38 @@ def init_bedrock_client( config = boto3.session.Config() ### CHECK STS ### - if aws_role_name is not None and aws_session_name is not None: + if aws_web_identity_token is not None and aws_role_name is not None and aws_session_name is not None: + oidc_token = get_secret(aws_web_identity_token) + + if oidc_token is None: + raise BedrockError( + message="OIDC token could not be retrieved from secret manager.", + status_code=401, + ) + + sts_client = boto3.client( + "sts" + ) + + # https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html + # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sts/client/assume_role_with_web_identity.html + sts_response = sts_client.assume_role_with_web_identity( + RoleArn=aws_role_name, + RoleSessionName=aws_session_name, + WebIdentityToken=oidc_token, + DurationSeconds=3600, + ) + + client = boto3.client( + service_name="bedrock-runtime", + aws_access_key_id=sts_response["Credentials"]["AccessKeyId"], + aws_secret_access_key=sts_response["Credentials"]["SecretAccessKey"], + aws_session_token=sts_response["Credentials"]["SessionToken"], + region_name=region_name, + endpoint_url=endpoint_url, + config=config, + ) + elif aws_role_name is not None and aws_session_name is not None: # use sts if role name passed in sts_client = boto3.client( "sts", @@ -755,6 +799,7 @@ def completion( aws_bedrock_runtime_endpoint = optional_params.pop( "aws_bedrock_runtime_endpoint", None ) + aws_web_identity_token = optional_params.pop("aws_web_identity_token", None) # use passed in BedrockRuntime.Client if provided, otherwise create a new one client = optional_params.pop("aws_bedrock_client", None) @@ -769,6 +814,7 @@ def completion( aws_role_name=aws_role_name, aws_session_name=aws_session_name, aws_profile_name=aws_profile_name, + aws_web_identity_token=aws_web_identity_token, extra_headers=extra_headers, timeout=timeout, ) @@ -1291,6 +1337,7 @@ def embedding( aws_bedrock_runtime_endpoint = optional_params.pop( "aws_bedrock_runtime_endpoint", None ) + aws_web_identity_token = optional_params.pop("aws_web_identity_token", None) # use passed in BedrockRuntime.Client if provided, otherwise create a new one client = init_bedrock_client( @@ -1298,6 +1345,7 @@ def embedding( aws_secret_access_key=aws_secret_access_key, aws_region_name=aws_region_name, aws_bedrock_runtime_endpoint=aws_bedrock_runtime_endpoint, + aws_web_identity_token=aws_web_identity_token, aws_role_name=aws_role_name, aws_session_name=aws_session_name, ) @@ -1380,6 +1428,7 @@ def image_generation( aws_bedrock_runtime_endpoint = optional_params.pop( "aws_bedrock_runtime_endpoint", None ) + aws_web_identity_token = optional_params.pop("aws_web_identity_token", None) # use passed in BedrockRuntime.Client if provided, otherwise create a new one client = init_bedrock_client( @@ -1387,6 +1436,7 @@ def image_generation( aws_secret_access_key=aws_secret_access_key, 
aws_region_name=aws_region_name, aws_bedrock_runtime_endpoint=aws_bedrock_runtime_endpoint, + aws_web_identity_token=aws_web_identity_token, aws_role_name=aws_role_name, aws_session_name=aws_session_name, timeout=timeout, diff --git a/litellm/llms/bedrock_httpx.py b/litellm/llms/bedrock_httpx.py new file mode 100644 index 0000000000..1ff3767bdc --- /dev/null +++ b/litellm/llms/bedrock_httpx.py @@ -0,0 +1,733 @@ +# What is this? +## Initial implementation of calling bedrock via httpx client (allows for async calls). +## V0 - just covers cohere command-r support + +import os, types +import json +from enum import Enum +import requests, copy # type: ignore +import time +from typing import ( + Callable, + Optional, + List, + Literal, + Union, + Any, + TypedDict, + Tuple, + Iterator, + AsyncIterator, +) +from litellm.utils import ( + ModelResponse, + Usage, + map_finish_reason, + CustomStreamWrapper, + Message, + Choices, + get_secret, + Logging, +) +import litellm +from .prompt_templates.factory import prompt_factory, custom_prompt, cohere_message_pt +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler +from .base import BaseLLM +import httpx # type: ignore +from .bedrock import BedrockError, convert_messages_to_prompt +from litellm.types.llms.bedrock import * + + +class AmazonCohereChatConfig: + """ + Reference - https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-cohere-command-r-plus.html + """ + + documents: Optional[List[Document]] = None + search_queries_only: Optional[bool] = None + preamble: Optional[str] = None + max_tokens: Optional[int] = None + temperature: Optional[float] = None + p: Optional[float] = None + k: Optional[float] = None + prompt_truncation: Optional[str] = None + frequency_penalty: Optional[float] = None + presence_penalty: Optional[float] = None + seed: Optional[int] = None + return_prompt: Optional[bool] = None + stop_sequences: Optional[List[str]] = None + raw_prompting: Optional[bool] = None + + def __init__( + self, + documents: Optional[List[Document]] = None, + search_queries_only: Optional[bool] = None, + preamble: Optional[str] = None, + max_tokens: Optional[int] = None, + temperature: Optional[float] = None, + p: Optional[float] = None, + k: Optional[float] = None, + prompt_truncation: Optional[str] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + seed: Optional[int] = None, + return_prompt: Optional[bool] = None, + stop_sequences: Optional[str] = None, + raw_prompting: Optional[bool] = None, + ) -> None: + locals_ = locals() + for key, value in locals_.items(): + if key != "self" and value is not None: + setattr(self.__class__, key, value) + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + + def get_supported_openai_params(self) -> List[str]: + return [ + "max_tokens", + "stream", + "stop", + "temperature", + "top_p", + "frequency_penalty", + "presence_penalty", + "seed", + "stop", + ] + + def map_openai_params( + self, non_default_params: dict, optional_params: dict + ) -> dict: + for param, value in non_default_params.items(): + if param == "max_tokens": + optional_params["max_tokens"] = value + if param == "stream": + optional_params["stream"] = value + if param == "stop": + if isinstance(value, str): + value = [value] + 
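+                # OpenAI's `stop` accepts either a string or a list of strings;
+                # Cohere's `stop_sequences` expects a list, so a bare string is
+                # wrapped above before being mapped below.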
optional_params["stop_sequences"] = value + if param == "temperature": + optional_params["temperature"] = value + if param == "top_p": + optional_params["p"] = value + if param == "frequency_penalty": + optional_params["frequency_penalty"] = value + if param == "presence_penalty": + optional_params["presence_penalty"] = value + if "seed": + optional_params["seed"] = value + return optional_params + + +class BedrockLLM(BaseLLM): + """ + Example call + + ``` + curl --location --request POST 'https://bedrock-runtime.{aws_region_name}.amazonaws.com/model/{bedrock_model_name}/invoke' \ + --header 'Content-Type: application/json' \ + --header 'Accept: application/json' \ + --user "$AWS_ACCESS_KEY_ID":"$AWS_SECRET_ACCESS_KEY" \ + --aws-sigv4 "aws:amz:us-east-1:bedrock" \ + --data-raw '{ + "prompt": "Hi", + "temperature": 0, + "p": 0.9, + "max_tokens": 4096 + }' + ``` + """ + + def __init__(self) -> None: + super().__init__() + + def convert_messages_to_prompt( + self, model, messages, provider, custom_prompt_dict + ) -> Tuple[str, Optional[list]]: + # handle anthropic prompts and amazon titan prompts + prompt = "" + chat_history: Optional[list] = None + if provider == "anthropic" or provider == "amazon": + if model in custom_prompt_dict: + # check if the model has a registered custom prompt + model_prompt_details = custom_prompt_dict[model] + prompt = custom_prompt( + role_dict=model_prompt_details["roles"], + initial_prompt_value=model_prompt_details["initial_prompt_value"], + final_prompt_value=model_prompt_details["final_prompt_value"], + messages=messages, + ) + else: + prompt = prompt_factory( + model=model, messages=messages, custom_llm_provider="bedrock" + ) + elif provider == "mistral": + prompt = prompt_factory( + model=model, messages=messages, custom_llm_provider="bedrock" + ) + elif provider == "meta": + prompt = prompt_factory( + model=model, messages=messages, custom_llm_provider="bedrock" + ) + elif provider == "cohere": + prompt, chat_history = cohere_message_pt(messages=messages) + else: + prompt = "" + for message in messages: + if "role" in message: + if message["role"] == "user": + prompt += f"{message['content']}" + else: + prompt += f"{message['content']}" + else: + prompt += f"{message['content']}" + return prompt, chat_history # type: ignore + + def get_credentials( + self, + aws_access_key_id: Optional[str] = None, + aws_secret_access_key: Optional[str] = None, + aws_region_name: Optional[str] = None, + aws_session_name: Optional[str] = None, + aws_profile_name: Optional[str] = None, + aws_role_name: Optional[str] = None, + ): + """ + Return a boto3.Credentials object + """ + import boto3 + + ## CHECK IS 'os.environ/' passed in + params_to_check: List[Optional[str]] = [ + aws_access_key_id, + aws_secret_access_key, + aws_region_name, + aws_session_name, + aws_profile_name, + aws_role_name, + ] + + # Iterate over parameters and update if needed + for i, param in enumerate(params_to_check): + if param and param.startswith("os.environ/"): + _v = get_secret(param) + if _v is not None and isinstance(_v, str): + params_to_check[i] = _v + # Assign updated values back to parameters + ( + aws_access_key_id, + aws_secret_access_key, + aws_region_name, + aws_session_name, + aws_profile_name, + aws_role_name, + ) = params_to_check + + ### CHECK STS ### + if aws_role_name is not None and aws_session_name is not None: + sts_client = boto3.client( + "sts", + aws_access_key_id=aws_access_key_id, # [OPTIONAL] + aws_secret_access_key=aws_secret_access_key, # [OPTIONAL] + ) + + sts_response 
= sts_client.assume_role( + RoleArn=aws_role_name, RoleSessionName=aws_session_name + ) + + return sts_response["Credentials"] + elif aws_profile_name is not None: ### CHECK SESSION ### + # uses auth values from AWS profile usually stored in ~/.aws/credentials + client = boto3.Session(profile_name=aws_profile_name) + + return client.get_credentials() + else: + session = boto3.Session( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + region_name=aws_region_name, + ) + + return session.get_credentials() + + def process_response( + self, + model: str, + response: Union[requests.Response, httpx.Response], + model_response: ModelResponse, + stream: bool, + logging_obj: Logging, + optional_params: dict, + api_key: str, + data: Union[dict, str], + messages: List, + print_verbose, + encoding, + ) -> ModelResponse: + ## LOGGING + logging_obj.post_call( + input=messages, + api_key=api_key, + original_response=response.text, + additional_args={"complete_input_dict": data}, + ) + print_verbose(f"raw model_response: {response.text}") + + ## RESPONSE OBJECT + try: + completion_response = response.json() + except: + raise BedrockError(message=response.text, status_code=422) + + try: + model_response.choices[0].message.content = completion_response["text"] # type: ignore + except Exception as e: + raise BedrockError(message=response.text, status_code=422) + + ## CALCULATING USAGE - bedrock returns usage in the headers + prompt_tokens = int( + response.headers.get( + "x-amzn-bedrock-input-token-count", + len(encoding.encode("".join(m.get("content", "") for m in messages))), + ) + ) + completion_tokens = int( + response.headers.get( + "x-amzn-bedrock-output-token-count", + len( + encoding.encode( + model_response.choices[0].message.content, # type: ignore + disallowed_special=(), + ) + ), + ) + ) + + model_response["created"] = int(time.time()) + model_response["model"] = model + usage = Usage( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens, + ) + setattr(model_response, "usage", usage) + + return model_response + + def completion( + self, + model: str, + messages: list, + custom_prompt_dict: dict, + model_response: ModelResponse, + print_verbose: Callable, + encoding, + logging_obj, + optional_params: dict, + acompletion: bool, + timeout: Optional[Union[float, httpx.Timeout]], + litellm_params=None, + logger_fn=None, + extra_headers: Optional[dict] = None, + client: Optional[Union[AsyncHTTPHandler, HTTPHandler]] = None, + ) -> Union[ModelResponse, CustomStreamWrapper]: + try: + import boto3 + + from botocore.auth import SigV4Auth + from botocore.awsrequest import AWSRequest + from botocore.credentials import Credentials + except ImportError as e: + raise ImportError("Missing boto3 to call bedrock. 
Run 'pip install boto3'.") + + ## SETUP ## + stream = optional_params.pop("stream", None) + + ## CREDENTIALS ## + # pop aws_secret_access_key, aws_access_key_id, aws_region_name from kwargs, since completion calls fail with them + aws_secret_access_key = optional_params.pop("aws_secret_access_key", None) + aws_access_key_id = optional_params.pop("aws_access_key_id", None) + aws_region_name = optional_params.pop("aws_region_name", None) + aws_role_name = optional_params.pop("aws_role_name", None) + aws_session_name = optional_params.pop("aws_session_name", None) + aws_profile_name = optional_params.pop("aws_profile_name", None) + aws_bedrock_runtime_endpoint = optional_params.pop( + "aws_bedrock_runtime_endpoint", None + ) # https://bedrock-runtime.{region_name}.amazonaws.com + + ### SET REGION NAME ### + if aws_region_name is None: + # check env # + litellm_aws_region_name = get_secret("AWS_REGION_NAME", None) + + if litellm_aws_region_name is not None and isinstance( + litellm_aws_region_name, str + ): + aws_region_name = litellm_aws_region_name + + standard_aws_region_name = get_secret("AWS_REGION", None) + if standard_aws_region_name is not None and isinstance( + standard_aws_region_name, str + ): + aws_region_name = standard_aws_region_name + + if aws_region_name is None: + aws_region_name = "us-west-2" + + credentials: Credentials = self.get_credentials( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + aws_region_name=aws_region_name, + aws_session_name=aws_session_name, + aws_profile_name=aws_profile_name, + aws_role_name=aws_role_name, + ) + + ### SET RUNTIME ENDPOINT ### + endpoint_url = "" + env_aws_bedrock_runtime_endpoint = get_secret("AWS_BEDROCK_RUNTIME_ENDPOINT") + if aws_bedrock_runtime_endpoint is not None and isinstance( + aws_bedrock_runtime_endpoint, str + ): + endpoint_url = aws_bedrock_runtime_endpoint + elif env_aws_bedrock_runtime_endpoint and isinstance( + env_aws_bedrock_runtime_endpoint, str + ): + endpoint_url = env_aws_bedrock_runtime_endpoint + else: + endpoint_url = f"https://bedrock-runtime.{aws_region_name}.amazonaws.com" + + if stream is not None and stream == True: + endpoint_url = f"{endpoint_url}/model/{model}/invoke-with-response-stream" + else: + endpoint_url = f"{endpoint_url}/model/{model}/invoke" + + sigv4 = SigV4Auth(credentials, "bedrock", aws_region_name) + + provider = model.split(".")[0] + prompt, chat_history = self.convert_messages_to_prompt( + model, messages, provider, custom_prompt_dict + ) + inference_params = copy.deepcopy(optional_params) + + if provider == "cohere": + if model.startswith("cohere.command-r"): + ## LOAD CONFIG + config = litellm.AmazonCohereChatConfig().get_config() + for k, v in config.items(): + if ( + k not in inference_params + ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in + inference_params[k] = v + _data = {"message": prompt, **inference_params} + if chat_history is not None: + _data["chat_history"] = chat_history + data = json.dumps(_data) + else: + ## LOAD CONFIG + config = litellm.AmazonCohereConfig.get_config() + for k, v in config.items(): + if ( + k not in inference_params + ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in + inference_params[k] = v + if stream == True: + inference_params["stream"] = ( + True # cohere requires stream = True in inference params + ) + data = json.dumps({"prompt": prompt, **inference_params}) + else: + raise Exception("UNSUPPORTED 
PROVIDER") + + ## COMPLETION CALL + + headers = {"Content-Type": "application/json"} + if extra_headers is not None: + headers = {"Content-Type": "application/json", **extra_headers} + request = AWSRequest( + method="POST", url=endpoint_url, data=data, headers=headers + ) + sigv4.add_auth(request) + prepped = request.prepare() + + ## LOGGING + logging_obj.pre_call( + input=messages, + api_key="", + additional_args={ + "complete_input_dict": data, + "api_base": prepped.url, + "headers": prepped.headers, + }, + ) + + ### ROUTING (ASYNC, STREAMING, SYNC) + if acompletion: + if isinstance(client, HTTPHandler): + client = None + if stream: + return self.async_streaming( + model=model, + messages=messages, + data=data, + api_base=prepped.url, + model_response=model_response, + print_verbose=print_verbose, + encoding=encoding, + logging_obj=logging_obj, + optional_params=optional_params, + stream=True, + litellm_params=litellm_params, + logger_fn=logger_fn, + headers=prepped.headers, + timeout=timeout, + client=client, + ) # type: ignore + ### ASYNC COMPLETION + return self.async_completion( + model=model, + messages=messages, + data=data, + api_base=prepped.url, + model_response=model_response, + print_verbose=print_verbose, + encoding=encoding, + logging_obj=logging_obj, + optional_params=optional_params, + stream=False, + litellm_params=litellm_params, + logger_fn=logger_fn, + headers=prepped.headers, + timeout=timeout, + client=client, + ) # type: ignore + + if client is None or isinstance(client, AsyncHTTPHandler): + _params = {} + if timeout is not None: + if isinstance(timeout, float) or isinstance(timeout, int): + timeout = httpx.Timeout(timeout) + _params["timeout"] = timeout + self.client = HTTPHandler(**_params) # type: ignore + else: + self.client = client + if stream is not None and stream == True: + response = self.client.post( + url=prepped.url, + headers=prepped.headers, # type: ignore + data=data, + stream=stream, + ) + + if response.status_code != 200: + raise BedrockError( + status_code=response.status_code, message=response.text + ) + + decoder = AWSEventStreamDecoder() + + completion_stream = decoder.iter_bytes(response.iter_bytes(chunk_size=1024)) + streaming_response = CustomStreamWrapper( + completion_stream=completion_stream, + model=model, + custom_llm_provider="bedrock", + logging_obj=logging_obj, + ) + return streaming_response + + response = self.client.post(url=prepped.url, headers=prepped.headers, data=data) # type: ignore + + try: + response.raise_for_status() + except httpx.HTTPStatusError as err: + error_code = err.response.status_code + raise BedrockError(status_code=error_code, message=response.text) + + return self.process_response( + model=model, + response=response, + model_response=model_response, + stream=stream, + logging_obj=logging_obj, + optional_params=optional_params, + api_key="", + data=data, + messages=messages, + print_verbose=print_verbose, + encoding=encoding, + ) + + async def async_completion( + self, + model: str, + messages: list, + api_base: str, + model_response: ModelResponse, + print_verbose: Callable, + data: str, + timeout: Optional[Union[float, httpx.Timeout]], + encoding, + logging_obj, + stream, + optional_params: dict, + litellm_params=None, + logger_fn=None, + headers={}, + client: Optional[AsyncHTTPHandler] = None, + ) -> ModelResponse: + if client is None: + _params = {} + if timeout is not None: + if isinstance(timeout, float) or isinstance(timeout, int): + timeout = httpx.Timeout(timeout) + _params["timeout"] = timeout + 
self.client = AsyncHTTPHandler(**_params) # type: ignore + else: + self.client = client # type: ignore + + response = await self.client.post(api_base, headers=headers, data=data) # type: ignore + return self.process_response( + model=model, + response=response, + model_response=model_response, + stream=stream, + logging_obj=logging_obj, + api_key="", + data=data, + messages=messages, + print_verbose=print_verbose, + optional_params=optional_params, + encoding=encoding, + ) + + async def async_streaming( + self, + model: str, + messages: list, + api_base: str, + model_response: ModelResponse, + print_verbose: Callable, + data: str, + timeout: Optional[Union[float, httpx.Timeout]], + encoding, + logging_obj, + stream, + optional_params: dict, + litellm_params=None, + logger_fn=None, + headers={}, + client: Optional[AsyncHTTPHandler] = None, + ) -> CustomStreamWrapper: + if client is None: + _params = {} + if timeout is not None: + if isinstance(timeout, float) or isinstance(timeout, int): + timeout = httpx.Timeout(timeout) + _params["timeout"] = timeout + self.client = AsyncHTTPHandler(**_params) # type: ignore + else: + self.client = client # type: ignore + + response = await self.client.post(api_base, headers=headers, data=data, stream=True) # type: ignore + + if response.status_code != 200: + raise BedrockError(status_code=response.status_code, message=response.text) + + decoder = AWSEventStreamDecoder() + + completion_stream = decoder.aiter_bytes(response.aiter_bytes(chunk_size=1024)) + streaming_response = CustomStreamWrapper( + completion_stream=completion_stream, + model=model, + custom_llm_provider="bedrock", + logging_obj=logging_obj, + ) + return streaming_response + + def embedding(self, *args, **kwargs): + return super().embedding(*args, **kwargs) + + +def get_response_stream_shape(): + from botocore.model import ServiceModel + from botocore.loaders import Loader + + loader = Loader() + bedrock_service_dict = loader.load_service_model("bedrock-runtime", "service-2") + bedrock_service_model = ServiceModel(bedrock_service_dict) + return bedrock_service_model.shape_for("ResponseStream") + + +class AWSEventStreamDecoder: + def __init__(self) -> None: + from botocore.parsers import EventStreamJSONParser + + self.parser = EventStreamJSONParser() + + def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[GenericStreamingChunk]: + """Given an iterator that yields lines, iterate over it & yield every event encountered""" + from botocore.eventstream import EventStreamBuffer + + event_stream_buffer = EventStreamBuffer() + for chunk in iterator: + event_stream_buffer.add_data(chunk) + for event in event_stream_buffer: + message = self._parse_message_from_event(event) + if message: + # sse_event = ServerSentEvent(data=message, event="completion") + _data = json.loads(message) + streaming_chunk: GenericStreamingChunk = GenericStreamingChunk( + text=_data.get("text", ""), + is_finished=_data.get("is_finished", False), + finish_reason=_data.get("finish_reason", ""), + ) + yield streaming_chunk + + async def aiter_bytes( + self, iterator: AsyncIterator[bytes] + ) -> AsyncIterator[GenericStreamingChunk]: + """Given an async iterator that yields lines, iterate over it & yield every event encountered""" + from botocore.eventstream import EventStreamBuffer + + event_stream_buffer = EventStreamBuffer() + async for chunk in iterator: + event_stream_buffer.add_data(chunk) + for event in event_stream_buffer: + message = self._parse_message_from_event(event) + if message: + _data = 
json.loads(message) + streaming_chunk: GenericStreamingChunk = GenericStreamingChunk( + text=_data.get("text", ""), + is_finished=_data.get("is_finished", False), + finish_reason=_data.get("finish_reason", ""), + ) + yield streaming_chunk + + def _parse_message_from_event(self, event) -> Optional[str]: + response_dict = event.to_response_dict() + parsed_response = self.parser.parse(response_dict, get_response_stream_shape()) + if response_dict["status_code"] != 200: + raise ValueError(f"Bad response code, expected 200: {response_dict}") + + chunk = parsed_response.get("chunk") + if not chunk: + return None + + return chunk.get("bytes").decode() # type: ignore[no-any-return] diff --git a/litellm/llms/clarifai.py b/litellm/llms/clarifai.py new file mode 100644 index 0000000000..e07a8d9e8a --- /dev/null +++ b/litellm/llms/clarifai.py @@ -0,0 +1,328 @@ +import os, types, traceback +import json +import requests +import time +from typing import Callable, Optional +from litellm.utils import ModelResponse, Usage, Choices, Message, CustomStreamWrapper +import litellm +import httpx +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler +from .prompt_templates.factory import prompt_factory, custom_prompt + + +class ClarifaiError(Exception): + def __init__(self, status_code, message, url): + self.status_code = status_code + self.message = message + self.request = httpx.Request( + method="POST", url=url + ) + self.response = httpx.Response(status_code=status_code, request=self.request) + super().__init__( + self.message + ) + +class ClarifaiConfig: + """ + Reference: https://clarifai.com/meta/Llama-2/models/llama2-70b-chat + TODO fill in the details + """ + max_tokens: Optional[int] = None + temperature: Optional[int] = None + top_k: Optional[int] = None + + def __init__( + self, + max_tokens: Optional[int] = None, + temperature: Optional[int] = None, + top_k: Optional[int] = None, + ) -> None: + locals_ = locals() + for key, value in locals_.items(): + if key != "self" and value is not None: + setattr(self.__class__, key, value) + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + +def validate_environment(api_key): + headers = { + "accept": "application/json", + "content-type": "application/json", + } + if api_key: + headers["Authorization"] = f"Bearer {api_key}" + return headers + +def completions_to_model(payload): + # if payload["n"] != 1: + # raise HTTPException( + # status_code=422, + # detail="Only one generation is supported. 
Please set candidate_count to 1.", + # ) + + params = {} + if temperature := payload.get("temperature"): + params["temperature"] = temperature + if max_tokens := payload.get("max_tokens"): + params["max_tokens"] = max_tokens + return { + "inputs": [{"data": {"text": {"raw": payload["prompt"]}}}], + "model": {"output_info": {"params": params}}, +} + +def process_response( + model, + prompt, + response, + model_response, + api_key, + data, + encoding, + logging_obj + ): + logging_obj.post_call( + input=prompt, + api_key=api_key, + original_response=response.text, + additional_args={"complete_input_dict": data}, + ) + ## RESPONSE OBJECT + try: + completion_response = response.json() + except Exception: + raise ClarifaiError( + message=response.text, status_code=response.status_code, url=model + ) + # print(completion_response) + try: + choices_list = [] + for idx, item in enumerate(completion_response["outputs"]): + if len(item["data"]["text"]["raw"]) > 0: + message_obj = Message(content=item["data"]["text"]["raw"]) + else: + message_obj = Message(content=None) + choice_obj = Choices( + finish_reason="stop", + index=idx + 1, #check + message=message_obj, + ) + choices_list.append(choice_obj) + model_response["choices"] = choices_list + + except Exception as e: + raise ClarifaiError( + message=traceback.format_exc(), status_code=response.status_code, url=model + ) + + # Calculate Usage + prompt_tokens = len(encoding.encode(prompt)) + completion_tokens = len( + encoding.encode(model_response["choices"][0]["message"].get("content")) + ) + model_response["model"] = model + model_response["usage"] = Usage( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens, + ) + return model_response + +def convert_model_to_url(model: str, api_base: str): + user_id, app_id, model_id = model.split(".") + return f"{api_base}/users/{user_id}/apps/{app_id}/models/{model_id}/outputs" + +def get_prompt_model_name(url: str): + clarifai_model_name = url.split("/")[-2] + if "claude" in clarifai_model_name: + return "anthropic", clarifai_model_name.replace("_", ".") + if ("llama" in clarifai_model_name)or ("mistral" in clarifai_model_name): + return "", "meta-llama/llama-2-chat" + else: + return "", clarifai_model_name + +async def async_completion( + model: str, + prompt: str, + api_base: str, + custom_prompt_dict: dict, + model_response: ModelResponse, + print_verbose: Callable, + encoding, + api_key, + logging_obj, + data=None, + optional_params=None, + litellm_params=None, + logger_fn=None, + headers={}): + + async_handler = AsyncHTTPHandler( + timeout=httpx.Timeout(timeout=600.0, connect=5.0) + ) + response = await async_handler.post( + api_base, headers=headers, data=json.dumps(data) + ) + + return process_response( + model=model, + prompt=prompt, + response=response, + model_response=model_response, + api_key=api_key, + data=data, + encoding=encoding, + logging_obj=logging_obj, + ) + +def completion( + model: str, + messages: list, + api_base: str, + model_response: ModelResponse, + print_verbose: Callable, + encoding, + api_key, + logging_obj, + custom_prompt_dict={}, + acompletion=False, + optional_params=None, + litellm_params=None, + logger_fn=None, +): + headers = validate_environment(api_key) + model = convert_model_to_url(model, api_base) + prompt = " ".join(message["content"] for message in messages) # TODO + + ## Load Config + config = litellm.ClarifaiConfig.get_config() + for k, v in config.items(): + if ( + k not in optional_params + ): + 
optional_params[k] = v + + custom_llm_provider, orig_model_name = get_prompt_model_name(model) + if custom_llm_provider == "anthropic": + prompt = prompt_factory( + model=orig_model_name, + messages=messages, + api_key=api_key, + custom_llm_provider="clarifai" + ) + else: + prompt = prompt_factory( + model=orig_model_name, + messages=messages, + api_key=api_key, + custom_llm_provider=custom_llm_provider + ) + # print(prompt); exit(0) + + data = { + "prompt": prompt, + **optional_params, + } + data = completions_to_model(data) + + + ## LOGGING + logging_obj.pre_call( + input=prompt, + api_key=api_key, + additional_args={ + "complete_input_dict": data, + "headers": headers, + "api_base": api_base, + }, + ) + if acompletion==True: + return async_completion( + model=model, + prompt=prompt, + api_base=api_base, + custom_prompt_dict=custom_prompt_dict, + model_response=model_response, + print_verbose=print_verbose, + encoding=encoding, + api_key=api_key, + logging_obj=logging_obj, + data=data, + optional_params=optional_params, + litellm_params=litellm_params, + logger_fn=logger_fn, + headers=headers, + ) + else: + ## COMPLETION CALL + response = requests.post( + model, + headers=headers, + data=json.dumps(data), + ) + # print(response.content); exit() + + if response.status_code != 200: + raise ClarifaiError(status_code=response.status_code, message=response.text, url=model) + + if "stream" in optional_params and optional_params["stream"] == True: + completion_stream = response.iter_lines() + stream_response = CustomStreamWrapper( + completion_stream=completion_stream, + model=model, + custom_llm_provider="clarifai", + logging_obj=logging_obj, + ) + return stream_response + + else: + return process_response( + model=model, + prompt=prompt, + response=response, + model_response=model_response, + api_key=api_key, + data=data, + encoding=encoding, + logging_obj=logging_obj) + + +class ModelResponseIterator: + def __init__(self, model_response): + self.model_response = model_response + self.is_done = False + + # Sync iterator + def __iter__(self): + return self + + def __next__(self): + if self.is_done: + raise StopIteration + self.is_done = True + return self.model_response + + # Async iterator + def __aiter__(self): + return self + + async def __anext__(self): + if self.is_done: + raise StopAsyncIteration + self.is_done = True + return self.model_response \ No newline at end of file diff --git a/litellm/llms/custom_httpx/http_handler.py b/litellm/llms/custom_httpx/http_handler.py index 7c7d4938a4..0adbd95bf9 100644 --- a/litellm/llms/custom_httpx/http_handler.py +++ b/litellm/llms/custom_httpx/http_handler.py @@ -58,16 +58,25 @@ class AsyncHTTPHandler: class HTTPHandler: def __init__( - self, timeout: httpx.Timeout = _DEFAULT_TIMEOUT, concurrent_limit=1000 + self, + timeout: Optional[httpx.Timeout] = None, + concurrent_limit=1000, + client: Optional[httpx.Client] = None, ): - # Create a client with a connection pool - self.client = httpx.Client( - timeout=timeout, - limits=httpx.Limits( - max_connections=concurrent_limit, - max_keepalive_connections=concurrent_limit, - ), - ) + if timeout is None: + timeout = _DEFAULT_TIMEOUT + + if client is None: + # Create a client with a connection pool + self.client = httpx.Client( + timeout=timeout, + limits=httpx.Limits( + max_connections=concurrent_limit, + max_keepalive_connections=concurrent_limit, + ), + ) + else: + self.client = client def close(self): # Close the client when you're done with it @@ -82,11 +91,15 @@ class HTTPHandler: def post( self, 
url: str, - data: Optional[dict] = None, + data: Optional[Union[dict, str]] = None, params: Optional[dict] = None, headers: Optional[dict] = None, + stream: bool = False, ): - response = self.client.post(url, data=data, params=params, headers=headers) + req = self.client.build_request( + "POST", url, data=data, params=params, headers=headers # type: ignore + ) + response = self.client.send(req, stream=stream) return response def __del__(self) -> None: diff --git a/litellm/llms/huggingface_restapi.py b/litellm/llms/huggingface_restapi.py index a2c4457c23..c54dba75f1 100644 --- a/litellm/llms/huggingface_restapi.py +++ b/litellm/llms/huggingface_restapi.py @@ -6,10 +6,12 @@ import httpx, requests from .base import BaseLLM import time import litellm -from typing import Callable, Dict, List, Any +from typing import Callable, Dict, List, Any, Literal, Tuple from litellm.utils import ModelResponse, Choices, Message, CustomStreamWrapper, Usage from typing import Optional from .prompt_templates.factory import prompt_factory, custom_prompt +from litellm.types.completion import ChatCompletionMessageToolCallParam +import enum class HuggingfaceError(Exception): @@ -39,11 +41,29 @@ class HuggingfaceError(Exception): ) # Call the base class constructor with the parameters it needs +hf_task_list = [ + "text-generation-inference", + "conversational", + "text-classification", + "text-generation", +] + +hf_tasks = Literal[ + "text-generation-inference", + "conversational", + "text-classification", + "text-generation", +] + + class HuggingfaceConfig: """ Reference: https://huggingface.github.io/text-generation-inference/#/Text%20Generation%20Inference/compat_generate """ + hf_task: Optional[hf_tasks] = ( + None # litellm-specific param, used to know the api spec to use when calling huggingface api + ) best_of: Optional[int] = None decoder_input_details: Optional[bool] = None details: Optional[bool] = True # enables returning logprobs + best of @@ -101,6 +121,51 @@ class HuggingfaceConfig: and v is not None } + def get_supported_openai_params(self): + return [ + "stream", + "temperature", + "max_tokens", + "top_p", + "stop", + "n", + "echo", + ] + + def map_openai_params( + self, non_default_params: dict, optional_params: dict + ) -> dict: + for param, value in non_default_params.items(): + # temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None + if param == "temperature": + if value == 0.0 or value == 0: + # hugging face exception raised when temp==0 + # Failed: Error occurred: HuggingfaceException - Input validation error: `temperature` must be strictly positive + value = 0.01 + optional_params["temperature"] = value + if param == "top_p": + optional_params["top_p"] = value + if param == "n": + optional_params["best_of"] = value + optional_params["do_sample"] = ( + True # Need to sample if you want best of for hf inference endpoints + ) + if param == "stream": + optional_params["stream"] = value + if param == "stop": + optional_params["stop"] = value + if param == "max_tokens": + # HF TGI raises the following exception when max_new_tokens==0 + # Failed: Error occurred: HuggingfaceException - Input validation error: `max_new_tokens` must be strictly positive + if value == 0: + value = 1 + optional_params["max_new_tokens"] = value + if param == "echo": + # https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation.decoder_input_details + # Return the decoder input token logprobs and ids. 
You must set details=True as well for it to be taken into account. Defaults to False + optional_params["decoder_input_details"] = True + return optional_params + def output_parser(generated_text: str): """ @@ -162,18 +227,21 @@ def read_tgi_conv_models(): return set(), set() -def get_hf_task_for_model(model): +def get_hf_task_for_model(model: str) -> Tuple[hf_tasks, str]: # read text file, cast it to set # read the file called "huggingface_llms_metadata/hf_text_generation_models.txt" + if model.split("/")[0] in hf_task_list: + split_model = model.split("/", 1) + return split_model[0], split_model[1] # type: ignore tgi_models, conversational_models = read_tgi_conv_models() if model in tgi_models: - return "text-generation-inference" + return "text-generation-inference", model elif model in conversational_models: - return "conversational" + return "conversational", model elif "roneneldan/TinyStories" in model: - return None + return "text-generation", model else: - return "text-generation-inference" # default to tgi + return "text-generation-inference", model # default to tgi class Huggingface(BaseLLM): @@ -202,7 +270,7 @@ class Huggingface(BaseLLM): self, completion_response, model_response, - task, + task: hf_tasks, optional_params, encoding, input_text, @@ -270,6 +338,10 @@ class Huggingface(BaseLLM): ) choices_list.append(choice_obj) model_response["choices"].extend(choices_list) + elif task == "text-classification": + model_response["choices"][0]["message"]["content"] = json.dumps( + completion_response + ) else: if len(completion_response[0]["generated_text"]) > 0: model_response["choices"][0]["message"]["content"] = output_parser( @@ -332,7 +404,13 @@ class Huggingface(BaseLLM): exception_mapping_worked = False try: headers = self.validate_environment(api_key, headers) - task = get_hf_task_for_model(model) + task, model = get_hf_task_for_model(model) + ## VALIDATE API FORMAT + if task is None or not isinstance(task, str) or task not in hf_task_list: + raise Exception( + "Invalid hf task - {}. 
Valid formats - {}.".format(task, hf_tasks) + ) + print_verbose(f"{model}, {task}") completion_url = "" input_text = "" @@ -433,14 +511,15 @@ class Huggingface(BaseLLM): inference_params.pop("return_full_text") data = { "inputs": prompt, - "parameters": inference_params, - "stream": ( # type: ignore - True + } + if task == "text-generation-inference": + data["parameters"] = inference_params + data["stream"] = ( # type: ignore + True # type: ignore if "stream" in optional_params and optional_params["stream"] == True else False - ), - } + ) input_text = prompt ## LOGGING logging_obj.pre_call( @@ -531,10 +610,10 @@ class Huggingface(BaseLLM): isinstance(completion_response, dict) and "error" in completion_response ): - print_verbose(f"completion error: {completion_response['error']}") + print_verbose(f"completion error: {completion_response['error']}") # type: ignore print_verbose(f"response.status_code: {response.status_code}") raise HuggingfaceError( - message=completion_response["error"], + message=completion_response["error"], # type: ignore status_code=response.status_code, ) return self.convert_to_model_response_object( @@ -563,7 +642,7 @@ class Huggingface(BaseLLM): data: dict, headers: dict, model_response: ModelResponse, - task: str, + task: hf_tasks, encoding: Any, input_text: str, model: str, diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py index 866761905f..d1ff4953fa 100644 --- a/litellm/llms/ollama_chat.py +++ b/litellm/llms/ollama_chat.py @@ -300,7 +300,7 @@ def get_ollama_response( model_response["choices"][0]["message"] = message model_response["choices"][0]["finish_reason"] = "tool_calls" else: - model_response["choices"][0]["message"] = response_json["message"] + model_response["choices"][0]["message"]["content"] = response_json["message"]["content"] model_response["created"] = int(time.time()) model_response["model"] = "ollama/" + model prompt_tokens = response_json.get("prompt_eval_count", litellm.token_counter(messages=messages)) # type: ignore @@ -484,7 +484,7 @@ async def ollama_acompletion( model_response["choices"][0]["message"] = message model_response["choices"][0]["finish_reason"] = "tool_calls" else: - model_response["choices"][0]["message"] = response_json["message"] + model_response["choices"][0]["message"]["content"] = response_json["message"]["content"] model_response["created"] = int(time.time()) model_response["model"] = "ollama_chat/" + data["model"] diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py index 674cc86a25..7acbdfae02 100644 --- a/litellm/llms/openai.py +++ b/litellm/llms/openai.py @@ -53,6 +53,113 @@ class OpenAIError(Exception): ) # Call the base class constructor with the parameters it needs +class MistralConfig: + """ + Reference: https://docs.mistral.ai/api/ + + The class `MistralConfig` provides configuration for the Mistral's Chat API interface. Below are the parameters: + + - `temperature` (number or null): Defines the sampling temperature to use, varying between 0 and 2. API Default - 0.7. + + - `top_p` (number or null): An alternative to sampling with temperature, used for nucleus sampling. API Default - 1. + + - `max_tokens` (integer or null): This optional parameter helps to set the maximum number of tokens to generate in the chat completion. API Default - null. + + - `tools` (list or null): A list of available tools for the model. Use this to specify functions for which the model can generate JSON inputs. 
+ + - `tool_choice` (string - 'auto'/'any'/'none' or null): Specifies if/how functions are called. If set to none the model won't call a function and will generate a message instead. If set to auto the model can choose to either generate a message or call a function. If set to any the model is forced to call a function. Default - 'auto'. + + - `random_seed` (integer or null): The seed to use for random sampling. If set, different calls will generate deterministic results. + + - `safe_prompt` (boolean): Whether to inject a safety prompt before all conversations. API Default - 'false'. + + - `response_format` (object or null): An object specifying the format that the model must output. Setting to { "type": "json_object" } enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. + """ + + temperature: Optional[int] = None + top_p: Optional[int] = None + max_tokens: Optional[int] = None + tools: Optional[list] = None + tool_choice: Optional[Literal["auto", "any", "none"]] = None + random_seed: Optional[int] = None + safe_prompt: Optional[bool] = None + response_format: Optional[dict] = None + + def __init__( + self, + temperature: Optional[int] = None, + top_p: Optional[int] = None, + max_tokens: Optional[int] = None, + tools: Optional[list] = None, + tool_choice: Optional[Literal["auto", "any", "none"]] = None, + random_seed: Optional[int] = None, + safe_prompt: Optional[bool] = None, + response_format: Optional[dict] = None, + ) -> None: + locals_ = locals() + for key, value in locals_.items(): + if key != "self" and value is not None: + setattr(self.__class__, key, value) + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + + def get_supported_openai_params(self): + return [ + "stream", + "temperature", + "top_p", + "max_tokens", + "tools", + "tool_choice", + "seed", + "response_format", + ] + + def _map_tool_choice(self, tool_choice: str) -> str: + if tool_choice == "auto" or tool_choice == "none": + return tool_choice + elif tool_choice == "required": + return "any" + else: # openai 'tool_choice' object param not supported by Mistral API + return "any" + + def map_openai_params(self, non_default_params: dict, optional_params: dict): + for param, value in non_default_params.items(): + if param == "max_tokens": + optional_params["max_tokens"] = value + if param == "tools": + optional_params["tools"] = value + if param == "stream" and value == True: + optional_params["stream"] = value + if param == "temperature": + optional_params["temperature"] = value + if param == "top_p": + optional_params["top_p"] = value + if param == "tool_choice" and isinstance(value, str): + optional_params["tool_choice"] = self._map_tool_choice( + tool_choice=value + ) + if param == "seed": + optional_params["extra_body"] = {"random_seed": value} + return optional_params + + class OpenAIConfig: """ Reference: https://platform.openai.com/docs/api-reference/chat/create @@ -1327,8 +1434,8 @@ class OpenAIAssistantsAPI(BaseLLM): client=client, ) - thread_message: OpenAIMessage = openai_client.beta.threads.messages.create( - thread_id, **message_data + thread_message: OpenAIMessage = openai_client.beta.threads.messages.create( # type: ignore + thread_id, **message_data # type: 
ignore ) response_obj: Optional[OpenAIMessage] = None @@ -1458,7 +1565,7 @@ class OpenAIAssistantsAPI(BaseLLM): client=client, ) - response = openai_client.beta.threads.runs.create_and_poll( + response = openai_client.beta.threads.runs.create_and_poll( # type: ignore thread_id=thread_id, assistant_id=assistant_id, additional_instructions=additional_instructions, diff --git a/litellm/llms/predibase.py b/litellm/llms/predibase.py index c3424d244b..1e7e1d3348 100644 --- a/litellm/llms/predibase.py +++ b/litellm/llms/predibase.py @@ -168,7 +168,7 @@ class PredibaseChatCompletion(BaseLLM): logging_obj: litellm.utils.Logging, optional_params: dict, api_key: str, - data: dict, + data: Union[dict, str], messages: list, print_verbose, encoding, @@ -185,9 +185,7 @@ class PredibaseChatCompletion(BaseLLM): try: completion_response = response.json() except: - raise PredibaseError( - message=response.text, status_code=response.status_code - ) + raise PredibaseError(message=response.text, status_code=422) if "error" in completion_response: raise PredibaseError( message=str(completion_response["error"]), @@ -363,7 +361,7 @@ class PredibaseChatCompletion(BaseLLM): }, ) ## COMPLETION CALL - if acompletion is True: + if acompletion == True: ### ASYNC STREAMING if stream == True: return self.async_streaming( diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index 24a076dd0c..cf593369c4 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -1509,6 +1509,11 @@ def prompt_factory( model="meta-llama/Meta-Llama-3-8B-Instruct", messages=messages, ) + + elif custom_llm_provider == "clarifai": + if "claude" in model: + return anthropic_pt(messages=messages) + elif custom_llm_provider == "perplexity": for message in messages: message.pop("name", None) diff --git a/litellm/llms/triton.py b/litellm/llms/triton.py new file mode 100644 index 0000000000..711186b3fb --- /dev/null +++ b/litellm/llms/triton.py @@ -0,0 +1,119 @@ +import os, types +import json +from enum import Enum +import requests, copy # type: ignore +import time +from typing import Callable, Optional, List +from litellm.utils import ModelResponse, Usage, map_finish_reason, CustomStreamWrapper +import litellm +from .prompt_templates.factory import prompt_factory, custom_prompt +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler +from .base import BaseLLM +import httpx # type: ignore + + +class TritonError(Exception): + def __init__(self, status_code, message): + self.status_code = status_code + self.message = message + self.request = httpx.Request( + method="POST", + url="https://api.anthropic.com/v1/messages", # using anthropic api base since httpx requires a url + ) + self.response = httpx.Response(status_code=status_code, request=self.request) + super().__init__( + self.message + ) # Call the base class constructor with the parameters it needs + + +class TritonChatCompletion(BaseLLM): + def __init__(self) -> None: + super().__init__() + + async def aembedding( + self, + data: dict, + model_response: litellm.utils.EmbeddingResponse, + api_base: str, + logging_obj=None, + api_key: Optional[str] = None, + ): + + async_handler = AsyncHTTPHandler( + timeout=httpx.Timeout(timeout=600.0, connect=5.0) + ) + + response = await async_handler.post(url=api_base, data=json.dumps(data)) + + if response.status_code != 200: + raise TritonError(status_code=response.status_code, message=response.text) + + _text_response = response.text + + 
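
The Triton exchange in this handler follows the KServe open inference protocol. A standalone sketch of the round-trip shape (illustrative only: the `input_text` BYTES tensor matches the `data_for_triton` payload built further down, while the response layout is an assumption mirroring the `outputs[0]["data"]` parsing just below):

```python
import json

# Request body: a single BYTES tensor named "input_text" carrying raw strings.
payload = {
    "inputs": [
        {
            "name": "input_text",
            "shape": [1],
            "datatype": "BYTES",
            "data": ["hello world"],
        }
    ]
}

# Response body (assumed shape): the embedding vector arrives flattened
# in outputs[0]["data"], with the serving model's name alongside.
raw_response = json.dumps(
    {
        "model_name": "my-embedder",  # hypothetical model name
        "outputs": [
            {"name": "embedding", "shape": [3], "datatype": "FP32", "data": [0.1, 0.2, 0.3]}
        ],
    }
)
vector = json.loads(raw_response)["outputs"][0]["data"]
print(len(payload["inputs"]), vector)  # 1 [0.1, 0.2, 0.3]
```
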
logging_obj.post_call(original_response=_text_response) + + _json_response = response.json() + + _outputs = _json_response["outputs"] + _output_data = _outputs[0]["data"] + _embedding_output = { + "object": "embedding", + "index": 0, + "embedding": _output_data, + } + + model_response.model = _json_response.get("model_name", "None") + model_response.data = [_embedding_output] + + return model_response + + def embedding( + self, + model: str, + input: list, + timeout: float, + api_base: str, + model_response: litellm.utils.EmbeddingResponse, + api_key: Optional[str] = None, + logging_obj=None, + optional_params=None, + client=None, + aembedding=None, + ): + data_for_triton = { + "inputs": [ + { + "name": "input_text", + "shape": [1], + "datatype": "BYTES", + "data": input, + } + ] + } + + ## LOGGING + + curl_string = f"curl {api_base} -X POST -H 'Content-Type: application/json' -d '{data_for_triton}'" + + logging_obj.pre_call( + input="", + api_key=None, + additional_args={ + "complete_input_dict": optional_params, + "request_str": curl_string, + }, + ) + + if aembedding == True: + response = self.aembedding( + data=data_for_triton, + model_response=model_response, + logging_obj=logging_obj, + api_base=api_base, + api_key=api_key, + ) + return response + else: + raise Exception( + "Only async embedding supported for triton, please use litellm.aembedding() for now" + ) diff --git a/litellm/llms/vertex_ai.py b/litellm/llms/vertex_ai.py index cab7ae19f2..84fec734fd 100644 --- a/litellm/llms/vertex_ai.py +++ b/litellm/llms/vertex_ai.py @@ -198,6 +198,23 @@ class VertexAIConfig: optional_params[mapped_params[param]] = value return optional_params + def get_eu_regions(self) -> List[str]: + """ + Source: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions + """ + return [ + "europe-central2", + "europe-north1", + "europe-southwest1", + "europe-west1", + "europe-west2", + "europe-west3", + "europe-west4", + "europe-west6", + "europe-west8", + "europe-west9", + ] + import asyncio @@ -419,6 +436,7 @@ def completion( from google.protobuf.struct_pb2 import Value # type: ignore from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types # type: ignore import google.auth # type: ignore + import proto # type: ignore ## Load credentials with the correct quota project ref: https://github.com/googleapis/python-aiplatform/issues/2557#issuecomment-1709284744 print_verbose( @@ -605,9 +623,21 @@ def completion( ): function_call = response.candidates[0].content.parts[0].function_call args_dict = {} - for k, v in function_call.args.items(): - args_dict[k] = v - args_str = json.dumps(args_dict) + + # Check if it's a RepeatedComposite instance + for key, val in function_call.args.items(): + if isinstance( + val, proto.marshal.collections.repeated.RepeatedComposite + ): + # If so, convert to list + args_dict[key] = [v for v in val] + else: + args_dict[key] = val + + try: + args_str = json.dumps(args_dict) + except Exception as e: + raise VertexAIError(status_code=422, message=str(e)) message = litellm.Message( content=None, tool_calls=[ @@ -810,6 +840,8 @@ def completion( setattr(model_response, "usage", usage) return model_response except Exception as e: + if isinstance(e, VertexAIError): + raise e raise VertexAIError(status_code=500, message=str(e)) @@ -835,6 +867,8 @@ async def async_completion( Add support for acompletion calls for gemini-pro """ try: + import proto # type: ignore + if mode == "vision": print_verbose("\nMaking VertexAI Gemini Pro/Vision 
Call") print_verbose(f"\nProcessing input messages = {messages}") @@ -869,9 +903,21 @@ async def async_completion( ): function_call = response.candidates[0].content.parts[0].function_call args_dict = {} - for k, v in function_call.args.items(): - args_dict[k] = v - args_str = json.dumps(args_dict) + + # Check if it's a RepeatedComposite instance + for key, val in function_call.args.items(): + if isinstance( + val, proto.marshal.collections.repeated.RepeatedComposite + ): + # If so, convert to list + args_dict[key] = [v for v in val] + else: + args_dict[key] = val + + try: + args_str = json.dumps(args_dict) + except Exception as e: + raise VertexAIError(status_code=422, message=str(e)) message = litellm.Message( content=None, tool_calls=[ diff --git a/litellm/llms/watsonx.py b/litellm/llms/watsonx.py index 99f2d18baf..34176a23a4 100644 --- a/litellm/llms/watsonx.py +++ b/litellm/llms/watsonx.py @@ -1,12 +1,26 @@ from enum import Enum import json, types, time # noqa: E401 -from contextlib import contextmanager -from typing import Callable, Dict, Optional, Any, Union, List +from contextlib import asynccontextmanager, contextmanager +from typing import ( + Callable, + Dict, + Generator, + AsyncGenerator, + Iterator, + AsyncIterator, + Optional, + Any, + Union, + List, + ContextManager, + AsyncContextManager, +) import httpx # type: ignore import requests # type: ignore import litellm -from litellm.utils import ModelResponse, get_secret, Usage +from litellm.utils import ModelResponse, Usage, get_secret +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler from .base import BaseLLM from .prompt_templates import factory as ptf @@ -149,6 +163,15 @@ class IBMWatsonXAIConfig: optional_params[mapped_params[param]] = value return optional_params + def get_eu_regions(self) -> List[str]: + """ + Source: https://www.ibm.com/docs/en/watsonx/saas?topic=integrations-regional-availability + """ + return [ + "eu-de", + "eu-gb", + ] + def convert_messages_to_prompt(model, messages, provider, custom_prompt_dict): # handle anthropic prompts and amazon titan prompts @@ -188,11 +211,12 @@ class WatsonXAIEndpoint(str, Enum): ) EMBEDDINGS = "/ml/v1/text/embeddings" PROMPTS = "/ml/v1/prompts" + AVAILABLE_MODELS = "/ml/v1/foundation_model_specs" class IBMWatsonXAI(BaseLLM): """ - Class to interface with IBM Watsonx.ai API for text generation and embeddings. + Class to interface with IBM watsonx.ai API for text generation and embeddings. Reference: https://cloud.ibm.com/apidocs/watsonx-ai """ @@ -343,7 +367,7 @@ class IBMWatsonXAI(BaseLLM): ) if token is None and api_key is not None: # generate the auth token - if print_verbose: + if print_verbose is not None: print_verbose("Generating IAM token for Watsonx.ai") token = self.generate_iam_token(api_key) elif token is None and api_key is None: @@ -378,10 +402,11 @@ class IBMWatsonXAI(BaseLLM): print_verbose: Callable, encoding, logging_obj, - optional_params: dict, - litellm_params: Optional[dict] = None, + optional_params=None, + acompletion=None, + litellm_params=None, logger_fn=None, - timeout: Optional[float] = None, + timeout=None, ): """ Send a text generation request to the IBM Watsonx.ai API. 
@@ -402,12 +427,12 @@ class IBMWatsonXAI(BaseLLM): model, messages, provider, custom_prompt_dict ) - def process_text_request(request_params: dict) -> ModelResponse: - with self._manage_response( - request_params, logging_obj=logging_obj, input=prompt, timeout=timeout - ) as resp: - json_resp = resp.json() - + def process_text_gen_response(json_resp: dict) -> ModelResponse: + if "results" not in json_resp: + raise WatsonXAIError( + status_code=500, + message=f"Error: Invalid response from Watsonx.ai API: {json_resp}", + ) generated_text = json_resp["results"][0]["generated_text"] prompt_tokens = json_resp["results"][0]["input_token_count"] completion_tokens = json_resp["results"][0]["generated_token_count"] @@ -415,36 +440,70 @@ class IBMWatsonXAI(BaseLLM): model_response["finish_reason"] = json_resp["results"][0]["stop_reason"] model_response["created"] = int(time.time()) model_response["model"] = model - setattr( - model_response, - "usage", - Usage( - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - total_tokens=prompt_tokens + completion_tokens, - ), + usage = Usage( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens, ) + setattr(model_response, "usage", usage) return model_response - def process_stream_request( - request_params: dict, + def process_stream_response( + stream_resp: Union[Iterator[str], AsyncIterator], ) -> litellm.CustomStreamWrapper: + streamwrapper = litellm.CustomStreamWrapper( + stream_resp, + model=model, + custom_llm_provider="watsonx", + logging_obj=logging_obj, + ) + return streamwrapper + + # create the function to manage the request to watsonx.ai + self.request_manager = RequestManager(logging_obj) + + def handle_text_request(request_params: dict) -> ModelResponse: + with self.request_manager.request( + request_params, + input=prompt, + timeout=timeout, + ) as resp: + json_resp = resp.json() + + return process_text_gen_response(json_resp) + + async def handle_text_request_async(request_params: dict) -> ModelResponse: + async with self.request_manager.async_request( + request_params, + input=prompt, + timeout=timeout, + ) as resp: + json_resp = resp.json() + return process_text_gen_response(json_resp) + + def handle_stream_request(request_params: dict) -> litellm.CustomStreamWrapper: # stream the response - generated chunks will be handled # by litellm.utils.CustomStreamWrapper.handle_watsonx_stream - with self._manage_response( + with self.request_manager.request( request_params, - logging_obj=logging_obj, stream=True, input=prompt, timeout=timeout, ) as resp: - response = litellm.CustomStreamWrapper( - resp.iter_lines(), - model=model, - custom_llm_provider="watsonx", - logging_obj=logging_obj, - ) - return response + streamwrapper = process_stream_response(resp.iter_lines()) + return streamwrapper + + async def handle_stream_request_async(request_params: dict) -> litellm.CustomStreamWrapper: + # stream the response - generated chunks will be handled + # by litellm.utils.CustomStreamWrapper.handle_watsonx_stream + async with self.request_manager.async_request( + request_params, + stream=True, + input=prompt, + timeout=timeout, + ) as resp: + streamwrapper = process_stream_response(resp.aiter_lines()) + return streamwrapper try: ## Get the response from the model @@ -455,10 +514,18 @@ class IBMWatsonXAI(BaseLLM): optional_params=optional_params, print_verbose=print_verbose, ) - if stream: - return process_stream_request(req_params) + if stream and (acompletion is True): + 
# stream and async text generation
+                return handle_stream_request_async(req_params)
+            elif stream:
+                # streaming text generation
+                return handle_stream_request(req_params)
+            elif (acompletion is True):
+                # async text generation
+                return handle_text_request_async(req_params)
             else:
-                return process_text_request(req_params)
+                # regular text generation
+                return handle_text_request(req_params)
         except WatsonXAIError as e:
             raise e
         except Exception as e:
@@ -473,6 +540,7 @@ class IBMWatsonXAI(BaseLLM):
         model_response=None,
         optional_params=None,
         encoding=None,
+        aembedding=None,
     ):
         """
         Send a text embedding request to the IBM Watsonx.ai API.
@@ -507,9 +575,6 @@ class IBMWatsonXAI(BaseLLM):
         }
         request_params = dict(version=api_params["api_version"])
         url = api_params["url"].rstrip("/") + WatsonXAIEndpoint.EMBEDDINGS
-        # request = httpx.Request(
-        #     "POST", url, headers=headers, json=payload, params=request_params
-        # )
         req_params = {
             "method": "POST",
             "url": url,
@@ -517,25 +582,49 @@ class IBMWatsonXAI(BaseLLM):
             "json": payload,
             "params": request_params,
         }
-        with self._manage_response(
-            req_params, logging_obj=logging_obj, input=input
-        ) as resp:
-            json_resp = resp.json()
+        request_manager = RequestManager(logging_obj)
+
-        results = json_resp.get("results", [])
-        embedding_response = []
-        for idx, result in enumerate(results):
-            embedding_response.append(
-                {"object": "embedding", "index": idx, "embedding": result["embedding"]}
+        def process_embedding_response(json_resp: dict) -> ModelResponse:
+            results = json_resp.get("results", [])
+            embedding_response = []
+            for idx, result in enumerate(results):
+                embedding_response.append(
+                    {
+                        "object": "embedding",
+                        "index": idx,
+                        "embedding": result["embedding"],
+                    }
+                )
+            model_response["object"] = "list"
+            model_response["data"] = embedding_response
+            model_response["model"] = model
+            input_tokens = json_resp.get("input_token_count", 0)
+            model_response.usage = Usage(
+                prompt_tokens=input_tokens,
+                completion_tokens=0,
+                total_tokens=input_tokens,
             )
-        model_response["object"] = "list"
-        model_response["data"] = embedding_response
-        model_response["model"] = model
-        input_tokens = json_resp.get("input_token_count", 0)
-        model_response.usage = Usage(
-            prompt_tokens=input_tokens, completion_tokens=0, total_tokens=input_tokens
-        )
-        return model_response
+            return model_response
+
+        def handle_embedding(request_params: dict) -> ModelResponse:
+            with request_manager.request(request_params, input=input) as resp:
+                json_resp = resp.json()
+            return process_embedding_response(json_resp)
+
+        async def handle_aembedding(request_params: dict) -> ModelResponse:
+            async with request_manager.async_request(request_params, input=input) as resp:
+                json_resp = resp.json()
+            return process_embedding_response(json_resp)
+
+        try:
+            if aembedding is True:
+                # async embedding request -> return the coroutine to the caller
+                return handle_aembedding(req_params)
+            else:
+                # sync embedding request
+                return handle_embedding(req_params)
+        except WatsonXAIError as e:
+            raise e
+        except Exception as e:
+            raise WatsonXAIError(status_code=500, message=str(e))

     def generate_iam_token(self, api_key=None, **params):
         headers = {}
@@ -558,52 +647,144 @@ class IBMWatsonXAI(BaseLLM):
         self.token = iam_access_token
         return iam_access_token

-    @contextmanager
-    def _manage_response(
-        self,
-        request_params: dict,
-        logging_obj: Any,
-        stream: bool = False,
-        input: Optional[Any] = None,
-        timeout: Optional[float] = None,
-    ):
-        request_str = (
-            f"response = {request_params['method']}(\n"
-            f"\turl={request_params['url']},\n"
-            f"\tjson={request_params['json']},\n"
-            f")"
-        )
-        logging_obj.pre_call(
-            input=input,
-            api_key=request_params["headers"].get("Authorization"),
-            additional_args={
-                "complete_input_dict": request_params["json"],
-                "request_str": request_str,
-            },
-        )
-        if timeout:
-            request_params["timeout"] = timeout
-        try:
-            if stream:
-                resp = requests.request(
-                    **request_params,
-                    stream=True,
-                )
-                resp.raise_for_status()
-                yield resp
-            else:
-                resp = requests.request(**request_params)
-                resp.raise_for_status()
-                yield resp
-        except Exception as e:
-            raise WatsonXAIError(status_code=500, message=str(e))
-        if not stream:
-            logging_obj.post_call(
+    def get_available_models(self, *, ids_only: bool = True, **params):
+        api_params = self._get_api_params(params)
+        headers = {
+            "Authorization": f"Bearer {api_params['token']}",
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+        }
+        request_params = dict(version=api_params["api_version"])
+        url = api_params["url"].rstrip("/") + WatsonXAIEndpoint.AVAILABLE_MODELS
+        req_params = dict(method="GET", url=url, headers=headers, params=request_params)
+        with RequestManager(logging_obj=None).request(req_params) as resp:
+            json_resp = resp.json()
+        if not ids_only:
+            return json_resp
+        return [res["model_id"] for res in json_resp["resources"]]
+
+class RequestManager:
+    """
+    Manages requests to the watsonx.ai API: `request()` returns a regular
+    (sync) context manager over the response, and `async_request()` returns
+    an async context manager.
+
+    Usage:
+    ```python
+    request_params = dict(method="POST", url="https://api.example.com", headers={"Authorization": "Bearer token"}, json={"key": "value"})
+    request_manager = RequestManager(logging_obj=logging_obj)
+    with request_manager.request(request_params) as resp:
+        ...
+    # or
+    async with request_manager.async_request(request_params) as resp:
+        ...
+    ```
+    """
+
+    def __init__(self, logging_obj=None):
+        self.logging_obj = logging_obj
+
+    def pre_call(
+        self,
+        request_params: dict,
+        input: Optional[Any] = None,
+    ):
+        if self.logging_obj is None:
+            return
+        request_str = (
+            f"response = {request_params['method']}(\n"
+            f"\turl={request_params['url']},\n"
+            f"\tjson={request_params.get('json')},\n"
+            f")"
+        )
+        self.logging_obj.pre_call(
+            input=input,
+            api_key=request_params["headers"].get("Authorization"),
+            additional_args={
+                "complete_input_dict": request_params.get("json"),
+                "request_str": request_str,
+            },
+        )
+
+    def post_call(self, resp, request_params):
+        if self.logging_obj is None:
+            return
+        self.logging_obj.post_call(
             input=input,
             api_key=request_params["headers"].get("Authorization"),
             original_response=json.dumps(resp.json()),
             additional_args={
                 "status_code": resp.status_code,
-                "complete_input_dict": request_params["json"],
+                "complete_input_dict": request_params.get(
+                    "data", request_params.get("json")
+                ),
             },
         )
+
+    @contextmanager
+    def request(
+        self,
+        request_params: dict,
+        stream: bool = False,
+        input: Optional[Any] = None,
+        timeout=None,
+    ) -> Generator[requests.Response, None, None]:
+        """
+        Returns a context manager that yields the response from the request.
+ """ + self.pre_call(request_params, input) + if timeout: + request_params["timeout"] = timeout + if stream: + request_params["stream"] = stream + try: + resp = requests.request(**request_params) + if not resp.ok: + raise WatsonXAIError( + status_code=resp.status_code, + message=f"Error {resp.status_code} ({resp.reason}): {resp.text}", + ) + yield resp + except Exception as e: + raise WatsonXAIError(status_code=500, message=str(e)) + if not stream: + self.post_call(resp, request_params) + + @asynccontextmanager + async def async_request( + self, + request_params: dict, + stream: bool = False, + input: Optional[Any] = None, + timeout=None, + ) -> AsyncGenerator[httpx.Response, None]: + self.pre_call(request_params, input) + if timeout: + request_params["timeout"] = timeout + if stream: + request_params["stream"] = stream + try: + # async with AsyncHTTPHandler(timeout=timeout) as client: + self.async_handler = AsyncHTTPHandler( + timeout=httpx.Timeout( + timeout=request_params.pop("timeout", 600.0), connect=5.0 + ), + ) + # async_handler.client.verify = False + if "json" in request_params: + request_params["data"] = json.dumps(request_params.pop("json", {})) + method = request_params.pop("method") + if method.upper() == "POST": + resp = await self.async_handler.post(**request_params) + else: + resp = await self.async_handler.get(**request_params) + if resp.status_code not in [200, 201]: + raise WatsonXAIError( + status_code=resp.status_code, + message=f"Error {resp.status_code} ({resp.reason}): {resp.text}", + ) + yield resp + # await async_handler.close() + except Exception as e: + raise WatsonXAIError(status_code=500, message=str(e)) + if not stream: + self.post_call(resp, request_params) \ No newline at end of file diff --git a/litellm/main.py b/litellm/main.py index 72f5b1dc63..d51c28f244 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -9,12 +9,13 @@ import os, openai, sys, json, inspect, uuid, datetime, threading from typing import Any, Literal, Union, BinaryIO +from typing_extensions import overload from functools import partial import dotenv, traceback, random, asyncio, time, contextvars from copy import deepcopy import httpx -import litellm +import litellm from ._logging import verbose_logger from litellm import ( # type: ignore client, @@ -47,6 +48,7 @@ from .llms import ( ai21, sagemaker, bedrock, + triton, huggingface_restapi, replicate, aleph_alpha, @@ -56,6 +58,7 @@ from .llms import ( ollama, ollama_chat, cloudflare, + clarifai, cohere, cohere_chat, petals, @@ -75,6 +78,8 @@ from .llms.anthropic import AnthropicChatCompletion from .llms.anthropic_text import AnthropicTextCompletion from .llms.huggingface_restapi import Huggingface from .llms.predibase import PredibaseChatCompletion +from .llms.bedrock_httpx import BedrockLLM +from .llms.triton import TritonChatCompletion from .llms.prompt_templates.factory import ( prompt_factory, custom_prompt, @@ -103,7 +108,6 @@ from litellm.utils import ( ) ####### ENVIRONMENT VARIABLES ################### -dotenv.load_dotenv() # Loading env variables using dotenv openai_chat_completions = OpenAIChatCompletion() openai_text_completions = OpenAITextCompletion() anthropic_chat_completions = AnthropicChatCompletion() @@ -112,6 +116,8 @@ azure_chat_completions = AzureChatCompletion() azure_text_completions = AzureTextCompletion() huggingface = Huggingface() predibase_chat_completions = PredibaseChatCompletion() +triton_chat_completions = TritonChatCompletion() +bedrock_chat_completion = BedrockLLM() ####### COMPLETION ENDPOINTS 
################ @@ -254,7 +260,7 @@ async def acompletion( - If `stream` is True, the function returns an async generator that yields completion lines. """ loop = asyncio.get_event_loop() - custom_llm_provider = None + custom_llm_provider = kwargs.get("custom_llm_provider", None) # Adjusted to use explicit arguments instead of *args and **kwargs completion_kwargs = { "model": model, @@ -286,9 +292,10 @@ async def acompletion( "model_list": model_list, "acompletion": True, # assuming this is a required parameter } - _, custom_llm_provider, _, _ = get_llm_provider( - model=model, api_base=completion_kwargs.get("base_url", None) - ) + if custom_llm_provider is None: + _, custom_llm_provider, _, _ = get_llm_provider( + model=model, api_base=completion_kwargs.get("base_url", None) + ) try: # Use a partial function to pass your keyword arguments func = partial(completion, **completion_kwargs, **kwargs) @@ -297,9 +304,6 @@ async def acompletion( ctx = contextvars.copy_context() func_with_context = partial(ctx.run, func) - _, custom_llm_provider, _, _ = get_llm_provider( - model=model, api_base=kwargs.get("api_base", None) - ) if ( custom_llm_provider == "openai" or custom_llm_provider == "azure" @@ -321,6 +325,7 @@ async def acompletion( or custom_llm_provider == "sagemaker" or custom_llm_provider == "anthropic" or custom_llm_provider == "predibase" + or (custom_llm_provider == "bedrock" and "cohere" in model) or custom_llm_provider in litellm.openai_compatible_providers ): # currently implemented aiohttp calls for just azure, openai, hf, ollama, vertex ai soon all. init_response = await loop.run_in_executor(None, func_with_context) @@ -661,6 +666,7 @@ def completion( "supports_system_message", "region_name", "allowed_model_region", + "model_config", ] default_params = openai_params + litellm_params @@ -668,20 +674,6 @@ def completion( k: v for k, v in kwargs.items() if k not in default_params } # model-specific params - pass them straight to the model/provider - ### TIMEOUT LOGIC ### - timeout = timeout or kwargs.get("request_timeout", 600) or 600 - # set timeout for 10 minutes by default - - if ( - timeout is not None - and isinstance(timeout, httpx.Timeout) - and supports_httpx_timeout(custom_llm_provider) == False - ): - read_timeout = timeout.read or 600 - timeout = read_timeout # default 10 min timeout - elif timeout is not None and not isinstance(timeout, httpx.Timeout): - timeout = float(timeout) # type: ignore - try: if base_url is not None: api_base = base_url @@ -721,9 +713,18 @@ def completion( "aws_region_name", None ) # support region-based pricing for bedrock + ### TIMEOUT LOGIC ### + timeout = timeout or kwargs.get("request_timeout", 600) or 600 + # set timeout for 10 minutes by default + if isinstance(timeout, httpx.Timeout) and not supports_httpx_timeout( + custom_llm_provider + ): + timeout = timeout.read or 600 # default 10 min timeout + elif not isinstance(timeout, httpx.Timeout): + timeout = float(timeout) # type: ignore + ### REGISTER CUSTOM MODEL PRICING -- IF GIVEN ### if input_cost_per_token is not None and output_cost_per_token is not None: - print_verbose(f"Registering model={model} in model cost map") litellm.register_model( { f"{custom_llm_provider}/{model}": { @@ -845,6 +846,10 @@ def completion( proxy_server_request=proxy_server_request, preset_cache_key=preset_cache_key, no_log=no_log, + input_cost_per_second=input_cost_per_second, + input_cost_per_token=input_cost_per_token, + output_cost_per_second=output_cost_per_second, + 
@@ -1210,6 +1215,61 @@ def completion(
         )
         response = model_response
+    elif (
+        "clarifai" in model
+        or custom_llm_provider == "clarifai"
+        or model in litellm.clarifai_models
+    ):
+        clarifai_key = None
+        clarifai_key = (
+            api_key
+            or litellm.clarifai_key
+            or litellm.api_key
+            or get_secret("CLARIFAI_API_KEY")
+            or get_secret("CLARIFAI_API_TOKEN")
+        )
+
+        api_base = (
+            api_base
+            or litellm.api_base
+            or get_secret("CLARIFAI_API_BASE")
+            or "https://api.clarifai.com/v2"
+        )
+
+        custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
+        model_response = clarifai.completion(
+            model=model,
+            messages=messages,
+            api_base=api_base,
+            model_response=model_response,
+            print_verbose=print_verbose,
+            optional_params=optional_params,
+            litellm_params=litellm_params,
+            acompletion=acompletion,
+            logger_fn=logger_fn,
+            encoding=encoding,  # for calculating input/output tokens
+            api_key=clarifai_key,
+            logging_obj=logging,
+            custom_prompt_dict=custom_prompt_dict,
+        )
+
+        if "stream" in optional_params and optional_params["stream"] == True:
+            # don't try to access stream object,
+            ## LOGGING
+            logging.post_call(
+                input=messages,
+                api_key=api_key,
+                original_response=model_response,
+            )
+
+        if optional_params.get("stream", False) or acompletion == True:
+            ## LOGGING
+            logging.post_call(
+                input=messages,
+                api_key=clarifai_key,
+                original_response=model_response,
+            )
+        response = model_response
     elif custom_llm_provider == "anthropic":
         api_key = (
@@ -1919,41 +1979,59 @@
     elif custom_llm_provider == "bedrock":
         # boto3 reads keys from .env
         custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
-        response = bedrock.completion(
-            model=model,
-            messages=messages,
-            custom_prompt_dict=litellm.custom_prompt_dict,
-            model_response=model_response,
-            print_verbose=print_verbose,
-            optional_params=optional_params,
-            litellm_params=litellm_params,
-            logger_fn=logger_fn,
-            encoding=encoding,
-            logging_obj=logging,
-            extra_headers=extra_headers,
-            timeout=timeout,
-        )
-        if (
-            "stream" in optional_params
-            and optional_params["stream"] == True
-            and not isinstance(response, CustomStreamWrapper)
-        ):
-            # don't try to access stream object,
-            if "ai21" in model:
-                response = CustomStreamWrapper(
-                    response,
-                    model,
-                    custom_llm_provider="bedrock",
-                    logging_obj=logging,
-                )
-            else:
-                response = CustomStreamWrapper(
-                    iter(response),
-                    model,
-                    custom_llm_provider="bedrock",
-                    logging_obj=logging,
-                )
+        if "cohere" in model:
+            response = bedrock_chat_completion.completion(
+                model=model,
+                messages=messages,
+                custom_prompt_dict=litellm.custom_prompt_dict,
+                model_response=model_response,
+                print_verbose=print_verbose,
+                optional_params=optional_params,
+                litellm_params=litellm_params,
+                logger_fn=logger_fn,
+                encoding=encoding,
+                logging_obj=logging,
+                extra_headers=extra_headers,
+                timeout=timeout,
+                acompletion=acompletion,
+            )
+        else:
+            response = bedrock.completion(
+                model=model,
+                messages=messages,
+                custom_prompt_dict=litellm.custom_prompt_dict,
+                model_response=model_response,
+                print_verbose=print_verbose,
+                optional_params=optional_params,
+                litellm_params=litellm_params,
+                logger_fn=logger_fn,
+                encoding=encoding,
+                logging_obj=logging,
+                extra_headers=extra_headers,
+                timeout=timeout,
+            )
+
+            if (
+                "stream" in optional_params
+                and optional_params["stream"] == True
+                and not isinstance(response, CustomStreamWrapper)
+            ):
+                # don't try to access stream object,
+                if "ai21" in model:
+                    response = CustomStreamWrapper(
+                        response,
+                        model,
+                        custom_llm_provider="bedrock",
+                        logging_obj=logging,
+                    )
+                else:
+                    response = CustomStreamWrapper(
+                        iter(response),
+                        model,
+                        custom_llm_provider="bedrock",
+                        logging_obj=logging,
+                    )
 
         if optional_params.get("stream", False):
             ## LOGGING
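With this branch, Bedrock Cohere models are served by the new httpx-based `BedrockLLM` client (including native async, per the `acompletion` change earlier in this diff), while all other Bedrock models keep the boto3 path. A hedged usage sketch; the model id follows litellm's `bedrock/<model-id>` convention, matches the pricing entries added later in this diff, and assumes AWS credentials are configured in the environment:

```python
# Sketch: exercising the new BedrockLLM (httpx) path for a Cohere model.
import asyncio

from litellm import acompletion

async def main():
    response = await acompletion(
        model="bedrock/cohere.command-r-v1:0",
        messages=[{"role": "user", "content": "Say hi in one word."}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```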
if "ai21" in model: + response = CustomStreamWrapper( + response, + model, + custom_llm_provider="bedrock", + logging_obj=logging, + ) + else: + response = CustomStreamWrapper( + iter(response), + model, + custom_llm_provider="bedrock", + logging_obj=logging, + ) if optional_params.get("stream", False): ## LOGGING @@ -2622,6 +2700,7 @@ async def aembedding(*args, **kwargs): or custom_llm_provider == "voyage" or custom_llm_provider == "mistral" or custom_llm_provider == "custom_openai" + or custom_llm_provider == "triton" or custom_llm_provider == "anyscale" or custom_llm_provider == "openrouter" or custom_llm_provider == "deepinfra" @@ -2779,6 +2858,7 @@ def embedding( "no-log", "region_name", "allowed_model_region", + "model_config", ] default_params = openai_params + litellm_params non_default_params = { @@ -2955,6 +3035,23 @@ def embedding( optional_params=optional_params, model_response=EmbeddingResponse(), ) + elif custom_llm_provider == "triton": + if api_base is None: + raise ValueError( + "api_base is required for triton. Please pass `api_base`" + ) + response = triton_chat_completions.embedding( + model=model, + input=input, + api_base=api_base, + api_key=api_key, + logging_obj=logging, + timeout=timeout, + model_response=EmbeddingResponse(), + optional_params=optional_params, + client=client, + aembedding=aembedding, + ) elif custom_llm_provider == "vertex_ai": vertex_ai_project = ( optional_params.pop("vertex_project", None) @@ -3662,6 +3759,7 @@ def image_generation( "cache", "region_name", "allowed_model_region", + "model_config", ] default_params = openai_params + litellm_params non_default_params = { diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 10c70a858d..f78afb08c6 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -9,6 +9,30 @@ "mode": "chat", "supports_function_calling": true }, + "gpt-4o": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000005, + "output_cost_per_token": 0.000015, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "gpt-4o-2024-05-13": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000005, + "output_cost_per_token": 0.000015, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, "gpt-4-turbo-preview": { "max_tokens": 4096, "max_input_tokens": 128000, @@ -1086,6 +1110,36 @@ "supports_tool_choice": true, "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" }, + "gemini-1.5-flash-preview-0514": { + "max_tokens": 8192, + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_images_per_prompt": 3000, + "max_videos_per_prompt": 10, + "max_video_length": 1, + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_pdf_size_mb": 30, + "input_cost_per_token": 0, + "output_cost_per_token": 0, + "litellm_provider": "vertex_ai-language-models", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "gemini-1.5-pro-preview-0514": { + "max_tokens": 8192, + "max_input_tokens": 
diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index 10c70a858d..f78afb08c6 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -9,6 +9,30 @@
         "mode": "chat",
         "supports_function_calling": true
     },
+    "gpt-4o": {
+        "max_tokens": 4096,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000005,
+        "output_cost_per_token": 0.000015,
+        "litellm_provider": "openai",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "gpt-4o-2024-05-13": {
+        "max_tokens": 4096,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000005,
+        "output_cost_per_token": 0.000015,
+        "litellm_provider": "openai",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
     "gpt-4-turbo-preview": {
         "max_tokens": 4096,
         "max_input_tokens": 128000,
@@ -1086,6 +1110,36 @@
         "supports_tool_choice": true,
         "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
     },
+    "gemini-1.5-flash-preview-0514": {
+        "max_tokens": 8192,
+        "max_input_tokens": 1000000,
+        "max_output_tokens": 8192,
+        "max_images_per_prompt": 3000,
+        "max_videos_per_prompt": 10,
+        "max_video_length": 1,
+        "max_audio_length_hours": 8.4,
+        "max_audio_per_prompt": 1,
+        "max_pdf_size_mb": 30,
+        "input_cost_per_token": 0,
+        "output_cost_per_token": 0,
+        "litellm_provider": "vertex_ai-language-models",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_vision": true,
+        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
+    },
+    "gemini-1.5-pro-preview-0514": {
+        "max_tokens": 8192,
+        "max_input_tokens": 1000000,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.000000625,
+        "output_cost_per_token": 0.000001875,
+        "litellm_provider": "vertex_ai-language-models",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_tool_choice": true,
+        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
+    },
     "gemini-1.5-pro-preview-0215": {
         "max_tokens": 8192,
         "max_input_tokens": 1000000,
@@ -1331,6 +1385,24 @@
         "mode": "completion",
         "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
     },
+    "gemini/gemini-1.5-flash-latest": {
+        "max_tokens": 8192,
+        "max_input_tokens": 1000000,
+        "max_output_tokens": 8192,
+        "max_images_per_prompt": 3000,
+        "max_videos_per_prompt": 10,
+        "max_video_length": 1,
+        "max_audio_length_hours": 8.4,
+        "max_audio_per_prompt": 1,
+        "max_pdf_size_mb": 30,
+        "input_cost_per_token": 0,
+        "output_cost_per_token": 0,
+        "litellm_provider": "vertex_ai-language-models",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_vision": true,
+        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
+    },
     "gemini/gemini-pro": {
         "max_tokens": 8192,
         "max_input_tokens": 32760,
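The per-token prices in these registry entries are in USD; for the gpt-4o entries above they work out to $5 per million input tokens and $15 per million output tokens. A quick worked check:

```python
# Worked example of the gpt-4o pricing fields added above.
input_cost_per_token = 0.000005    # $5 / 1M input tokens
output_cost_per_token = 0.000015   # $15 / 1M output tokens

cost = 1_000 * input_cost_per_token + 500 * output_cost_per_token
print(f"1k prompt + 500 completion tokens -> ${cost:.4f}")  # $0.0125
```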
@@ -1571,6 +1643,159 @@
         "litellm_provider": "replicate",
         "mode": "chat"
     },
+    "openrouter/microsoft/wizardlm-2-8x22b:nitro": {
+        "max_tokens": 65536,
+        "input_cost_per_token": 0.000001,
+        "output_cost_per_token": 0.000001,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/google/gemini-pro-1.5": {
+        "max_tokens": 8192,
+        "max_input_tokens": 1000000,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.0000025,
+        "output_cost_per_token": 0.0000075,
+        "input_cost_per_image": 0.00265,
+        "litellm_provider": "openrouter",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "openrouter/mistralai/mixtral-8x22b-instruct": {
+        "max_tokens": 65536,
+        "input_cost_per_token": 0.00000065,
+        "output_cost_per_token": 0.00000065,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/cohere/command-r-plus": {
+        "max_tokens": 128000,
+        "input_cost_per_token": 0.000003,
+        "output_cost_per_token": 0.000015,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/databricks/dbrx-instruct": {
+        "max_tokens": 32768,
+        "input_cost_per_token": 0.0000006,
+        "output_cost_per_token": 0.0000006,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/anthropic/claude-3-haiku": {
+        "max_tokens": 200000,
+        "input_cost_per_token": 0.00000025,
+        "output_cost_per_token": 0.00000125,
+        "input_cost_per_image": 0.0004,
+        "litellm_provider": "openrouter",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "openrouter/anthropic/claude-3-sonnet": {
+        "max_tokens": 200000,
+        "input_cost_per_token": 0.000003,
+        "output_cost_per_token": 0.000015,
+        "input_cost_per_image": 0.0048,
+        "litellm_provider": "openrouter",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "openrouter/mistralai/mistral-large": {
+        "max_tokens": 32000,
+        "input_cost_per_token": 0.000008,
+        "output_cost_per_token": 0.000024,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/cognitivecomputations/dolphin-mixtral-8x7b": {
+        "max_tokens": 32769,
+        "input_cost_per_token": 0.0000005,
+        "output_cost_per_token": 0.0000005,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/google/gemini-pro-vision": {
+        "max_tokens": 45875,
+        "input_cost_per_token": 0.000000125,
+        "output_cost_per_token": 0.000000375,
+        "input_cost_per_image": 0.0025,
+        "litellm_provider": "openrouter",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
+    "openrouter/fireworks/firellava-13b": {
+        "max_tokens": 4096,
+        "input_cost_per_token": 0.0000002,
+        "output_cost_per_token": 0.0000002,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/meta-llama/llama-3-8b-instruct:free": {
+        "max_tokens": 8192,
+        "input_cost_per_token": 0.0,
+        "output_cost_per_token": 0.0,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/meta-llama/llama-3-8b-instruct:extended": {
+        "max_tokens": 16384,
+        "input_cost_per_token": 0.000000225,
+        "output_cost_per_token": 0.00000225,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/meta-llama/llama-3-70b-instruct:nitro": {
+        "max_tokens": 8192,
+        "input_cost_per_token": 0.0000009,
+        "output_cost_per_token": 0.0000009,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/meta-llama/llama-3-70b-instruct": {
+        "max_tokens": 8192,
+        "input_cost_per_token": 0.00000059,
+        "output_cost_per_token": 0.00000079,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/openai/gpt-4o": {
+        "max_tokens": 4096,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000005,
+        "output_cost_per_token": 0.000015,
+        "litellm_provider": "openrouter",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "openrouter/openai/gpt-4o-2024-05-13": {
+        "max_tokens": 4096,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000005,
+        "output_cost_per_token": 0.000015,
+        "litellm_provider": "openrouter",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "openrouter/openai/gpt-4-vision-preview": {
+        "max_tokens": 130000,
+        "input_cost_per_token": 0.00001,
+        "output_cost_per_token": 0.00003,
+        "input_cost_per_image": 0.01445,
+        "litellm_provider": "openrouter",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_vision": true
+    },
     "openrouter/openai/gpt-3.5-turbo": {
         "max_tokens": 4095,
         "input_cost_per_token": 0.0000015,
@@ -1621,14 +1846,14 @@
         "tool_use_system_prompt_tokens": 395
     },
     "openrouter/google/palm-2-chat-bison": {
-        "max_tokens": 8000,
+        "max_tokens": 25804,
         "input_cost_per_token": 0.0000005,
         "output_cost_per_token": 0.0000005,
         "litellm_provider": "openrouter",
         "mode": "chat"
     },
     "openrouter/google/palm-2-codechat-bison": {
-        "max_tokens": 8000,
+        "max_tokens": 20070,
         "input_cost_per_token": 0.0000005,
         "output_cost_per_token": 0.0000005,
         "litellm_provider": "openrouter",
         "mode": "chat"
@@ -1711,13 +1936,6 @@
         "litellm_provider": "openrouter",
         "mode": "chat"
     },
-    "openrouter/meta-llama/llama-3-70b-instruct": {
-        "max_tokens": 8192,
-        "input_cost_per_token": 0.0000008,
-        "output_cost_per_token": 0.0000008,
-        "litellm_provider": "openrouter",
-        "mode": "chat"
-    },
     "j2-ultra": {
         "max_tokens": 8192,
         "max_input_tokens": 8192,
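These corrections (palm-2 context windows, the deduplicated llama-3-70b-instruct entry) flow through to litellm's runtime model-metadata lookup. A sketch, assuming `litellm.get_model_info` is exported at this version:

```python
# Sketch: reading the registry values edited above back at runtime.
import litellm

info = litellm.get_model_info("openrouter/google/palm-2-chat-bison")
print(info["max_tokens"])            # 25804 after this change
print(info["input_cost_per_token"])  # 5e-07
```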
+ "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000005, + "output_cost_per_token": 0.0000015, + "litellm_provider": "bedrock", + "mode": "chat" + }, "cohere.embed-english-v3": { "max_tokens": 512, "max_input_tokens": 512, @@ -2749,6 +2985,24 @@ "litellm_provider": "ollama", "mode": "completion" }, + "ollama/llama3": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "ollama", + "mode": "chat" + }, + "ollama/llama3:70b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "ollama", + "mode": "chat" + }, "ollama/mistral": { "max_tokens": 8192, "max_input_tokens": 8192, @@ -2758,6 +3012,42 @@ "litellm_provider": "ollama", "mode": "completion" }, + "ollama/mistral-7B-Instruct-v0.1": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "ollama", + "mode": "chat" + }, + "ollama/mistral-7B-Instruct-v0.2": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "ollama", + "mode": "chat" + }, + "ollama/mixtral-8x7B-Instruct-v0.1": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "ollama", + "mode": "chat" + }, + "ollama/mixtral-8x22B-Instruct-v0.1": { + "max_tokens": 65536, + "max_input_tokens": 65536, + "max_output_tokens": 65536, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "ollama", + "mode": "chat" + }, "ollama/codellama": { "max_tokens": 4096, "max_input_tokens": 4096, diff --git a/litellm/proxy/_experimental/out/404.html b/litellm/proxy/_experimental/out/404.html index 448d7cf877..b0b75d094b 100644 --- a/litellm/proxy/_experimental/out/404.html +++ b/litellm/proxy/_experimental/out/404.html @@ -1 +1 @@ -404: This page could not be found.LiteLLM Dashboard

\ No newline at end of file
+404: This page could not be found.LiteLLM Dashboard
\ No newline at end of file diff --git a/litellm/proxy/_experimental/out/_next/static/K8KXTbmuI2ArWjjdMi2iq/_buildManifest.js b/litellm/proxy/_experimental/out/_next/static/2ASoJGxS-D4w-vat00xMy/_buildManifest.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/K8KXTbmuI2ArWjjdMi2iq/_buildManifest.js rename to litellm/proxy/_experimental/out/_next/static/2ASoJGxS-D4w-vat00xMy/_buildManifest.js diff --git a/litellm/proxy/_experimental/out/_next/static/K8KXTbmuI2ArWjjdMi2iq/_ssgManifest.js b/litellm/proxy/_experimental/out/_next/static/2ASoJGxS-D4w-vat00xMy/_ssgManifest.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/K8KXTbmuI2ArWjjdMi2iq/_ssgManifest.js rename to litellm/proxy/_experimental/out/_next/static/2ASoJGxS-D4w-vat00xMy/_ssgManifest.js diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/2f6dbc85-17d29013b8ff3da5.js b/litellm/proxy/_experimental/out/_next/static/chunks/2f6dbc85-052c4579f80d66ae.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/chunks/2f6dbc85-17d29013b8ff3da5.js rename to litellm/proxy/_experimental/out/_next/static/chunks/2f6dbc85-052c4579f80d66ae.js diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/69-e49705773ae41779.js b/litellm/proxy/_experimental/out/_next/static/chunks/69-04708d7d4a17c1ee.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/chunks/69-e49705773ae41779.js rename to litellm/proxy/_experimental/out/_next/static/chunks/69-04708d7d4a17c1ee.js diff --git a/ui/litellm-dashboard/out/_next/static/chunks/566-ccd699ab19124658.js b/litellm/proxy/_experimental/out/_next/static/chunks/884-7576ee407a2ecbe6.js similarity index 64% rename from ui/litellm-dashboard/out/_next/static/chunks/566-ccd699ab19124658.js rename to litellm/proxy/_experimental/out/_next/static/chunks/884-7576ee407a2ecbe6.js index 3b819d4158..f0f47e76ab 100644 --- a/ui/litellm-dashboard/out/_next/static/chunks/566-ccd699ab19124658.js +++ b/litellm/proxy/_experimental/out/_next/static/chunks/884-7576ee407a2ecbe6.js @@ -1,4 +1,4 @@ -"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[566],{12215:function(e,t,n){n.d(t,{iN:function(){return h},R_:function(){return d},EV:function(){return g},ez:function(){return p}});var r=n(41785),o=n(76991),a=[{index:7,opacity:.15},{index:6,opacity:.25},{index:5,opacity:.3},{index:5,opacity:.45},{index:5,opacity:.65},{index:5,opacity:.85},{index:4,opacity:.9},{index:3,opacity:.95},{index:2,opacity:.97},{index:1,opacity:.98}];function i(e){var t=e.r,n=e.g,o=e.b,a=(0,r.py)(t,n,o);return{h:360*a.h,s:a.s,v:a.v}}function l(e){var t=e.r,n=e.g,o=e.b;return"#".concat((0,r.vq)(t,n,o,!1))}function s(e,t,n){var r;return(r=Math.round(e.h)>=60&&240>=Math.round(e.h)?n?Math.round(e.h)-2*t:Math.round(e.h)+2*t:n?Math.round(e.h)+2*t:Math.round(e.h)-2*t)<0?r+=360:r>=360&&(r-=360),r}function c(e,t,n){var r;return 0===e.h&&0===e.s?e.s:((r=n?e.s-.16*t:4===t?e.s+.16:e.s+.05*t)>1&&(r=1),n&&5===t&&r>.1&&(r=.1),r<.06&&(r=.06),Number(r.toFixed(2)))}function u(e,t,n){var r;return(r=n?e.v+.05*t:e.v-.15*t)>1&&(r=1),Number(r.toFixed(2))}function d(e){for(var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=[],r=(0,o.uA)(e),d=5;d>0;d-=1){var p=i(r),f=l((0,o.uA)({h:s(p,d,!0),s:c(p,d,!0),v:u(p,d,!0)}));n.push(f)}n.push(l(r));for(var m=1;m<=4;m+=1){var g=i(r),h=l((0,o.uA)({h:s(g,m),s:c(g,m),v:u(g,m)}));n.push(h)}return"dark"===t.theme?a.map(function(e){var r,a,i,s=e.index,c=e.opacity;return 
l((r=(0,o.uA)(t.backgroundColor||"#141414"),a=(0,o.uA)(n[s]),i=100*c/100,{r:(a.r-r.r)*i+r.r,g:(a.g-r.g)*i+r.g,b:(a.b-r.b)*i+r.b}))}):n}var p={red:"#F5222D",volcano:"#FA541C",orange:"#FA8C16",gold:"#FAAD14",yellow:"#FADB14",lime:"#A0D911",green:"#52C41A",cyan:"#13C2C2",blue:"#1677FF",geekblue:"#2F54EB",purple:"#722ED1",magenta:"#EB2F96",grey:"#666666"},f={},m={};Object.keys(p).forEach(function(e){f[e]=d(p[e]),f[e].primary=f[e][5],m[e]=d(p[e],{theme:"dark",backgroundColor:"#141414"}),m[e].primary=m[e][5]}),f.red,f.volcano;var g=f.gold;f.orange,f.yellow,f.lime,f.green,f.cyan;var h=f.blue;f.geekblue,f.purple,f.magenta,f.grey,f.grey},8985:function(e,t,n){n.d(t,{E4:function(){return ej},jG:function(){return A},ks:function(){return Z},bf:function(){return F},CI:function(){return eD},fp:function(){return X},xy:function(){return eM}});var r,o,a=n(50833),i=n(80406),l=n(63787),s=n(5239),c=function(e){for(var t,n=0,r=0,o=e.length;o>=4;++r,o-=4)t=(65535&(t=255&e.charCodeAt(r)|(255&e.charCodeAt(++r))<<8|(255&e.charCodeAt(++r))<<16|(255&e.charCodeAt(++r))<<24))*1540483477+((t>>>16)*59797<<16),t^=t>>>24,n=(65535&t)*1540483477+((t>>>16)*59797<<16)^(65535&n)*1540483477+((n>>>16)*59797<<16);switch(o){case 3:n^=(255&e.charCodeAt(r+2))<<16;case 2:n^=(255&e.charCodeAt(r+1))<<8;case 1:n^=255&e.charCodeAt(r),n=(65535&n)*1540483477+((n>>>16)*59797<<16)}return n^=n>>>13,(((n=(65535&n)*1540483477+((n>>>16)*59797<<16))^n>>>15)>>>0).toString(36)},u=n(24050),d=n(64090),p=n.t(d,2);n(61475),n(92536);var f=n(47365),m=n(65127);function g(e){return e.join("%")}var h=function(){function e(t){(0,f.Z)(this,e),(0,a.Z)(this,"instanceId",void 0),(0,a.Z)(this,"cache",new Map),this.instanceId=t}return(0,m.Z)(e,[{key:"get",value:function(e){return this.opGet(g(e))}},{key:"opGet",value:function(e){return this.cache.get(e)||null}},{key:"update",value:function(e,t){return this.opUpdate(g(e),t)}},{key:"opUpdate",value:function(e,t){var n=t(this.cache.get(e));null===n?this.cache.delete(e):this.cache.set(e,n)}}]),e}(),b="data-token-hash",v="data-css-hash",y="__cssinjs_instance__",E=d.createContext({hashPriority:"low",cache:function(){var e=Math.random().toString(12).slice(2);if("undefined"!=typeof document&&document.head&&document.body){var t=document.body.querySelectorAll("style[".concat(v,"]"))||[],n=document.head.firstChild;Array.from(t).forEach(function(t){t[y]=t[y]||e,t[y]===e&&document.head.insertBefore(t,n)});var r={};Array.from(document.querySelectorAll("style[".concat(v,"]"))).forEach(function(t){var n,o=t.getAttribute(v);r[o]?t[y]===e&&(null===(n=t.parentNode)||void 0===n||n.removeChild(t)):r[o]=!0})}return new h(e)}(),defaultCache:!0}),w=n(6976),S=n(22127),x=function(){function e(){(0,f.Z)(this,e),(0,a.Z)(this,"cache",void 0),(0,a.Z)(this,"keys",void 0),(0,a.Z)(this,"cacheCallTimes",void 0),this.cache=new Map,this.keys=[],this.cacheCallTimes=0}return(0,m.Z)(e,[{key:"size",value:function(){return this.keys.length}},{key:"internalGet",value:function(e){var t,n,r=arguments.length>1&&void 0!==arguments[1]&&arguments[1],o={map:this.cache};return e.forEach(function(e){if(o){var t;o=null===(t=o)||void 0===t||null===(t=t.map)||void 0===t?void 0:t.get(e)}else o=void 0}),null!==(t=o)&&void 0!==t&&t.value&&r&&(o.value[1]=this.cacheCallTimes++),null===(n=o)||void 0===n?void 0:n.value}},{key:"get",value:function(e){var t;return null===(t=this.internalGet(e,!0))||void 0===t?void 0:t[0]}},{key:"has",value:function(e){return!!this.internalGet(e)}},{key:"set",value:function(t,n){var 
r=this;if(!this.has(t)){if(this.size()+1>e.MAX_CACHE_SIZE+e.MAX_CACHE_OFFSET){var o=this.keys.reduce(function(e,t){var n=(0,i.Z)(e,2)[1];return r.internalGet(t)[1]0,"[Ant Design CSS-in-JS] Theme should have at least one derivative function."),k+=1}return(0,m.Z)(e,[{key:"getDerivativeToken",value:function(e){return this.derivatives.reduce(function(t,n){return n(e,t)},void 0)}}]),e}(),T=new x;function A(e){var t=Array.isArray(e)?e:[e];return T.has(t)||T.set(t,new C(t)),T.get(t)}var N=new WeakMap,I={},R=new WeakMap;function _(e){var t=R.get(e)||"";return t||(Object.keys(e).forEach(function(n){var r=e[n];t+=n,r instanceof C?t+=r.id:r&&"object"===(0,w.Z)(r)?t+=_(r):t+=r}),R.set(e,t)),t}function P(e,t){return c("".concat(t,"_").concat(_(e)))}var M="random-".concat(Date.now(),"-").concat(Math.random()).replace(/\./g,""),L="_bAmBoO_",D=void 0,j=(0,S.Z)();function F(e){return"number"==typeof e?"".concat(e,"px"):e}function B(e,t,n){var r,o=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},i=arguments.length>4&&void 0!==arguments[4]&&arguments[4];if(i)return e;var l=(0,s.Z)((0,s.Z)({},o),{},(r={},(0,a.Z)(r,b,t),(0,a.Z)(r,v,n),r)),c=Object.keys(l).map(function(e){var t=l[e];return t?"".concat(e,'="').concat(t,'"'):null}).filter(function(e){return e}).join(" ");return"")}var Z=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";return"--".concat(t?"".concat(t,"-"):"").concat(e).replace(/([a-z0-9])([A-Z])/g,"$1-$2").replace(/([A-Z]+)([A-Z][a-z0-9]+)/g,"$1-$2").replace(/([a-z])([A-Z0-9])/g,"$1-$2").toLowerCase()},U=function(e,t,n){var r,o={},a={};return Object.entries(e).forEach(function(e){var t=(0,i.Z)(e,2),r=t[0],l=t[1];if(null!=n&&null!==(s=n.preserve)&&void 0!==s&&s[r])a[r]=l;else if(("string"==typeof l||"number"==typeof l)&&!(null!=n&&null!==(c=n.ignore)&&void 0!==c&&c[r])){var s,c,u,d=Z(r,null==n?void 0:n.prefix);o[d]="number"!=typeof l||null!=n&&null!==(u=n.unitless)&&void 0!==u&&u[r]?String(l):"".concat(l,"px"),a[r]="var(".concat(d,")")}}),[a,(r={scope:null==n?void 0:n.scope},Object.keys(o).length?".".concat(t).concat(null!=r&&r.scope?".".concat(r.scope):"","{").concat(Object.entries(o).map(function(e){var t=(0,i.Z)(e,2),n=t[0],r=t[1];return"".concat(n,":").concat(r,";")}).join(""),"}"):"")]},z=n(24800),H=(0,s.Z)({},p).useInsertionEffect,G=H?function(e,t,n){return H(function(){return e(),t()},n)}:function(e,t,n){d.useMemo(e,n),(0,z.Z)(function(){return t(!0)},n)},W=void 0!==(0,s.Z)({},p).useInsertionEffect?function(e){var t=[],n=!1;return d.useEffect(function(){return n=!1,function(){n=!0,t.length&&t.forEach(function(e){return e()})}},e),function(e){n||t.push(e)}}:function(){return function(e){e()}};function $(e,t,n,r,o){var a=d.useContext(E).cache,s=g([e].concat((0,l.Z)(t))),c=W([s]),u=function(e){a.opUpdate(s,function(t){var r=(0,i.Z)(t||[void 0,void 0],2),o=r[0],a=[void 0===o?0:o,r[1]||n()];return e?e(a):a})};d.useMemo(function(){u()},[s]);var p=a.opGet(s)[1];return G(function(){null==o||o(p)},function(e){return u(function(t){var n=(0,i.Z)(t,2),r=n[0],a=n[1];return e&&0===r&&(null==o||o(p)),[r+1,a]}),function(){a.opUpdate(s,function(t){var n=(0,i.Z)(t||[],2),o=n[0],l=void 0===o?0:o,u=n[1];return 0==l-1?(c(function(){(e||!a.opGet(s))&&(null==r||r(u,!1))}),null):[l-1,u]})}},[s]),p}var V={},q=new Map,Y=function(e,t,n,r){var o=n.getDerivativeToken(e),a=(0,s.Z)((0,s.Z)({},o),t);return r&&(a=r(a)),a},K="token";function X(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=(0,d.useContext)(E),o=r.cache.instanceId,a=r.container,p=n.salt,f=void 
0===p?"":p,m=n.override,g=void 0===m?V:m,h=n.formatToken,w=n.getComputedToken,S=n.cssVar,x=function(e,t){for(var n=N,r=0;r=(q.get(e)||0)}),n.length-r.length>0&&r.forEach(function(e){"undefined"!=typeof document&&document.querySelectorAll("style[".concat(b,'="').concat(e,'"]')).forEach(function(e){if(e[y]===o){var t;null===(t=e.parentNode)||void 0===t||t.removeChild(e)}}),q.delete(e)})},function(e){var t=(0,i.Z)(e,4),n=t[0],r=t[3];if(S&&r){var l=(0,u.hq)(r,c("css-variables-".concat(n._themeKey)),{mark:v,prepend:"queue",attachTo:a,priority:-999});l[y]=o,l.setAttribute(b,n._themeKey)}})}var Q=n(14749),J={animationIterationCount:1,borderImageOutset:1,borderImageSlice:1,borderImageWidth:1,boxFlex:1,boxFlexGroup:1,boxOrdinalGroup:1,columnCount:1,columns:1,flex:1,flexGrow:1,flexPositive:1,flexShrink:1,flexNegative:1,flexOrder:1,gridRow:1,gridRowEnd:1,gridRowSpan:1,gridRowStart:1,gridColumn:1,gridColumnEnd:1,gridColumnSpan:1,gridColumnStart:1,msGridRow:1,msGridRowSpan:1,msGridColumn:1,msGridColumnSpan:1,fontWeight:1,lineHeight:1,opacity:1,order:1,orphans:1,tabSize:1,widows:1,zIndex:1,zoom:1,WebkitLineClamp:1,fillOpacity:1,floodOpacity:1,stopOpacity:1,strokeDasharray:1,strokeDashoffset:1,strokeMiterlimit:1,strokeOpacity:1,strokeWidth:1},ee="comm",et="rule",en="decl",er=Math.abs,eo=String.fromCharCode;function ea(e,t,n){return e.replace(t,n)}function ei(e,t){return 0|e.charCodeAt(t)}function el(e,t,n){return e.slice(t,n)}function es(e){return e.length}function ec(e,t){return t.push(e),e}function eu(e,t){for(var n="",r=0;r0?f[v]+" "+y:ea(y,/&\f/g,f[v])).trim())&&(s[b++]=E);return ev(e,t,n,0===o?et:l,s,c,u,d)}function eO(e,t,n,r,o){return ev(e,t,n,en,el(e,0,r),el(e,r+1,-1),r,o)}var ek="data-ant-cssinjs-cache-path",eC="_FILE_STYLE__",eT=!0,eA="_multi_value_";function eN(e){var t,n,r;return eu((r=function e(t,n,r,o,a,i,l,s,c){for(var u,d,p,f=0,m=0,g=l,h=0,b=0,v=0,y=1,E=1,w=1,S=0,x="",O=a,k=i,C=o,T=x;E;)switch(v=S,S=ey()){case 40:if(108!=v&&58==ei(T,g-1)){-1!=(d=T+=ea(eS(S),"&","&\f"),p=er(f?s[f-1]:0),d.indexOf("&\f",p))&&(w=-1);break}case 34:case 39:case 91:T+=eS(S);break;case 9:case 10:case 13:case 32:T+=function(e){for(;eh=eE();)if(eh<33)ey();else break;return ew(e)>2||ew(eh)>3?"":" "}(v);break;case 92:T+=function(e,t){for(var n;--t&&ey()&&!(eh<48)&&!(eh>102)&&(!(eh>57)||!(eh<65))&&(!(eh>70)||!(eh<97)););return n=eg+(t<6&&32==eE()&&32==ey()),el(eb,e,n)}(eg-1,7);continue;case 47:switch(eE()){case 42:case 47:ec(ev(u=function(e,t){for(;ey();)if(e+eh===57)break;else if(e+eh===84&&47===eE())break;return"/*"+el(eb,t,eg-1)+"*"+eo(47===e?e:ey())}(ey(),eg),n,r,ee,eo(eh),el(u,2,-2),0,c),c);break;default:T+="/"}break;case 123*y:s[f++]=es(T)*w;case 125*y:case 59:case 0:switch(S){case 0:case 125:E=0;case 59+m:-1==w&&(T=ea(T,/\f/g,"")),b>0&&es(T)-g&&ec(b>32?eO(T+";",o,r,g-1,c):eO(ea(T," ","")+";",o,r,g-2,c),c);break;case 59:T+=";";default:if(ec(C=ex(T,n,r,f,m,a,s,x,O=[],k=[],g,i),i),123===S){if(0===m)e(T,n,C,C,O,i,g,s,k);else switch(99===h&&110===ei(T,3)?100:h){case 100:case 108:case 109:case 115:e(t,C,C,o&&ec(ex(t,C,C,0,0,a,s,x,a,O=[],g,k),k),a,k,g,s,o?O:k);break;default:e(T,C,C,C,[""],k,0,s,k)}}}f=m=b=0,y=w=1,x=T="",g=l;break;case 58:g=1+es(T),b=v;default:if(y<1){if(123==S)--y;else if(125==S&&0==y++&&125==(eh=eg>0?ei(eb,--eg):0,ef--,10===eh&&(ef=1,ep--),eh))continue}switch(T+=eo(S),S*y){case 38:w=m>0?1:(T+="\f",-1);break;case 44:s[f++]=(es(T)-1)*w,w=1;break;case 64:45===eE()&&(T+=eS(ey())),h=eE(),m=g=es(x=T+=function(e){for(;!ew(eE());)ey();return el(eb,e,eg)}(eg)),S++;break;case 
45:45===v&&2==es(T)&&(y=0)}}return i}("",null,null,null,[""],(n=t=e,ep=ef=1,em=es(eb=n),eg=0,t=[]),0,[0],t),eb="",r),ed).replace(/\{%%%\:[^;];}/g,";")}var eI=function e(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{root:!0,parentSelectors:[]},o=r.root,a=r.injectHash,c=r.parentSelectors,d=n.hashId,p=n.layer,f=(n.path,n.hashPriority),m=n.transformers,g=void 0===m?[]:m;n.linters;var h="",b={};function v(t){var r=t.getName(d);if(!b[r]){var o=e(t.style,n,{root:!1,parentSelectors:c}),a=(0,i.Z)(o,1)[0];b[r]="@keyframes ".concat(t.getName(d)).concat(a)}}if((function e(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];return t.forEach(function(t){Array.isArray(t)?e(t,n):t&&n.push(t)}),n})(Array.isArray(t)?t:[t]).forEach(function(t){var r="string"!=typeof t||o?t:{};if("string"==typeof r)h+="".concat(r,"\n");else if(r._keyframe)v(r);else{var u=g.reduce(function(e,t){var n;return(null==t||null===(n=t.visit)||void 0===n?void 0:n.call(t,e))||e},r);Object.keys(u).forEach(function(t){var r=u[t];if("object"!==(0,w.Z)(r)||!r||"animationName"===t&&r._keyframe||"object"===(0,w.Z)(r)&&r&&("_skip_check_"in r||eA in r)){function p(e,t){var n=e.replace(/[A-Z]/g,function(e){return"-".concat(e.toLowerCase())}),r=t;J[e]||"number"!=typeof r||0===r||(r="".concat(r,"px")),"animationName"===e&&null!=t&&t._keyframe&&(v(t),r=t.getName(d)),h+="".concat(n,":").concat(r,";")}var m,g=null!==(m=null==r?void 0:r.value)&&void 0!==m?m:r;"object"===(0,w.Z)(r)&&null!=r&&r[eA]&&Array.isArray(g)?g.forEach(function(e){p(t,e)}):p(t,g)}else{var y=!1,E=t.trim(),S=!1;(o||a)&&d?E.startsWith("@")?y=!0:E=function(e,t,n){if(!t)return e;var r=".".concat(t),o="low"===n?":where(".concat(r,")"):r;return e.split(",").map(function(e){var t,n=e.trim().split(/\s+/),r=n[0]||"",a=(null===(t=r.match(/^\w+/))||void 0===t?void 0:t[0])||"";return[r="".concat(a).concat(o).concat(r.slice(a.length))].concat((0,l.Z)(n.slice(1))).join(" ")}).join(",")}(t,d,f):o&&!d&&("&"===E||""===E)&&(E="",S=!0);var x=e(r,n,{root:S,injectHash:y,parentSelectors:[].concat((0,l.Z)(c),[E])}),O=(0,i.Z)(x,2),k=O[0],C=O[1];b=(0,s.Z)((0,s.Z)({},b),C),h+="".concat(E).concat(k)}})}}),o){if(p&&(void 0===D&&(D=function(e,t,n){if((0,S.Z)()){(0,u.hq)(e,M);var r,o,a=document.createElement("div");a.style.position="fixed",a.style.left="0",a.style.top="0",null==t||t(a),document.body.appendChild(a);var i=n?n(a):null===(r=getComputedStyle(a).content)||void 0===r?void 0:r.includes(L);return null===(o=a.parentNode)||void 0===o||o.removeChild(a),(0,u.jL)(M),i}return!1}("@layer ".concat(M," { .").concat(M,' { content: "').concat(L,'"!important; } }'),function(e){e.className=M})),D)){var y=p.split(","),E=y[y.length-1].trim();h="@layer ".concat(E," {").concat(h,"}"),y.length>1&&(h="@layer ".concat(p,"{%%%:%}").concat(h))}}else h="{".concat(h,"}");return[h,b]};function eR(e,t){return c("".concat(e.join("%")).concat(t))}function e_(){return null}var eP="style";function eM(e,t){var n=e.token,o=e.path,s=e.hashId,c=e.layer,p=e.nonce,f=e.clientOnly,m=e.order,g=void 0===m?0:m,h=d.useContext(E),w=h.autoClear,x=(h.mock,h.defaultCache),O=h.hashPriority,k=h.container,C=h.ssrInline,T=h.transformers,A=h.linters,N=h.cache,I=n._tokenKey,R=[I].concat((0,l.Z)(o)),_=$(eP,R,function(){var e=R.join("|");if(!function(){if(!r&&(r={},(0,S.Z)())){var e,t=document.createElement("div");t.className=ek,t.style.position="fixed",t.style.visibility="hidden",t.style.top="-9999px",document.body.appendChild(t);var 
n=getComputedStyle(t).content||"";(n=n.replace(/^"/,"").replace(/"$/,"")).split(";").forEach(function(e){var t=e.split(":"),n=(0,i.Z)(t,2),o=n[0],a=n[1];r[o]=a});var o=document.querySelector("style[".concat(ek,"]"));o&&(eT=!1,null===(e=o.parentNode)||void 0===e||e.removeChild(o)),document.body.removeChild(t)}}(),r[e]){var n=function(e){var t=r[e],n=null;if(t&&(0,S.Z)()){if(eT)n=eC;else{var o=document.querySelector("style[".concat(v,'="').concat(r[e],'"]'));o?n=o.innerHTML:delete r[e]}}return[n,t]}(e),a=(0,i.Z)(n,2),l=a[0],u=a[1];if(l)return[l,I,u,{},f,g]}var d=eI(t(),{hashId:s,hashPriority:O,layer:c,path:o.join("-"),transformers:T,linters:A}),p=(0,i.Z)(d,2),m=p[0],h=p[1],b=eN(m),y=eR(R,b);return[b,I,y,h,f,g]},function(e,t){var n=(0,i.Z)(e,3)[2];(t||w)&&j&&(0,u.jL)(n,{mark:v})},function(e){var t=(0,i.Z)(e,4),n=t[0],r=(t[1],t[2]),o=t[3];if(j&&n!==eC){var a={mark:v,prepend:"queue",attachTo:k,priority:g},l="function"==typeof p?p():p;l&&(a.csp={nonce:l});var s=(0,u.hq)(n,r,a);s[y]=N.instanceId,s.setAttribute(b,I),Object.keys(o).forEach(function(e){(0,u.hq)(eN(o[e]),"_effect-".concat(e),a)})}}),P=(0,i.Z)(_,3),M=P[0],L=P[1],D=P[2];return function(e){var t,n;return t=C&&!j&&x?d.createElement("style",(0,Q.Z)({},(n={},(0,a.Z)(n,b,L),(0,a.Z)(n,v,D),n),{dangerouslySetInnerHTML:{__html:M}})):d.createElement(e_,null),d.createElement(d.Fragment,null,t,e)}}var eL="cssVar",eD=function(e,t){var n=e.key,r=e.prefix,o=e.unitless,a=e.ignore,s=e.token,c=e.scope,p=void 0===c?"":c,f=(0,d.useContext)(E),m=f.cache.instanceId,g=f.container,h=s._tokenKey,w=[].concat((0,l.Z)(e.path),[n,p,h]);return $(eL,w,function(){var e=U(t(),n,{prefix:r,unitless:o,ignore:a,scope:p}),l=(0,i.Z)(e,2),s=l[0],c=l[1],u=eR(w,c);return[s,c,u,n]},function(e){var t=(0,i.Z)(e,3)[2];j&&(0,u.jL)(t,{mark:v})},function(e){var t=(0,i.Z)(e,3),r=t[1],o=t[2];if(r){var a=(0,u.hq)(r,o,{mark:v,prepend:"queue",attachTo:g,priority:-999});a[y]=m,a.setAttribute(b,n)}})};o={},(0,a.Z)(o,eP,function(e,t,n){var r=(0,i.Z)(e,6),o=r[0],a=r[1],l=r[2],s=r[3],c=r[4],u=r[5],d=(n||{}).plain;if(c)return null;var p=o,f={"data-rc-order":"prependQueue","data-rc-priority":"".concat(u)};return p=B(o,a,l,f,d),s&&Object.keys(s).forEach(function(e){if(!t[e]){t[e]=!0;var n=eN(s[e]);p+=B(n,a,"_effect-".concat(e),f,d)}}),[u,l,p]}),(0,a.Z)(o,K,function(e,t,n){var r=(0,i.Z)(e,5),o=r[2],a=r[3],l=r[4],s=(n||{}).plain;if(!a)return null;var c=o._tokenKey,u=B(a,l,c,{"data-rc-order":"prependQueue","data-rc-priority":"".concat(-999)},s);return[-999,c,u]}),(0,a.Z)(o,eL,function(e,t,n){var r=(0,i.Z)(e,4),o=r[1],a=r[2],l=r[3],s=(n||{}).plain;if(!o)return null;var c=B(o,l,a,{"data-rc-order":"prependQueue","data-rc-priority":"".concat(-999)},s);return[-999,a,c]});var ej=function(){function e(t,n){(0,f.Z)(this,e),(0,a.Z)(this,"name",void 0),(0,a.Z)(this,"style",void 0),(0,a.Z)(this,"_keyframe",!0),this.name=t,this.style=n}return(0,m.Z)(e,[{key:"getName",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return e?"".concat(e,"-").concat(this.name):this.name}}]),e}();function eF(e){return e.notSplit=!0,e}eF(["borderTop","borderBottom"]),eF(["borderTop"]),eF(["borderBottom"]),eF(["borderLeft","borderRight"]),eF(["borderLeft"]),eF(["borderRight"])},60688:function(e,t,n){n.d(t,{Z:function(){return A}});var r=n(14749),o=n(80406),a=n(50833),i=n(60635),l=n(64090),s=n(16480),c=n.n(s),u=n(12215),d=n(67689),p=n(5239),f=n(6976),m=n(24050),g=n(74687),h=n(53850);function b(e){return"object"===(0,f.Z)(e)&&"string"==typeof e.name&&"string"==typeof 
e.theme&&("object"===(0,f.Z)(e.icon)||"function"==typeof e.icon)}function v(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return Object.keys(e).reduce(function(t,n){var r=e[n];return"class"===n?(t.className=r,delete t.class):(delete t[n],t[n.replace(/-(.)/g,function(e,t){return t.toUpperCase()})]=r),t},{})}function y(e){return(0,u.R_)(e)[0]}function E(e){return e?Array.isArray(e)?e:[e]:[]}var w=function(e){var t=(0,l.useContext)(d.Z),n=t.csp,r=t.prefixCls,o="\n.anticon {\n display: inline-block;\n color: inherit;\n font-style: normal;\n line-height: 0;\n text-align: center;\n text-transform: none;\n vertical-align: -0.125em;\n text-rendering: optimizeLegibility;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n\n.anticon > * {\n line-height: 1;\n}\n\n.anticon svg {\n display: inline-block;\n}\n\n.anticon::before {\n display: none;\n}\n\n.anticon .anticon-icon {\n display: block;\n}\n\n.anticon[tabindex] {\n cursor: pointer;\n}\n\n.anticon-spin::before,\n.anticon-spin {\n display: inline-block;\n -webkit-animation: loadingCircle 1s infinite linear;\n animation: loadingCircle 1s infinite linear;\n}\n\n@-webkit-keyframes loadingCircle {\n 100% {\n -webkit-transform: rotate(360deg);\n transform: rotate(360deg);\n }\n}\n\n@keyframes loadingCircle {\n 100% {\n -webkit-transform: rotate(360deg);\n transform: rotate(360deg);\n }\n}\n";r&&(o=o.replace(/anticon/g,r)),(0,l.useEffect)(function(){var t=e.current,r=(0,g.A)(t);(0,m.hq)(o,"@ant-design-icons",{prepend:!0,csp:n,attachTo:r})},[])},S=["icon","className","onClick","style","primaryColor","secondaryColor"],x={primaryColor:"#333",secondaryColor:"#E6E6E6",calculated:!1},O=function(e){var t,n,r=e.icon,o=e.className,a=e.onClick,s=e.style,c=e.primaryColor,u=e.secondaryColor,d=(0,i.Z)(e,S),f=l.useRef(),m=x;if(c&&(m={primaryColor:c,secondaryColor:u||y(c)}),w(f),t=b(r),n="icon should be icon definiton, but got ".concat(r),(0,h.ZP)(t,"[@ant-design/icons] ".concat(n)),!b(r))return null;var g=r;return g&&"function"==typeof g.icon&&(g=(0,p.Z)((0,p.Z)({},g),{},{icon:g.icon(m.primaryColor,m.secondaryColor)})),function e(t,n,r){return r?l.createElement(t.tag,(0,p.Z)((0,p.Z)({key:n},v(t.attrs)),r),(t.children||[]).map(function(r,o){return e(r,"".concat(n,"-").concat(t.tag,"-").concat(o))})):l.createElement(t.tag,(0,p.Z)({key:n},v(t.attrs)),(t.children||[]).map(function(r,o){return e(r,"".concat(n,"-").concat(t.tag,"-").concat(o))}))}(g.icon,"svg-".concat(g.name),(0,p.Z)((0,p.Z)({className:o,onClick:a,style:s,"data-icon":g.name,width:"1em",height:"1em",fill:"currentColor","aria-hidden":"true"},d),{},{ref:f}))};function k(e){var t=E(e),n=(0,o.Z)(t,2),r=n[0],a=n[1];return O.setTwoToneColors({primaryColor:r,secondaryColor:a})}O.displayName="IconReact",O.getTwoToneColors=function(){return(0,p.Z)({},x)},O.setTwoToneColors=function(e){var t=e.primaryColor,n=e.secondaryColor;x.primaryColor=t,x.secondaryColor=n||y(t),x.calculated=!!n};var C=["className","icon","spin","rotate","tabIndex","onClick","twoToneColor"];k(u.iN.primary);var T=l.forwardRef(function(e,t){var n,s=e.className,u=e.icon,p=e.spin,f=e.rotate,m=e.tabIndex,g=e.onClick,h=e.twoToneColor,b=(0,i.Z)(e,C),v=l.useContext(d.Z),y=v.prefixCls,w=void 0===y?"anticon":y,S=v.rootClassName,x=c()(S,w,(n={},(0,a.Z)(n,"".concat(w,"-").concat(u.name),!!u.name),(0,a.Z)(n,"".concat(w,"-spin"),!!p||"loading"===u.name),n),s),k=m;void 0===k&&g&&(k=-1);var T=E(h),A=(0,o.Z)(T,2),N=A[0],I=A[1];return 
l.createElement("span",(0,r.Z)({role:"img","aria-label":u.name},b,{ref:t,tabIndex:k,onClick:g,className:x}),l.createElement(O,{icon:u,primaryColor:N,secondaryColor:I,style:f?{msTransform:"rotate(".concat(f,"deg)"),transform:"rotate(".concat(f,"deg)")}:void 0}))});T.displayName="AntdIcon",T.getTwoToneColor=function(){var e=O.getTwoToneColors();return e.calculated?[e.primaryColor,e.secondaryColor]:e.primaryColor},T.setTwoToneColor=k;var A=T},67689:function(e,t,n){var r=(0,n(64090).createContext)({});t.Z=r},99537:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm193.5 301.7l-210.6 292a31.8 31.8 0 01-51.7 0L318.5 484.9c-3.8-5.3 0-12.7 6.5-12.7h46.9c10.2 0 19.9 4.9 25.9 13.3l71.2 98.8 157.2-218c6-8.3 15.6-13.3 25.9-13.3H699c6.5 0 10.3 7.4 6.5 12.7z"}}]},name:"check-circle",theme:"filled"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},90507:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M912 190h-69.9c-9.8 0-19.1 4.5-25.1 12.2L404.7 724.5 207 474a32 32 0 00-25.1-12.2H112c-6.7 0-10.4 7.7-6.3 12.9l273.9 347c12.8 16.2 37.4 16.2 50.3 0l488.4-618.9c4.1-5.1.4-12.8-6.3-12.8z"}}]},name:"check",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},77136:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{"fill-rule":"evenodd",viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64c247.4 0 448 200.6 448 448S759.4 960 512 960 64 759.4 64 512 264.6 64 512 64zm127.98 274.82h-.04l-.08.06L512 466.75 384.14 338.88c-.04-.05-.06-.06-.08-.06a.12.12 0 00-.07 0c-.03 0-.05.01-.09.05l-45.02 45.02a.2.2 0 00-.05.09.12.12 0 000 .07v.02a.27.27 0 00.06.06L466.75 512 338.88 639.86c-.05.04-.06.06-.06.08a.12.12 0 000 .07c0 .03.01.05.05.09l45.02 45.02a.2.2 0 00.09.05.12.12 0 00.07 0c.02 0 .04-.01.08-.05L512 557.25l127.86 127.87c.04.04.06.05.08.05a.12.12 0 00.07 0c.03 0 .05-.01.09-.05l45.02-45.02a.2.2 0 00.05-.09.12.12 0 000-.07v-.02a.27.27 0 00-.05-.06L557.25 512l127.87-127.86c.04-.04.05-.06.05-.08a.12.12 0 000-.07c0-.03-.01-.05-.05-.09l-45.02-45.02a.2.2 0 00-.09-.05.12.12 0 00-.07 0z"}}]},name:"close-circle",theme:"filled"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},81303:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{"fill-rule":"evenodd",viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M799.86 166.31c.02 0 .04.02.08.06l57.69 57.7c.04.03.05.05.06.08a.12.12 0 010 .06c0 .03-.02.05-.06.09L569.93 512l287.7 287.7c.04.04.05.06.06.09a.12.12 0 010 .07c0 .02-.02.04-.06.08l-57.7 57.69c-.03.04-.05.05-.07.06a.12.12 0 01-.07 0c-.03 0-.05-.02-.09-.06L512 569.93l-287.7 287.7c-.04.04-.06.05-.09.06a.12.12 0 01-.07 0c-.02 0-.04-.02-.08-.06l-57.69-57.7c-.04-.03-.05-.05-.06-.07a.12.12 0 010-.07c0-.03.02-.05.06-.09L454.07 512l-287.7-287.7c-.04-.04-.05-.06-.06-.09a.12.12 0 010-.07c0-.02.02-.04.06-.08l57.7-57.69c.03-.04.05-.05.07-.06a.12.12 0 01.07 0c.03 0 .05.02.09.06L512 454.07l287.7-287.7c.04-.04.06-.05.09-.06a.12.12 0 01.07 
0z"}}]},name:"close",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},20383:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M884 256h-75c-5.1 0-9.9 2.5-12.9 6.6L512 654.2 227.9 262.6c-3-4.1-7.8-6.6-12.9-6.6h-75c-6.5 0-10.3 7.4-6.5 12.7l352.6 486.1c12.8 17.6 39 17.6 51.7 0l352.6-486.1c3.9-5.3.1-12.7-6.4-12.7z"}}]},name:"down",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},31413:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M176 511a56 56 0 10112 0 56 56 0 10-112 0zm280 0a56 56 0 10112 0 56 56 0 10-112 0zm280 0a56 56 0 10112 0 56 56 0 10-112 0z"}}]},name:"ellipsis",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},20653:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm-32 232c0-4.4 3.6-8 8-8h48c4.4 0 8 3.6 8 8v272c0 4.4-3.6 8-8 8h-48c-4.4 0-8-3.6-8-8V296zm32 440a48.01 48.01 0 010-96 48.01 48.01 0 010 96z"}}]},name:"exclamation-circle",theme:"filled"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},41311:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M942.2 486.2C847.4 286.5 704.1 186 512 186c-192.2 0-335.4 100.5-430.2 300.3a60.3 60.3 0 000 51.5C176.6 737.5 319.9 838 512 838c192.2 0 335.4-100.5 430.2-300.3 7.7-16.2 7.7-35 0-51.5zM512 766c-161.3 0-279.4-81.8-362.7-254C232.6 339.8 350.7 258 512 258c161.3 0 279.4 81.8 362.7 254C791.5 684.2 673.4 766 512 766zm-4-430c-97.2 0-176 78.8-176 176s78.8 176 176 176 176-78.8 176-176-78.8-176-176-176zm0 288c-61.9 0-112-50.1-112-112s50.1-112 112-112 112 50.1 112 112-50.1 112-112 112z"}}]},name:"eye",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},40388:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm32 664c0 4.4-3.6 8-8 8h-48c-4.4 0-8-3.6-8-8V456c0-4.4 3.6-8 8-8h48c4.4 0 8 3.6 8 8v272zm-32-344a48.01 48.01 0 010-96 48.01 48.01 0 010 96z"}}]},name:"info-circle",theme:"filled"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},66155:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"0 0 1024 1024",focusable:"false"},children:[{tag:"path",attrs:{d:"M988 548c-19.9 0-36-16.1-36-36 0-59.4-11.6-117-34.6-171.3a440.45 440.45 0 00-94.3-139.9 437.71 437.71 0 00-139.9-94.3C629 83.6 571.4 72 512 72c-19.9 0-36-16.1-36-36s16.1-36 36-36c69.1 0 136.2 13.5 199.3 40.3C772.3 66 827 103 874 150c47 47 83.9 101.8 109.7 162.7 26.7 63.1 40.2 130.2 40.2 199.3.1 19.9-16 36-35.9 
36z"}}]},name:"loading",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},50459:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M765.7 486.8L314.9 134.7A7.97 7.97 0 00302 141v77.3c0 4.9 2.3 9.6 6.1 12.6l360 281.1-360 281.1c-3.9 3-6.1 7.7-6.1 12.6V883c0 6.7 7.7 10.4 12.9 6.3l450.8-352.1a31.96 31.96 0 000-50.4z"}}]},name:"right",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},96871:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M909.6 854.5L649.9 594.8C690.2 542.7 712 479 712 412c0-80.2-31.3-155.4-87.9-212.1-56.6-56.7-132-87.9-212.1-87.9s-155.5 31.3-212.1 87.9C143.2 256.5 112 331.8 112 412c0 80.1 31.3 155.5 87.9 212.1C256.5 680.8 331.8 712 412 712c67 0 130.6-21.8 182.7-62l259.7 259.6a8.2 8.2 0 0011.6 0l43.6-43.5a8.2 8.2 0 000-11.6zM570.4 570.4C528 612.7 471.8 636 412 636s-116-23.3-158.4-65.6C211.3 528 188 471.8 188 412s23.3-116.1 65.6-158.4C296 211.3 352.2 188 412 188s116.1 23.2 158.4 65.6S636 352.2 636 412s-23.3 116.1-65.6 158.4z"}}]},name:"search",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},97766:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M400 317.7h73.9V656c0 4.4 3.6 8 8 8h60c4.4 0 8-3.6 8-8V317.7H624c6.7 0 10.4-7.7 6.3-12.9L518.3 163a8 8 0 00-12.6 0l-112 141.7c-4.1 5.3-.4 13 6.3 13zM878 626h-60c-4.4 0-8 3.6-8 8v154H214V634c0-4.4-3.6-8-8-8h-60c-4.4 0-8 3.6-8 8v198c0 17.7 14.3 32 32 32h684c17.7 0 32-14.3 32-32V634c0-4.4-3.6-8-8-8z"}}]},name:"upload",theme:"outlined"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},41785:function(e,t,n){n.d(t,{T6:function(){return p},VD:function(){return f},WE:function(){return c},Yt:function(){return m},lC:function(){return a},py:function(){return s},rW:function(){return o},s:function(){return d},ve:function(){return l},vq:function(){return u}});var r=n(27974);function o(e,t,n){return{r:255*(0,r.sh)(e,255),g:255*(0,r.sh)(t,255),b:255*(0,r.sh)(n,255)}}function a(e,t,n){var o=Math.max(e=(0,r.sh)(e,255),t=(0,r.sh)(t,255),n=(0,r.sh)(n,255)),a=Math.min(e,t,n),i=0,l=0,s=(o+a)/2;if(o===a)l=0,i=0;else{var c=o-a;switch(l=s>.5?c/(2-o-a):c/(o+a),o){case e:i=(t-n)/c+(t1&&(n-=1),n<1/6)?e+6*n*(t-e):n<.5?t:n<2/3?e+(t-e)*(2/3-n)*6:e}function l(e,t,n){if(e=(0,r.sh)(e,360),t=(0,r.sh)(t,100),n=(0,r.sh)(n,100),0===t)a=n,l=n,o=n;else{var o,a,l,s=n<.5?n*(1+t):n+t-n*t,c=2*n-s;o=i(c,s,e+1/3),a=i(c,s,e),l=i(c,s,e-1/3)}return{r:255*o,g:255*a,b:255*l}}function s(e,t,n){var o=Math.max(e=(0,r.sh)(e,255),t=(0,r.sh)(t,255),n=(0,r.sh)(n,255)),a=Math.min(e,t,n),i=0,l=o-a;if(o===a)i=0;else{switch(o){case e:i=(t-n)/l+(t>16,g:(65280&e)>>8,b:255&e}}},6564:function(e,t,n){n.d(t,{R:function(){return r}});var 
r={aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyan:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",goldenrod:"#daa520",gold:"#ffd700",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavenderblush:"#fff0f5",lavender:"#e6e6fa",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",rebeccapurple:"#663399",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",steelblue:"#4682b4",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",tomato:"#ff6347",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"}},76991:function(e,t,n){n.d(t,{uA:function(){return i}});var r=n(41785),o=n(6564),a=n(27974);function i(e){var t={r:0,g:0,b:0},n=1,i=null,l=null,s=null,c=!1,p=!1;return"string"==typeof e&&(e=function(e){if(0===(e=e.trim().toLowerCase()).length)return!1;var t=!1;if(o.R[e])e=o.R[e],t=!0;else if("transparent"===e)return{r:0,g:0,b:0,a:0,format:"name"};var n=u.rgb.exec(e);return 
n?{r:n[1],g:n[2],b:n[3]}:(n=u.rgba.exec(e))?{r:n[1],g:n[2],b:n[3],a:n[4]}:(n=u.hsl.exec(e))?{h:n[1],s:n[2],l:n[3]}:(n=u.hsla.exec(e))?{h:n[1],s:n[2],l:n[3],a:n[4]}:(n=u.hsv.exec(e))?{h:n[1],s:n[2],v:n[3]}:(n=u.hsva.exec(e))?{h:n[1],s:n[2],v:n[3],a:n[4]}:(n=u.hex8.exec(e))?{r:(0,r.VD)(n[1]),g:(0,r.VD)(n[2]),b:(0,r.VD)(n[3]),a:(0,r.T6)(n[4]),format:t?"name":"hex8"}:(n=u.hex6.exec(e))?{r:(0,r.VD)(n[1]),g:(0,r.VD)(n[2]),b:(0,r.VD)(n[3]),format:t?"name":"hex"}:(n=u.hex4.exec(e))?{r:(0,r.VD)(n[1]+n[1]),g:(0,r.VD)(n[2]+n[2]),b:(0,r.VD)(n[3]+n[3]),a:(0,r.T6)(n[4]+n[4]),format:t?"name":"hex8"}:!!(n=u.hex3.exec(e))&&{r:(0,r.VD)(n[1]+n[1]),g:(0,r.VD)(n[2]+n[2]),b:(0,r.VD)(n[3]+n[3]),format:t?"name":"hex"}}(e)),"object"==typeof e&&(d(e.r)&&d(e.g)&&d(e.b)?(t=(0,r.rW)(e.r,e.g,e.b),c=!0,p="%"===String(e.r).substr(-1)?"prgb":"rgb"):d(e.h)&&d(e.s)&&d(e.v)?(i=(0,a.JX)(e.s),l=(0,a.JX)(e.v),t=(0,r.WE)(e.h,i,l),c=!0,p="hsv"):d(e.h)&&d(e.s)&&d(e.l)&&(i=(0,a.JX)(e.s),s=(0,a.JX)(e.l),t=(0,r.ve)(e.h,i,s),c=!0,p="hsl"),Object.prototype.hasOwnProperty.call(e,"a")&&(n=e.a)),n=(0,a.Yq)(n),{ok:c,format:e.format||p,r:Math.min(255,Math.max(t.r,0)),g:Math.min(255,Math.max(t.g,0)),b:Math.min(255,Math.max(t.b,0)),a:n}}var l="(?:".concat("[-\\+]?\\d*\\.\\d+%?",")|(?:").concat("[-\\+]?\\d+%?",")"),s="[\\s|\\(]+(".concat(l,")[,|\\s]+(").concat(l,")[,|\\s]+(").concat(l,")\\s*\\)?"),c="[\\s|\\(]+(".concat(l,")[,|\\s]+(").concat(l,")[,|\\s]+(").concat(l,")[,|\\s]+(").concat(l,")\\s*\\)?"),u={CSS_UNIT:new RegExp(l),rgb:RegExp("rgb"+s),rgba:RegExp("rgba"+c),hsl:RegExp("hsl"+s),hsla:RegExp("hsla"+c),hsv:RegExp("hsv"+s),hsva:RegExp("hsva"+c),hex3:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex6:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/,hex4:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex8:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/};function d(e){return!!u.CSS_UNIT.exec(String(e))}},6336:function(e,t,n){n.d(t,{C:function(){return l}});var r=n(41785),o=n(6564),a=n(76991),i=n(27974),l=function(){function e(t,n){if(void 0===t&&(t=""),void 0===n&&(n={}),t instanceof e)return t;"number"==typeof t&&(t=(0,r.Yt)(t)),this.originalInput=t;var o,i=(0,a.uA)(t);this.originalInput=t,this.r=i.r,this.g=i.g,this.b=i.b,this.a=i.a,this.roundA=Math.round(100*this.a)/100,this.format=null!==(o=n.format)&&void 0!==o?o:i.format,this.gradientType=n.gradientType,this.r<1&&(this.r=Math.round(this.r)),this.g<1&&(this.g=Math.round(this.g)),this.b<1&&(this.b=Math.round(this.b)),this.isValid=i.ok}return e.prototype.isDark=function(){return 128>this.getBrightness()},e.prototype.isLight=function(){return!this.isDark()},e.prototype.getBrightness=function(){var e=this.toRgb();return(299*e.r+587*e.g+114*e.b)/1e3},e.prototype.getLuminance=function(){var e=this.toRgb(),t=e.r/255,n=e.g/255,r=e.b/255;return .2126*(t<=.03928?t/12.92:Math.pow((t+.055)/1.055,2.4))+.7152*(n<=.03928?n/12.92:Math.pow((n+.055)/1.055,2.4))+.0722*(r<=.03928?r/12.92:Math.pow((r+.055)/1.055,2.4))},e.prototype.getAlpha=function(){return this.a},e.prototype.setAlpha=function(e){return this.a=(0,i.Yq)(e),this.roundA=Math.round(100*this.a)/100,this},e.prototype.isMonochrome=function(){return 0===this.toHsl().s},e.prototype.toHsv=function(){var e=(0,r.py)(this.r,this.g,this.b);return{h:360*e.h,s:e.s,v:e.v,a:this.a}},e.prototype.toHsvString=function(){var e=(0,r.py)(this.r,this.g,this.b),t=Math.round(360*e.h),n=Math.round(100*e.s),o=Math.round(100*e.v);return 1===this.a?"hsv(".concat(t,", ").concat(n,"%, 
").concat(o,"%)"):"hsva(".concat(t,", ").concat(n,"%, ").concat(o,"%, ").concat(this.roundA,")")},e.prototype.toHsl=function(){var e=(0,r.lC)(this.r,this.g,this.b);return{h:360*e.h,s:e.s,l:e.l,a:this.a}},e.prototype.toHslString=function(){var e=(0,r.lC)(this.r,this.g,this.b),t=Math.round(360*e.h),n=Math.round(100*e.s),o=Math.round(100*e.l);return 1===this.a?"hsl(".concat(t,", ").concat(n,"%, ").concat(o,"%)"):"hsla(".concat(t,", ").concat(n,"%, ").concat(o,"%, ").concat(this.roundA,")")},e.prototype.toHex=function(e){return void 0===e&&(e=!1),(0,r.vq)(this.r,this.g,this.b,e)},e.prototype.toHexString=function(e){return void 0===e&&(e=!1),"#"+this.toHex(e)},e.prototype.toHex8=function(e){return void 0===e&&(e=!1),(0,r.s)(this.r,this.g,this.b,this.a,e)},e.prototype.toHex8String=function(e){return void 0===e&&(e=!1),"#"+this.toHex8(e)},e.prototype.toHexShortString=function(e){return void 0===e&&(e=!1),1===this.a?this.toHexString(e):this.toHex8String(e)},e.prototype.toRgb=function(){return{r:Math.round(this.r),g:Math.round(this.g),b:Math.round(this.b),a:this.a}},e.prototype.toRgbString=function(){var e=Math.round(this.r),t=Math.round(this.g),n=Math.round(this.b);return 1===this.a?"rgb(".concat(e,", ").concat(t,", ").concat(n,")"):"rgba(".concat(e,", ").concat(t,", ").concat(n,", ").concat(this.roundA,")")},e.prototype.toPercentageRgb=function(){var e=function(e){return"".concat(Math.round(100*(0,i.sh)(e,255)),"%")};return{r:e(this.r),g:e(this.g),b:e(this.b),a:this.a}},e.prototype.toPercentageRgbString=function(){var e=function(e){return Math.round(100*(0,i.sh)(e,255))};return 1===this.a?"rgb(".concat(e(this.r),"%, ").concat(e(this.g),"%, ").concat(e(this.b),"%)"):"rgba(".concat(e(this.r),"%, ").concat(e(this.g),"%, ").concat(e(this.b),"%, ").concat(this.roundA,")")},e.prototype.toName=function(){if(0===this.a)return"transparent";if(this.a<1)return!1;for(var e="#"+(0,r.vq)(this.r,this.g,this.b,!1),t=0,n=Object.entries(o.R);t=0;return!t&&r&&(e.startsWith("hex")||"name"===e)?"name"===e&&0===this.a?this.toName():this.toRgbString():("rgb"===e&&(n=this.toRgbString()),"prgb"===e&&(n=this.toPercentageRgbString()),("hex"===e||"hex6"===e)&&(n=this.toHexString()),"hex3"===e&&(n=this.toHexString(!0)),"hex4"===e&&(n=this.toHex8String(!0)),"hex8"===e&&(n=this.toHex8String()),"name"===e&&(n=this.toName()),"hsl"===e&&(n=this.toHslString()),"hsv"===e&&(n=this.toHsvString()),n||this.toHexString())},e.prototype.toNumber=function(){return(Math.round(this.r)<<16)+(Math.round(this.g)<<8)+Math.round(this.b)},e.prototype.clone=function(){return new e(this.toString())},e.prototype.lighten=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.l+=t/100,n.l=(0,i.V2)(n.l),new e(n)},e.prototype.brighten=function(t){void 0===t&&(t=10);var n=this.toRgb();return n.r=Math.max(0,Math.min(255,n.r-Math.round(-(t/100*255)))),n.g=Math.max(0,Math.min(255,n.g-Math.round(-(t/100*255)))),n.b=Math.max(0,Math.min(255,n.b-Math.round(-(t/100*255)))),new e(n)},e.prototype.darken=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.l-=t/100,n.l=(0,i.V2)(n.l),new e(n)},e.prototype.tint=function(e){return void 0===e&&(e=10),this.mix("white",e)},e.prototype.shade=function(e){return void 0===e&&(e=10),this.mix("black",e)},e.prototype.desaturate=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.s-=t/100,n.s=(0,i.V2)(n.s),new e(n)},e.prototype.saturate=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.s+=t/100,n.s=(0,i.V2)(n.s),new e(n)},e.prototype.greyscale=function(){return 
this.desaturate(100)},e.prototype.spin=function(t){var n=this.toHsl(),r=(n.h+t)%360;return n.h=r<0?360+r:r,new e(n)},e.prototype.mix=function(t,n){void 0===n&&(n=50);var r=this.toRgb(),o=new e(t).toRgb(),a=n/100;return new e({r:(o.r-r.r)*a+r.r,g:(o.g-r.g)*a+r.g,b:(o.b-r.b)*a+r.b,a:(o.a-r.a)*a+r.a})},e.prototype.analogous=function(t,n){void 0===t&&(t=6),void 0===n&&(n=30);var r=this.toHsl(),o=360/n,a=[this];for(r.h=(r.h-(o*t>>1)+720)%360;--t;)r.h=(r.h+o)%360,a.push(new e(r));return a},e.prototype.complement=function(){var t=this.toHsl();return t.h=(t.h+180)%360,new e(t)},e.prototype.monochromatic=function(t){void 0===t&&(t=6);for(var n=this.toHsv(),r=n.h,o=n.s,a=n.v,i=[],l=1/t;t--;)i.push(new e({h:r,s:o,v:a})),a=(a+l)%1;return i},e.prototype.splitcomplement=function(){var t=this.toHsl(),n=t.h;return[this,new e({h:(n+72)%360,s:t.s,l:t.l}),new e({h:(n+216)%360,s:t.s,l:t.l})]},e.prototype.onBackground=function(t){var n=this.toRgb(),r=new e(t).toRgb(),o=n.a+r.a*(1-n.a);return new e({r:(n.r*n.a+r.r*r.a*(1-n.a))/o,g:(n.g*n.a+r.g*r.a*(1-n.a))/o,b:(n.b*n.a+r.b*r.a*(1-n.a))/o,a:o})},e.prototype.triad=function(){return this.polyad(3)},e.prototype.tetrad=function(){return this.polyad(4)},e.prototype.polyad=function(t){for(var n=this.toHsl(),r=n.h,o=[this],a=360/t,i=1;i<t;i++)o.push(new e({h:(r+a*i)%360,s:n.s,l:n.l}));return o},e.prototype.equals=function(t){return this.toRgbString()===new e(t).toRgbString()},e}()},27974:function(e,t,n){function r(e,t){"string"==typeof e&&e.includes(".")&&1===parseFloat(e)&&(e="100%");var n="string"==typeof e&&e.includes("%");return(e=360===t?e:Math.min(t,Math.max(0,parseFloat(e))),n&&(e=parseInt(String(e*t),10)/100),1e-6>Math.abs(e-t))?1:e=360===t?(e<0?e%t+t:e%t)/parseFloat(String(t)):e%t/parseFloat(String(t))}function o(e){return Math.min(1,Math.max(0,e))}function a(e){return(isNaN(e=parseFloat(e))||e<0||e>1)&&(e=1),e}function i(e){return e<=1?"".concat(100*Number(e),"%"):e}function l(e){return 1===e.length?"0"+e:String(e)}n.d(t,{FZ:function(){return l},JX:function(){return i},V2:function(){return o},Yq:function(){return a},sh:function(){return r}})},88804:function(e,t,n){n.d(t,{Z:function(){return y}});var r,o=n(80406),a=n(64090),i=n(89542),l=n(22127);n(53850);var s=n(74084),c=a.createContext(null),u=n(63787),d=n(24800),p=[],f=n(24050);function m(e){var t=e.match(/^(.*)px$/),n=Number(null==t?void 0:t[1]);return Number.isNaN(n)?function(e){if("undefined"==typeof document)return 0;if(void 0===r){var t=document.createElement("div");t.style.width="100%",t.style.height="200px";var n=document.createElement("div"),o=n.style;o.position="absolute",o.top="0",o.left="0",o.pointerEvents="none",o.visibility="hidden",o.width="200px",o.height="150px",o.overflow="hidden",n.appendChild(t),document.body.appendChild(n);var a=t.offsetWidth;n.style.overflow="scroll";var i=t.offsetWidth;a===i&&(i=n.clientWidth),document.body.removeChild(n),r=a-i}return r}():n}var g="rc-util-locker-".concat(Date.now()),h=0,b=!1,v=function(e){return!1!==e&&((0,l.Z)()&&e?"string"==typeof e?document.querySelector(e):"function"==typeof e?e():e:null)},y=a.forwardRef(function(e,t){var n,r,y,E,w=e.open,S=e.autoLock,x=e.getContainer,O=(e.debug,e.autoDestroy),k=void 0===O||O,C=e.children,T=a.useState(w),A=(0,o.Z)(T,2),N=A[0],I=A[1],R=N||w;a.useEffect(function(){(k||w)&&I(w)},[w,k]);var _=a.useState(function(){return v(x)}),P=(0,o.Z)(_,2),M=P[0],L=P[1];a.useEffect(function(){var e=v(x);L(null!=e?e:null)});var D=function(e,t){var n=a.useState(function(){return(0,l.Z)()?document.createElement("div"):null}),r=(0,o.Z)(n,1)[0],i=a.useRef(!1),s=a.useContext(c),f=a.useState(p),m=(0,o.Z)(f,2),g=m[0],h=m[1],b=s||(i.current?void 0:function(e){h(function(t){return[e].concat((0,u.Z)(t))})});function v(){r.parentElement||document.body.appendChild(r),i.current=!0}function y(){var e;null===(e=r.parentElement)||void 0===e||e.removeChild(r),i.current=!1}return(0,d.Z)(function(){return
e?s?s(v):v():y(),y},[e]),(0,d.Z)(function(){g.length&&(g.forEach(function(e){return e()}),h(p))},[g]),[r,b]}(R&&!M,0),j=(0,o.Z)(D,2),F=j[0],B=j[1],Z=null!=M?M:F;n=!!(S&&w&&(0,l.Z)()&&(Z===F||Z===document.body)),r=a.useState(function(){return h+=1,"".concat(g,"_").concat(h)}),y=(0,o.Z)(r,1)[0],(0,d.Z)(function(){if(n){var e=function(e){if("undefined"==typeof document||!e||!(e instanceof Element))return{width:0,height:0};var t=getComputedStyle(e,"::-webkit-scrollbar"),n=t.width,r=t.height;return{width:m(n),height:m(r)}}(document.body).width,t=document.body.scrollHeight>(window.innerHeight||document.documentElement.clientHeight)&&window.innerWidth>document.body.offsetWidth;(0,f.hq)("\nhtml body {\n overflow-y: hidden;\n ".concat(t?"width: calc(100% - ".concat(e,"px);"):"","\n}"),y)}else(0,f.jL)(y);return function(){(0,f.jL)(y)}},[n,y]);var U=null;C&&(0,s.Yr)(C)&&t&&(U=C.ref);var z=(0,s.x1)(U,t);if(!R||!(0,l.Z)()||void 0===M)return null;var H=!1===Z||("boolean"==typeof E&&(b=E),b),G=C;return t&&(G=a.cloneElement(C,{ref:z})),a.createElement(c.Provider,{value:B},H?G:(0,i.createPortal)(G,Z))})},44101:function(e,t,n){n.d(t,{Z:function(){return z}});var r=n(5239),o=n(80406),a=n(60635),i=n(88804),l=n(16480),s=n.n(l),c=n(46505),u=n(97472),d=n(74687),p=n(54811),f=n(91010),m=n(24800),g=n(76158),h=n(64090),b=n(14749),v=n(49367),y=n(74084);function E(e){var t=e.prefixCls,n=e.align,r=e.arrow,o=e.arrowPos,a=r||{},i=a.className,l=a.content,c=o.x,u=o.y,d=h.useRef();if(!n||!n.points)return null;var p={position:"absolute"};if(!1!==n.autoArrow){var f=n.points[0],m=n.points[1],g=f[0],b=f[1],v=m[0],y=m[1];g!==v&&["t","b"].includes(g)?"t"===g?p.top=0:p.bottom=0:p.top=void 0===u?0:u,b!==y&&["l","r"].includes(b)?"l"===b?p.left=0:p.right=0:p.left=void 0===c?0:c}return h.createElement("div",{ref:d,className:s()("".concat(t,"-arrow"),i),style:p},l)}function w(e){var t=e.prefixCls,n=e.open,r=e.zIndex,o=e.mask,a=e.motion;return o?h.createElement(v.ZP,(0,b.Z)({},a,{motionAppear:!0,visible:n,removeOnLeave:!0}),function(e){var n=e.className;return h.createElement("div",{style:{zIndex:r},className:s()("".concat(t,"-mask"),n)})}):null}var S=h.memo(function(e){return e.children},function(e,t){return t.cache}),x=h.forwardRef(function(e,t){var n=e.popup,a=e.className,i=e.prefixCls,l=e.style,u=e.target,d=e.onVisibleChanged,p=e.open,f=e.keepDom,g=e.fresh,x=e.onClick,O=e.mask,k=e.arrow,C=e.arrowPos,T=e.align,A=e.motion,N=e.maskMotion,I=e.forceRender,R=e.getPopupContainer,_=e.autoDestroy,P=e.portal,M=e.zIndex,L=e.onMouseEnter,D=e.onMouseLeave,j=e.onPointerEnter,F=e.ready,B=e.offsetX,Z=e.offsetY,U=e.offsetR,z=e.offsetB,H=e.onAlign,G=e.onPrepare,W=e.stretch,$=e.targetWidth,V=e.targetHeight,q="function"==typeof n?n():n,Y=p||f,K=(null==R?void 0:R.length)>0,X=h.useState(!R||!K),Q=(0,o.Z)(X,2),J=Q[0],ee=Q[1];if((0,m.Z)(function(){!J&&K&&u&&ee(!0)},[J,K,u]),!J)return null;var et="auto",en={left:"-1000vw",top:"-1000vh",right:et,bottom:et};if(F||!p){var er,eo=T.points,ea=T.dynamicInset||(null===(er=T._experimental)||void 0===er?void 0:er.dynamicInset),ei=ea&&"r"===eo[0][1],el=ea&&"b"===eo[0][0];ei?(en.right=U,en.left=et):(en.left=B,en.right=et),el?(en.bottom=z,en.top=et):(en.top=Z,en.bottom=et)}var es={};return W&&(W.includes("height")&&V?es.height=V:W.includes("minHeight")&&V&&(es.minHeight=V),W.includes("width")&&$?es.width=$:W.includes("minWidth")&&$&&(es.minWidth=$)),p||(es.pointerEvents="none"),h.createElement(P,{open:I||Y,getContainer:R&&function(){return 
R(u)},autoDestroy:_},h.createElement(w,{prefixCls:i,open:p,zIndex:M,mask:O,motion:N}),h.createElement(c.Z,{onResize:H,disabled:!p},function(e){return h.createElement(v.ZP,(0,b.Z)({motionAppear:!0,motionEnter:!0,motionLeave:!0,removeOnLeave:!1,forceRender:I,leavedClassName:"".concat(i,"-hidden")},A,{onAppearPrepare:G,onEnterPrepare:G,visible:p,onVisibleChanged:function(e){var t;null==A||null===(t=A.onVisibleChanged)||void 0===t||t.call(A,e),d(e)}}),function(n,o){var c=n.className,u=n.style,d=s()(i,c,a);return h.createElement("div",{ref:(0,y.sQ)(e,t,o),className:d,style:(0,r.Z)((0,r.Z)((0,r.Z)((0,r.Z)({"--arrow-x":"".concat(C.x||0,"px"),"--arrow-y":"".concat(C.y||0,"px")},en),es),u),{},{boxSizing:"border-box",zIndex:M},l),onMouseEnter:L,onMouseLeave:D,onPointerEnter:j,onClick:x},k&&h.createElement(E,{prefixCls:i,arrow:k,arrowPos:C,align:T}),h.createElement(S,{cache:!p&&!g},q))})}))}),O=h.forwardRef(function(e,t){var n=e.children,r=e.getTriggerDOMNode,o=(0,y.Yr)(n),a=h.useCallback(function(e){(0,y.mH)(t,r?r(e):e)},[r]),i=(0,y.x1)(a,n.ref);return o?h.cloneElement(n,{ref:i}):n}),k=h.createContext(null);function C(e){return e?Array.isArray(e)?e:[e]:[]}var T=n(73193);function A(e,t,n,r){return t||(n?{motionName:"".concat(e,"-").concat(n)}:r?{motionName:r}:null)}function N(e){return e.ownerDocument.defaultView}function I(e){for(var t=[],n=null==e?void 0:e.parentElement,r=["hidden","scroll","clip","auto"];n;){var o=N(n).getComputedStyle(n);[o.overflowX,o.overflowY,o.overflow].some(function(e){return r.includes(e)})&&t.push(n),n=n.parentElement}return t}function R(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1;return Number.isNaN(e)?t:e}function _(e){return R(parseFloat(e),0)}function P(e,t){var n=(0,r.Z)({},e);return(t||[]).forEach(function(e){if(!(e instanceof HTMLBodyElement||e instanceof HTMLHtmlElement)){var t=N(e).getComputedStyle(e),r=t.overflow,o=t.overflowClipMargin,a=t.borderTopWidth,i=t.borderBottomWidth,l=t.borderLeftWidth,s=t.borderRightWidth,c=e.getBoundingClientRect(),u=e.offsetHeight,d=e.clientHeight,p=e.offsetWidth,f=e.clientWidth,m=_(a),g=_(i),h=_(l),b=_(s),v=R(Math.round(c.width/p*1e3)/1e3),y=R(Math.round(c.height/u*1e3)/1e3),E=m*y,w=h*v,S=0,x=0;if("clip"===r){var O=_(o);S=O*v,x=O*y}var k=c.x+w-S,C=c.y+E-x,T=k+c.width+2*S-w-b*v-(p-f-h-b)*v,A=C+c.height+2*x-E-g*y-(u-d-m-g)*y;n.left=Math.max(n.left,k),n.top=Math.max(n.top,C),n.right=Math.min(n.right,T),n.bottom=Math.min(n.bottom,A)}}),n}function M(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0,n="".concat(t),r=n.match(/^(.*)\%$/);return r?parseFloat(r[1])/100*e:parseFloat(n)}function L(e,t){var n=(0,o.Z)(t||[],2),r=n[0],a=n[1];return[M(e.width,r),M(e.height,a)]}function D(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return[e[0],e[1]]}function j(e,t){var n,r=t[0],o=t[1];return n="t"===r?e.y:"b"===r?e.y+e.height:e.y+e.height/2,{x:"l"===o?e.x:"r"===o?e.x+e.width:e.x+e.width/2,y:n}}function F(e,t){var n={t:"b",b:"t",l:"r",r:"l"};return e.map(function(e,r){return r===t?n[e]||"c":e}).join("")}var B=n(63787);n(53850);var 
Z=n(19223),U=["prefixCls","children","action","showAction","hideAction","popupVisible","defaultPopupVisible","onPopupVisibleChange","afterPopupVisibleChange","mouseEnterDelay","mouseLeaveDelay","focusDelay","blurDelay","mask","maskClosable","getPopupContainer","forceRender","autoDestroy","destroyPopupOnHide","popup","popupClassName","popupStyle","popupPlacement","builtinPlacements","popupAlign","zIndex","stretch","getPopupClassNameFromAlign","fresh","alignPoint","onPopupClick","onPopupAlign","arrow","popupMotion","maskMotion","popupTransitionName","popupAnimation","maskTransitionName","maskAnimation","className","getTriggerDOMNode"],z=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:i.Z;return h.forwardRef(function(t,n){var i,l,b,v,y,E,w,S,_,M,z,H,G,W,$,V,q,Y=t.prefixCls,K=void 0===Y?"rc-trigger-popup":Y,X=t.children,Q=t.action,J=t.showAction,ee=t.hideAction,et=t.popupVisible,en=t.defaultPopupVisible,er=t.onPopupVisibleChange,eo=t.afterPopupVisibleChange,ea=t.mouseEnterDelay,ei=t.mouseLeaveDelay,el=void 0===ei?.1:ei,es=t.focusDelay,ec=t.blurDelay,eu=t.mask,ed=t.maskClosable,ep=t.getPopupContainer,ef=t.forceRender,em=t.autoDestroy,eg=t.destroyPopupOnHide,eh=t.popup,eb=t.popupClassName,ev=t.popupStyle,ey=t.popupPlacement,eE=t.builtinPlacements,ew=void 0===eE?{}:eE,eS=t.popupAlign,ex=t.zIndex,eO=t.stretch,ek=t.getPopupClassNameFromAlign,eC=t.fresh,eT=t.alignPoint,eA=t.onPopupClick,eN=t.onPopupAlign,eI=t.arrow,eR=t.popupMotion,e_=t.maskMotion,eP=t.popupTransitionName,eM=t.popupAnimation,eL=t.maskTransitionName,eD=t.maskAnimation,ej=t.className,eF=t.getTriggerDOMNode,eB=(0,a.Z)(t,U),eZ=h.useState(!1),eU=(0,o.Z)(eZ,2),ez=eU[0],eH=eU[1];(0,m.Z)(function(){eH((0,g.Z)())},[]);var eG=h.useRef({}),eW=h.useContext(k),e$=h.useMemo(function(){return{registerSubPopup:function(e,t){eG.current[e]=t,null==eW||eW.registerSubPopup(e,t)}}},[eW]),eV=(0,f.Z)(),eq=h.useState(null),eY=(0,o.Z)(eq,2),eK=eY[0],eX=eY[1],eQ=(0,p.Z)(function(e){(0,u.S)(e)&&eK!==e&&eX(e),null==eW||eW.registerSubPopup(eV,e)}),eJ=h.useState(null),e0=(0,o.Z)(eJ,2),e1=e0[0],e2=e0[1],e4=h.useRef(null),e3=(0,p.Z)(function(e){(0,u.S)(e)&&e1!==e&&(e2(e),e4.current=e)}),e6=h.Children.only(X),e5=(null==e6?void 0:e6.props)||{},e8={},e9=(0,p.Z)(function(e){var t,n;return(null==e1?void 0:e1.contains(e))||(null===(t=(0,d.A)(e1))||void 0===t?void 0:t.host)===e||e===e1||(null==eK?void 0:eK.contains(e))||(null===(n=(0,d.A)(eK))||void 0===n?void 0:n.host)===e||e===eK||Object.values(eG.current).some(function(t){return(null==t?void 0:t.contains(e))||e===t})}),e7=A(K,eR,eM,eP),te=A(K,e_,eD,eL),tt=h.useState(en||!1),tn=(0,o.Z)(tt,2),tr=tn[0],to=tn[1],ta=null!=et?et:tr,ti=(0,p.Z)(function(e){void 0===et&&to(e)});(0,m.Z)(function(){to(et||!1)},[et]);var tl=h.useRef(ta);tl.current=ta;var ts=h.useRef([]);ts.current=[];var tc=(0,p.Z)(function(e){var t;ti(e),(null!==(t=ts.current[ts.current.length-1])&&void 0!==t?t:ta)!==e&&(ts.current.push(e),null==er||er(e))}),tu=h.useRef(),td=function(){clearTimeout(tu.current)},tp=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0;td(),0===t?tc(e):tu.current=setTimeout(function(){tc(e)},1e3*t)};h.useEffect(function(){return td},[]);var tf=h.useState(!1),tm=(0,o.Z)(tf,2),tg=tm[0],th=tm[1];(0,m.Z)(function(e){(!e||ta)&&th(!0)},[ta]);var 
tb=h.useState(null),tv=(0,o.Z)(tb,2),ty=tv[0],tE=tv[1],tw=h.useState([0,0]),tS=(0,o.Z)(tw,2),tx=tS[0],tO=tS[1],tk=function(e){tO([e.clientX,e.clientY])},tC=(i=eT?tx:e1,l=h.useState({ready:!1,offsetX:0,offsetY:0,offsetR:0,offsetB:0,arrowX:0,arrowY:0,scaleX:1,scaleY:1,align:ew[ey]||{}}),v=(b=(0,o.Z)(l,2))[0],y=b[1],E=h.useRef(0),w=h.useMemo(function(){return eK?I(eK):[]},[eK]),S=h.useRef({}),ta||(S.current={}),_=(0,p.Z)(function(){if(eK&&i&&ta){var e,t,n,a,l,s,c,d=eK.ownerDocument,p=N(eK).getComputedStyle(eK),f=p.width,m=p.height,g=p.position,h=eK.style.left,b=eK.style.top,v=eK.style.right,E=eK.style.bottom,x=eK.style.overflow,O=(0,r.Z)((0,r.Z)({},ew[ey]),eS),k=d.createElement("div");if(null===(e=eK.parentElement)||void 0===e||e.appendChild(k),k.style.left="".concat(eK.offsetLeft,"px"),k.style.top="".concat(eK.offsetTop,"px"),k.style.position=g,k.style.height="".concat(eK.offsetHeight,"px"),k.style.width="".concat(eK.offsetWidth,"px"),eK.style.left="0",eK.style.top="0",eK.style.right="auto",eK.style.bottom="auto",eK.style.overflow="hidden",Array.isArray(i))n={x:i[0],y:i[1],width:0,height:0};else{var C=i.getBoundingClientRect();n={x:C.x,y:C.y,width:C.width,height:C.height}}var A=eK.getBoundingClientRect(),I=d.documentElement,_=I.clientWidth,M=I.clientHeight,B=I.scrollWidth,Z=I.scrollHeight,U=I.scrollTop,z=I.scrollLeft,H=A.height,G=A.width,W=n.height,$=n.width,V=O.htmlRegion,q="visible",Y="visibleFirst";"scroll"!==V&&V!==Y&&(V=q);var K=V===Y,X=P({left:-z,top:-U,right:B-z,bottom:Z-U},w),Q=P({left:0,top:0,right:_,bottom:M},w),J=V===q?Q:X,ee=K?Q:J;eK.style.left="auto",eK.style.top="auto",eK.style.right="0",eK.style.bottom="0";var et=eK.getBoundingClientRect();eK.style.left=h,eK.style.top=b,eK.style.right=v,eK.style.bottom=E,eK.style.overflow=x,null===(t=eK.parentElement)||void 0===t||t.removeChild(k);var en=R(Math.round(G/parseFloat(f)*1e3)/1e3),er=R(Math.round(H/parseFloat(m)*1e3)/1e3);if(!(0===en||0===er||(0,u.S)(i)&&!(0,T.Z)(i))){var eo=O.offset,ea=O.targetOffset,ei=L(A,eo),el=(0,o.Z)(ei,2),es=el[0],ec=el[1],eu=L(n,ea),ed=(0,o.Z)(eu,2),ep=ed[0],ef=ed[1];n.x-=ep,n.y-=ef;var em=O.points||[],eg=(0,o.Z)(em,2),eh=eg[0],eb=D(eg[1]),ev=D(eh),eE=j(n,eb),ex=j(A,ev),eO=(0,r.Z)({},O),ek=eE.x-ex.x+es,eC=eE.y-ex.y+ec,eT=tt(ek,eC),eA=tt(ek,eC,Q),eI=j(n,["t","l"]),eR=j(A,["t","l"]),e_=j(n,["b","r"]),eP=j(A,["b","r"]),eM=O.overflow||{},eL=eM.adjustX,eD=eM.adjustY,ej=eM.shiftX,eF=eM.shiftY,eB=function(e){return"boolean"==typeof e?e:e>=0};tn();var eZ=eB(eD),eU=ev[0]===eb[0];if(eZ&&"t"===ev[0]&&(l>ee.bottom||S.current.bt)){var ez=eC;eU?ez-=H-W:ez=eI.y-eP.y-ec;var eH=tt(ek,ez),eG=tt(ek,ez,Q);eH>eT||eH===eT&&(!K||eG>=eA)?(S.current.bt=!0,eC=ez,ec=-ec,eO.points=[F(ev,0),F(eb,0)]):S.current.bt=!1}if(eZ&&"b"===ev[0]&&(a<ee.top||S.current.tb)){var eW=eC;eU?eW+=H-W:eW=e_.y-eR.y-ec;var e$=tt(ek,eW),eV=tt(ek,eW,Q);e$>eT||e$===eT&&(!K||eV>=eA)?(S.current.tb=!0,eC=eW,ec=-ec,eO.points=[F(ev,0),F(eb,0)]):S.current.tb=!1}var eq=eB(eL),eY=ev[1]===eb[1];if(eq&&"l"===ev[1]&&(c>ee.right||S.current.rl)){var eX=ek;eY?eX-=G-$:eX=eI.x-eP.x-es;var eQ=tt(eX,eC),eJ=tt(eX,eC,Q);eQ>eT||eQ===eT&&(!K||eJ>=eA)?(S.current.rl=!0,ek=eX,es=-es,eO.points=[F(ev,1),F(eb,1)]):S.current.rl=!1}if(eq&&"r"===ev[1]&&(s<ee.left||S.current.lr)){var e0=ek;eY?e0+=G-$:e0=e_.x-eR.x-es;var e1=tt(e0,eC),e2=tt(e0,eC,Q);e1>eT||e1===eT&&(!K||e2>=eA)?(S.current.lr=!0,ek=e0,es=-es,eO.points=[F(ev,1),F(eb,1)]):S.current.lr=!1}tn();var e4=!0===ej?0:ej;"number"==typeof e4&&(s<Q.left&&(ek-=s-Q.left-es,n.x+$<Q.left+e4&&(ek+=n.x-Q.left+$-e4)),c>Q.right&&(ek-=c-Q.right-es,n.x>Q.right-e4&&(ek+=n.x-Q.right+e4)));var e3=!0===eF?0:eF;"number"==typeof e3&&(a<Q.top&&(eC-=a-Q.top-ec,n.y+W<Q.top+e3&&(eC+=n.y-Q.top+W-e3)),l>Q.bottom&&(eC-=l-Q.bottom-ec,n.y>Q.bottom-e3&&(eC+=n.y-Q.bottom+e3)));var e6=A.x+ek,e5=A.y+eC,e8=n.x,e9=n.y;null==eN||eN(eK,eO);var
e7=et.right-A.x-(ek+A.width),te=et.bottom-A.y-(eC+A.height);y({ready:!0,offsetX:ek/en,offsetY:eC/er,offsetR:e7/en,offsetB:te/er,arrowX:((Math.max(e6,e8)+Math.min(e6+G,e8+$))/2-e6)/en,arrowY:((Math.max(e5,e9)+Math.min(e5+H,e9+W))/2-e5)/er,scaleX:en,scaleY:er,align:eO})}function tt(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:J,r=A.x+e,o=A.y+t,a=Math.max(r,n.left),i=Math.max(o,n.top);return Math.max(0,(Math.min(r+G,n.right)-a)*(Math.min(o+H,n.bottom)-i))}function tn(){l=(a=A.y+eC)+H,c=(s=A.x+ek)+G}}}),M=function(){y(function(e){return(0,r.Z)((0,r.Z)({},e),{},{ready:!1})})},(0,m.Z)(M,[ey]),(0,m.Z)(function(){ta||M()},[ta]),[v.ready,v.offsetX,v.offsetY,v.offsetR,v.offsetB,v.arrowX,v.arrowY,v.scaleX,v.scaleY,v.align,function(){E.current+=1;var e=E.current;Promise.resolve().then(function(){E.current===e&&_()})}]),tT=(0,o.Z)(tC,11),tA=tT[0],tN=tT[1],tI=tT[2],tR=tT[3],t_=tT[4],tP=tT[5],tM=tT[6],tL=tT[7],tD=tT[8],tj=tT[9],tF=tT[10],tB=(z=void 0===Q?"hover":Q,h.useMemo(function(){var e=C(null!=J?J:z),t=C(null!=ee?ee:z),n=new Set(e),r=new Set(t);return ez&&(n.has("hover")&&(n.delete("hover"),n.add("click")),r.has("hover")&&(r.delete("hover"),r.add("click"))),[n,r]},[ez,z,J,ee])),tZ=(0,o.Z)(tB,2),tU=tZ[0],tz=tZ[1],tH=tU.has("click"),tG=tz.has("click")||tz.has("contextMenu"),tW=(0,p.Z)(function(){tg||tF()});H=function(){tl.current&&eT&&tG&&tp(!1)},(0,m.Z)(function(){if(ta&&e1&&eK){var e=I(e1),t=I(eK),n=N(eK),r=new Set([n].concat((0,B.Z)(e),(0,B.Z)(t)));function o(){tW(),H()}return r.forEach(function(e){e.addEventListener("scroll",o,{passive:!0})}),n.addEventListener("resize",o,{passive:!0}),tW(),function(){r.forEach(function(e){e.removeEventListener("scroll",o),n.removeEventListener("resize",o)})}}},[ta,e1,eK]),(0,m.Z)(function(){tW()},[tx,ey]),(0,m.Z)(function(){ta&&!(null!=ew&&ew[ey])&&tW()},[JSON.stringify(eS)]);var t$=h.useMemo(function(){var e=function(e,t,n,r){for(var o=n.points,a=Object.keys(e),i=0;i<a.length;i+=1){var l,s=a[i];if(function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[],n=arguments.length>2?arguments[2]:void 0;return n?e[0]===t[0]:e[0]===t[0]&&e[1]===t[1]}(null===(l=e[s])||void 0===l?void 0:l.points,o,r))return"".concat(t,"-placement-").concat(s)}return""}(ew,K,tj,eT);return s()(e,null==ek?void 0:ek(tj))},[tj,ek,ew,K,eT]);h.useImperativeHandle(n,function(){return{nativeElement:e4.current,forceAlign:tW}});var tV=h.useState(0),tq=(0,o.Z)(tV,2),tY=tq[0],tK=tq[1],tX=h.useState(0),tQ=(0,o.Z)(tX,2),tJ=tQ[0],t0=tQ[1],t1=function(){if(eO&&e1){var e=e1.getBoundingClientRect();tK(e.width),t0(e.height)}};function t2(e,t,n,r){e8[e]=function(o){var a;null==r||r(o),tp(t,n);for(var i=arguments.length,l=Array(i>1?i-1:0),s=1;s1?n-1:0),o=1;o1?n-1:0),o=1;o{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2.5"}),o.createElement("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M19 9l-7 7-7-7"}))}},8903:function(e,t,n){n.d(t,{Z:function(){return a}});var r=n(69703),o=n(64090);let a=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 20 20",fill:"currentColor"}),o.createElement("path",{fillRule:"evenodd",d:"M10 18a8 8 0 100-16 8 8 0 000 16zM8.707 7.293a1 1 0 00-1.414 1.414L8.586 10l-1.293 1.293a1 1 0 101.414 1.414L10 11.414l1.293 1.293a1 1 0 001.414-1.414L11.414 10l1.293-1.293a1 1 0 00-1.414-1.414L10 8.586 8.707
7.293z",clipRule:"evenodd"}))}},57750:function(e,t,n){n.d(t,{Z:function(){return eg}});var r=n(69703),o=n(64090),a=n(26587),i=n(65558),l=n(75504),s=n(30638),c=n(80509),u=n.n(c),d=n(5037),p=n.n(d),f=n(71292),m=n.n(f),g=n(96240),h=n.n(g),b=n(93574),v=n.n(b),y=n(72996),E=n(84487),w=n(7986),S=n(71594),x=n(68139),O=n(20757),k=n(9586),C=n(765),T=["layout","type","stroke","connectNulls","isRange","ref"];function A(e){return(A="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function N(){return(N=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(a,T));return o.createElement(w.m,{clipPath:n?"url(#clipPath-".concat(r,")"):null},o.createElement(y.H,N({},(0,C.L6)(d,!0),{points:e,connectNulls:c,type:l,baseLine:t,layout:i,stroke:"none",className:"recharts-area-area"})),"none"!==s&&o.createElement(y.H,N({},(0,C.L6)(this.props,!1),{className:"recharts-area-curve",layout:i,type:l,connectNulls:c,fill:"none",points:e})),"none"!==s&&u&&o.createElement(y.H,N({},(0,C.L6)(this.props,!1),{className:"recharts-area-curve",layout:i,type:l,connectNulls:c,fill:"none",points:t})))}},{key:"renderAreaWithAnimation",value:function(e,t){var n=this,r=this.props,a=r.points,i=r.baseLine,l=r.isAnimationActive,c=r.animationBegin,u=r.animationDuration,d=r.animationEasing,p=r.animationId,f=this.state,g=f.prevPoints,b=f.prevBaseLine;return o.createElement(s.ZP,{begin:c,duration:u,isActive:l,easing:d,from:{t:0},to:{t:1},key:"area-".concat(p),onAnimationEnd:this.handleAnimationEnd,onAnimationStart:this.handleAnimationStart},function(r){var l=r.t;if(g){var s,c=g.length/a.length,u=a.map(function(e,t){var n=Math.floor(t*c);if(g[n]){var r=g[n],o=(0,O.k4)(r.x,e.x),a=(0,O.k4)(r.y,e.y);return R(R({},e),{},{x:o(l),y:a(l)})}return e});return s=(0,O.hj)(i)&&"number"==typeof i?(0,O.k4)(b,i)(l):m()(i)||h()(i)?(0,O.k4)(b,0)(l):i.map(function(e,t){var n=Math.floor(t*c);if(b[n]){var r=b[n],o=(0,O.k4)(r.x,e.x),a=(0,O.k4)(r.y,e.y);return R(R({},e),{},{x:o(l),y:a(l)})}return e}),n.renderAreaStatically(u,s,e,t)}return o.createElement(w.m,null,o.createElement("defs",null,o.createElement("clipPath",{id:"animationClipPath-".concat(t)},n.renderClipRect(l))),o.createElement(w.m,{clipPath:"url(#animationClipPath-".concat(t,")")},n.renderAreaStatically(a,i,e,t)))})}},{key:"renderArea",value:function(e,t){var n=this.props,r=n.points,o=n.baseLine,a=n.isAnimationActive,i=this.state,l=i.prevPoints,s=i.prevBaseLine,c=i.totalLength;return a&&r&&r.length&&(!l&&c>0||!v()(l,r)||!v()(s,o))?this.renderAreaWithAnimation(e,t):this.renderAreaStatically(r,o,e,t)}},{key:"render",value:function(){var e,t=this.props,n=t.hide,r=t.dot,a=t.points,i=t.className,s=t.top,c=t.left,u=t.xAxis,d=t.yAxis,p=t.width,f=t.height,g=t.isAnimationActive,h=t.id;if(n||!a||!a.length)return null;var b=this.state.isAnimationFinished,v=1===a.length,y=(0,l.Z)("recharts-area",i),E=u&&u.allowDataOverflow,x=d&&d.allowDataOverflow,O=E||x,k=m()(h)?this.id:h,T=null!==(e=(0,C.L6)(r,!1))&&void 0!==e?e:{r:3,strokeWidth:2},A=T.r,N=T.strokeWidth,I=((0,C.$k)(r)?r:{}).clipDot,R=void 0===I||I,_=2*(void 0===A?3:A)+(void 0===N?2:N);return 
o.createElement(w.m,{className:y},E||x?o.createElement("defs",null,o.createElement("clipPath",{id:"clipPath-".concat(k)},o.createElement("rect",{x:E?c:c-p/2,y:x?s:s-f/2,width:E?p:2*p,height:x?f:2*f})),!R&&o.createElement("clipPath",{id:"clipPath-dots-".concat(k)},o.createElement("rect",{x:c-_/2,y:s-_/2,width:p+_,height:f+_}))):null,v?null:this.renderArea(O,k),(r||v)&&this.renderDots(O,R,k),(!g||b)&&S.e.renderCallByParent(this.props,a))}}],r=[{key:"getDerivedStateFromProps",value:function(e,t){return e.animationId!==t.prevAnimationId?{prevAnimationId:e.animationId,curPoints:e.points,curBaseLine:e.baseLine,prevPoints:t.curPoints,prevBaseLine:t.curBaseLine}:e.points!==t.curPoints||e.baseLine!==t.curBaseLine?{curPoints:e.points,curBaseLine:e.baseLine}:null}}],n&&_(i.prototype,n),r&&_(i,r),Object.defineProperty(i,"prototype",{writable:!1}),i}(o.PureComponent);D(F,"displayName","Area"),D(F,"defaultProps",{stroke:"#3182bd",fill:"#3182bd",fillOpacity:.6,xAxisId:0,yAxisId:0,legendType:"line",connectNulls:!1,points:[],dot:!1,activeDot:!0,hide:!1,isAnimationActive:!x.x.isSsr,animationBegin:0,animationDuration:1500,animationEasing:"ease"}),D(F,"getBaseValue",function(e,t,n,r){var o=e.layout,a=e.baseValue,i=t.props.baseValue,l=null!=i?i:a;if((0,O.hj)(l)&&"number"==typeof l)return l;var s="horizontal"===o?r:n,c=s.scale.domain();if("number"===s.type){var u=Math.max(c[0],c[1]),d=Math.min(c[0],c[1]);return"dataMin"===l?d:"dataMax"===l?u:u<0?u:Math.max(Math.min(c[0],c[1]),0)}return"dataMin"===l?c[0]:"dataMax"===l?c[1]:c[0]}),D(F,"getComposedData",function(e){var t,n=e.props,r=e.item,o=e.xAxis,a=e.yAxis,i=e.xAxisTicks,l=e.yAxisTicks,s=e.bandSize,c=e.dataKey,u=e.stackedData,d=e.dataStartIndex,p=e.displayedData,f=e.offset,m=n.layout,g=u&&u.length,h=F.getBaseValue(n,r,o,a),b="horizontal"===m,v=!1,y=p.map(function(e,t){g?n=u[d+t]:Array.isArray(n=(0,k.F$)(e,c))?v=!0:n=[h,n];var n,r=null==n[1]||g&&null==(0,k.F$)(e,c);return b?{x:(0,k.Hv)({axis:o,ticks:i,bandSize:s,entry:e,index:t}),y:r?null:a.scale(n[1]),value:n,payload:e}:{x:r?null:o.scale(n[1]),y:(0,k.Hv)({axis:a,ticks:l,bandSize:s,entry:e,index:t}),value:n,payload:e}});return t=g||v?y.map(function(e){var t=Array.isArray(e.value)?e.value[0]:null;return b?{x:e.x,y:null!=t&&null!=e.y?a.scale(t):null}:{x:null!=t?o.scale(t):null,y:e.y}}):b?a.scale(h):o.scale(h),R({points:y,baseLine:t,layout:m,isRange:v},f)}),D(F,"renderDotItem",function(e,t){return o.isValidElement(e)?o.cloneElement(e,t):u()(e)?e(t):o.createElement(E.o,N({},t,{className:"recharts-area-dot"}))});var B=n(23356),Z=n(22983),U=n(12627),z=(0,i.z)({chartName:"AreaChart",GraphicalChild:F,axisComponents:[{axisType:"xAxis",AxisComp:B.K},{axisType:"yAxis",AxisComp:Z.B}],formatAxisMap:U.t9}),H=n(38333),G=n(10166),W=n(94866),$=n(99355),V=["type","layout","connectNulls","ref"];function q(e){return(q="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function Y(){return(Y=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=Array(t);na){s=[].concat(Q(r.slice(0,c)),[a-u]);break}var d=s.length%2==0?[0,l]:[l];return[].concat(Q(i.repeat(r,Math.floor(t/o))),Q(s),d).map(function(e){return"".concat(e,"px")}).join(", 
")}),eo(en(e),"id",(0,O.EL)("recharts-line-")),eo(en(e),"pathRef",function(t){e.mainCurve=t}),eo(en(e),"handleAnimationEnd",function(){e.setState({isAnimationFinished:!0}),e.props.onAnimationEnd&&e.props.onAnimationEnd()}),eo(en(e),"handleAnimationStart",function(){e.setState({isAnimationFinished:!1}),e.props.onAnimationStart&&e.props.onAnimationStart()}),e}return n=[{key:"componentDidMount",value:function(){if(this.props.isAnimationActive){var e=this.getTotalLength();this.setState({totalLength:e})}}},{key:"componentDidUpdate",value:function(){if(this.props.isAnimationActive){var e=this.getTotalLength();e!==this.state.totalLength&&this.setState({totalLength:e})}}},{key:"getTotalLength",value:function(){var e=this.mainCurve;try{return e&&e.getTotalLength&&e.getTotalLength()||0}catch(e){return 0}}},{key:"renderErrorBar",value:function(e,t){if(this.props.isAnimationActive&&!this.state.isAnimationFinished)return null;var n=this.props,r=n.points,a=n.xAxis,i=n.yAxis,l=n.layout,s=n.children,c=(0,C.NN)(s,$.W);if(!c)return null;var u=function(e,t){return{x:e.x,y:e.y,value:e.value,errorVal:(0,k.F$)(e.payload,t)}};return o.createElement(w.m,{clipPath:e?"url(#clipPath-".concat(t,")"):null},c.map(function(e){return o.cloneElement(e,{key:"bar-".concat(e.props.dataKey),data:r,xAxis:a,yAxis:i,layout:l,dataPointFormatter:u})}))}},{key:"renderDots",value:function(e,t,n){if(this.props.isAnimationActive&&!this.state.isAnimationFinished)return null;var r=this.props,a=r.dot,l=r.points,s=r.dataKey,c=(0,C.L6)(this.props,!1),u=(0,C.L6)(a,!0),d=l.map(function(e,t){var n=X(X(X({key:"dot-".concat(t),r:3},c),u),{},{value:e.value,dataKey:s,cx:e.x,cy:e.y,index:t,payload:e.payload});return i.renderDotItem(a,n)}),p={clipPath:e?"url(#clipPath-".concat(t?"":"dots-").concat(n,")"):null};return o.createElement(w.m,Y({className:"recharts-line-dots",key:"dots"},p),d)}},{key:"renderCurveStatically",value:function(e,t,n,r){var a=this.props,i=a.type,l=a.layout,s=a.connectNulls,c=(a.ref,function(e,t){if(null==e)return{};var n,r,o=function(e,t){if(null==e)return{};var n,r,o={},a=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(a,V)),u=X(X(X({},(0,C.L6)(c,!0)),{},{fill:"none",className:"recharts-line-curve",clipPath:t?"url(#clipPath-".concat(n,")"):null,points:e},r),{},{type:i,layout:l,connectNulls:s});return o.createElement(y.H,Y({},u,{pathRef:this.pathRef}))}},{key:"renderCurveWithAnimation",value:function(e,t){var n=this,r=this.props,a=r.points,i=r.strokeDasharray,l=r.isAnimationActive,c=r.animationBegin,u=r.animationDuration,d=r.animationEasing,p=r.animationId,f=r.animateNewValues,m=r.width,g=r.height,h=this.state,b=h.prevPoints,v=h.totalLength;return o.createElement(s.ZP,{begin:c,duration:u,isActive:l,easing:d,from:{t:0},to:{t:1},key:"line-".concat(p),onAnimationEnd:this.handleAnimationEnd,onAnimationStart:this.handleAnimationStart},function(r){var o,l=r.t;if(b){var s=b.length/a.length,c=a.map(function(e,t){var n=Math.floor(t*s);if(b[n]){var r=b[n],o=(0,O.k4)(r.x,e.x),a=(0,O.k4)(r.y,e.y);return X(X({},e),{},{x:o(l),y:a(l)})}if(f){var i=(0,O.k4)(2*m,e.x),c=(0,O.k4)(g/2,e.y);return X(X({},e),{},{x:i(l),y:c(l)})}return X(X({},e),{},{x:e.x,y:e.y})});return n.renderCurveStatically(c,e,t)}var u=(0,O.k4)(0,v)(l);if(i){var d="".concat(i).split(/[,\s]+/gim).map(function(e){return parseFloat(e)});o=n.getStrokeDasharray(u,v,d)}else o=n.generateSimpleStrokeDasharray(v,u);return 
n.renderCurveStatically(a,e,t,{strokeDasharray:o})})}},{key:"renderCurve",value:function(e,t){var n=this.props,r=n.points,o=n.isAnimationActive,a=this.state,i=a.prevPoints,l=a.totalLength;return o&&r&&r.length&&(!i&&l>0||!v()(i,r))?this.renderCurveWithAnimation(e,t):this.renderCurveStatically(r,e,t)}},{key:"render",value:function(){var e,t=this.props,n=t.hide,r=t.dot,a=t.points,i=t.className,s=t.xAxis,c=t.yAxis,u=t.top,d=t.left,p=t.width,f=t.height,g=t.isAnimationActive,h=t.id;if(n||!a||!a.length)return null;var b=this.state.isAnimationFinished,v=1===a.length,y=(0,l.Z)("recharts-line",i),E=s&&s.allowDataOverflow,x=c&&c.allowDataOverflow,O=E||x,k=m()(h)?this.id:h,T=null!==(e=(0,C.L6)(r,!1))&&void 0!==e?e:{r:3,strokeWidth:2},A=T.r,N=T.strokeWidth,I=((0,C.$k)(r)?r:{}).clipDot,R=void 0===I||I,_=2*(void 0===A?3:A)+(void 0===N?2:N);return o.createElement(w.m,{className:y},E||x?o.createElement("defs",null,o.createElement("clipPath",{id:"clipPath-".concat(k)},o.createElement("rect",{x:E?d:d-p/2,y:x?u:u-f/2,width:E?p:2*p,height:x?f:2*f})),!R&&o.createElement("clipPath",{id:"clipPath-dots-".concat(k)},o.createElement("rect",{x:d-_/2,y:u-_/2,width:p+_,height:f+_}))):null,!v&&this.renderCurve(O,k),this.renderErrorBar(O,k),(v||r)&&this.renderDots(O,R,k),(!g||b)&&S.e.renderCallByParent(this.props,a))}}],r=[{key:"getDerivedStateFromProps",value:function(e,t){return e.animationId!==t.prevAnimationId?{prevAnimationId:e.animationId,curPoints:e.points,prevPoints:t.curPoints}:e.points!==t.curPoints?{curPoints:e.points}:null}},{key:"repeat",value:function(e,t){for(var n=e.length%2!=0?[].concat(Q(e),[0]):e,r=[],o=0;o{let{data:n=[],categories:i=[],index:l,stack:s=!1,colors:c=ep.s,valueFormatter:u=em.Cj,startEndOnly:d=!1,showXAxis:p=!0,showYAxis:f=!0,yAxisWidth:m=56,intervalType:g="equidistantPreserveStart",showAnimation:h=!1,animationDuration:b=900,showTooltip:v=!0,showLegend:y=!0,showGridLines:w=!0,showGradient:S=!0,autoMinValue:x=!1,curveType:O="linear",minValue:k,maxValue:C,connectNulls:T=!1,allowDecimals:A=!0,noDataText:N,className:I,onValueChange:R,enableLegendSlider:_=!1,customTooltip:P,rotateLabelX:M,tickGap:L=5}=e,D=(0,r._T)(e,["data","categories","index","stack","colors","valueFormatter","startEndOnly","showXAxis","showYAxis","yAxisWidth","intervalType","showAnimation","animationDuration","showTooltip","showLegend","showGridLines","showGradient","autoMinValue","curveType","minValue","maxValue","connectNulls","allowDecimals","noDataText","className","onValueChange","enableLegendSlider","customTooltip","rotateLabelX","tickGap"]),j=(p||f)&&(!d||f)?20:0,[U,$]=(0,o.useState)(60),[V,q]=(0,o.useState)(void 0),[Y,K]=(0,o.useState)(void 0),X=(0,eu.me)(i,c),Q=(0,eu.i4)(x,k,C),J=!!R;function ee(e){J&&(e===Y&&!V||(0,eu.FB)(n,e)&&V&&V.dataKey===e?(K(void 0),null==R||R(null)):(K(e),null==R||R({eventType:"category",categoryClicked:e})),q(void 0))}return o.createElement("div",Object.assign({ref:t,className:(0,ef.q)("w-full h-80",I)},D),o.createElement(a.h,{className:"h-full w-full"},(null==n?void 0:n.length)?o.createElement(z,{data:n,onClick:J&&(Y||V)?()=>{q(void 0),K(void 0),null==R||R(null)}:void 0},w?o.createElement(H.q,{className:(0,ef.q)("stroke-1","stroke-tremor-border","dark:stroke-dark-tremor-border"),horizontal:!0,vertical:!1}):null,o.createElement(B.K,{padding:{left:j,right:j},hide:!p,dataKey:l,tick:{transform:"translate(0, 6)"},ticks:d?[n[0][l],n[n.length-1][l]]:void 
0,fill:"",stroke:"",className:(0,ef.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),interval:d?"preserveStartEnd":g,tickLine:!1,axisLine:!1,minTickGap:L,angle:null==M?void 0:M.angle,dy:null==M?void 0:M.verticalShift,height:null==M?void 0:M.xAxisHeight}),o.createElement(Z.B,{width:m,hide:!f,axisLine:!1,tickLine:!1,type:"number",domain:Q,tick:{transform:"translate(-3, 0)"},fill:"",stroke:"",className:(0,ef.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickFormatter:u,allowDecimals:A}),o.createElement(G.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,cursor:{stroke:"#d1d5db",strokeWidth:1},content:v?e=>{let{active:t,payload:n,label:r}=e;return P?o.createElement(P,{payload:null==n?void 0:n.map(e=>{var t;return Object.assign(Object.assign({},e),{color:null!==(t=X.get(e.dataKey))&&void 0!==t?t:ed.fr.Gray})}),active:t,label:r}):o.createElement(es.ZP,{active:t,payload:n,label:r,valueFormatter:u,categoryColors:X})}:o.createElement(o.Fragment,null),position:{y:0}}),y?o.createElement(W.D,{verticalAlign:"top",height:U,content:e=>{let{payload:t}=e;return(0,el.Z)({payload:t},X,$,Y,J?e=>ee(e):void 0,_)}}):null,i.map(e=>{var t,n;return o.createElement("defs",{key:e},S?o.createElement("linearGradient",{className:(0,em.bM)(null!==(t=X.get(e))&&void 0!==t?t:ed.fr.Gray,ep.K.text).textColor,id:X.get(e),x1:"0",y1:"0",x2:"0",y2:"1"},o.createElement("stop",{offset:"5%",stopColor:"currentColor",stopOpacity:V||Y&&Y!==e?.15:.4}),o.createElement("stop",{offset:"95%",stopColor:"currentColor",stopOpacity:0})):o.createElement("linearGradient",{className:(0,em.bM)(null!==(n=X.get(e))&&void 0!==n?n:ed.fr.Gray,ep.K.text).textColor,id:X.get(e),x1:"0",y1:"0",x2:"0",y2:"1"},o.createElement("stop",{stopColor:"currentColor",stopOpacity:V||Y&&Y!==e?.1:.3})))}),i.map(e=>{var t;return o.createElement(F,{className:(0,em.bM)(null!==(t=X.get(e))&&void 0!==t?t:ed.fr.Gray,ep.K.text).strokeColor,strokeOpacity:V||Y&&Y!==e?.3:1,activeDot:e=>{var t;let{cx:r,cy:a,stroke:i,strokeLinecap:l,strokeLinejoin:s,strokeWidth:c,dataKey:u}=e;return o.createElement(E.o,{className:(0,ef.q)("stroke-tremor-background dark:stroke-dark-tremor-background",R?"cursor-pointer":"",(0,em.bM)(null!==(t=X.get(u))&&void 0!==t?t:ed.fr.Gray,ep.K.text).fillColor),cx:r,cy:a,r:5,fill:"",stroke:i,strokeLinecap:l,strokeLinejoin:s,strokeWidth:c,onClick:(t,r)=>{r.stopPropagation(),J&&(e.index===(null==V?void 0:V.index)&&e.dataKey===(null==V?void 0:V.dataKey)||(0,eu.FB)(n,e.dataKey)&&Y&&Y===e.dataKey?(K(void 0),q(void 0),null==R||R(null)):(K(e.dataKey),q({index:e.index,dataKey:e.dataKey}),null==R||R(Object.assign({eventType:"dot",categoryClicked:e.dataKey},e.payload))))}})},dot:t=>{var r;let{stroke:a,strokeLinecap:i,strokeLinejoin:l,strokeWidth:s,cx:c,cy:u,dataKey:d,index:p}=t;return(0,eu.FB)(n,e)&&!(V||Y&&Y!==e)||(null==V?void 0:V.index)===p&&(null==V?void 0:V.dataKey)===e?o.createElement(E.o,{key:p,cx:c,cy:u,r:5,stroke:a,fill:"",strokeLinecap:i,strokeLinejoin:l,strokeWidth:s,className:(0,ef.q)("stroke-tremor-background dark:stroke-dark-tremor-background",R?"cursor-pointer":"",(0,em.bM)(null!==(r=X.get(d))&&void 0!==r?r:ed.fr.Gray,ep.K.text).fillColor)}):o.createElement(o.Fragment,{key:p})},key:e,name:e,type:O,dataKey:e,stroke:"",fill:"url(#".concat(X.get(e),")"),strokeWidth:2,strokeLinejoin:"round",strokeLinecap:"round",isAnimationActive:h,animationDuration:b,stackId:s?"a":void 
0,connectNulls:T})}),R?i.map(e=>o.createElement(ei,{className:(0,ef.q)("cursor-pointer"),strokeOpacity:0,key:e,name:e,type:O,dataKey:e,stroke:"transparent",fill:"transparent",legendType:"none",tooltipType:"none",strokeWidth:12,connectNulls:T,onClick:(e,t)=>{t.stopPropagation();let{name:n}=e;ee(n)}})):null):o.createElement(ec.Z,{noDataText:N})))});eg.displayName="AreaChart"},44041:function(e,t,n){n.d(t,{Z:function(){return x}});var r=n(69703),o=n(54942),a=n(2898),i=n(99250),l=n(65492),s=n(64090),c=n(26587),u=n(65558),d=n(28485),p=n(23356),f=n(22983),m=n(12627),g=(0,u.z)({chartName:"BarChart",GraphicalChild:d.$,defaultTooltipEventType:"axis",validateTooltipEventTypes:["axis","item"],axisComponents:[{axisType:"xAxis",AxisComp:p.K},{axisType:"yAxis",AxisComp:f.B}],formatAxisMap:m.t9}),h=n(38333),b=n(10166),v=n(94866),y=n(17280),E=n(30470),w=n(77448),S=n(36342);let x=s.forwardRef((e,t)=>{let{data:n=[],categories:u=[],index:m,colors:x=a.s,valueFormatter:O=l.Cj,layout:k="horizontal",stack:C=!1,relative:T=!1,startEndOnly:A=!1,animationDuration:N=900,showAnimation:I=!1,showXAxis:R=!0,showYAxis:_=!0,yAxisWidth:P=56,intervalType:M="equidistantPreserveStart",showTooltip:L=!0,showLegend:D=!0,showGridLines:j=!0,autoMinValue:F=!1,minValue:B,maxValue:Z,allowDecimals:U=!0,noDataText:z,onValueChange:H,enableLegendSlider:G=!1,customTooltip:W,rotateLabelX:$,tickGap:V=5,className:q}=e,Y=(0,r._T)(e,["data","categories","index","colors","valueFormatter","layout","stack","relative","startEndOnly","animationDuration","showAnimation","showXAxis","showYAxis","yAxisWidth","intervalType","showTooltip","showLegend","showGridLines","autoMinValue","minValue","maxValue","allowDecimals","noDataText","onValueChange","enableLegendSlider","customTooltip","rotateLabelX","tickGap","className"]),K=R||_?20:0,[X,Q]=(0,s.useState)(60),J=(0,S.me)(u,x),[ee,et]=s.useState(void 0),[en,er]=(0,s.useState)(void 0),eo=!!H;function ea(e,t,n){var r,o,a,i;n.stopPropagation(),H&&((0,S.vZ)(ee,Object.assign(Object.assign({},e.payload),{value:e.value}))?(er(void 0),et(void 0),null==H||H(null)):(er(null===(o=null===(r=e.tooltipPayload)||void 0===r?void 0:r[0])||void 0===o?void 0:o.dataKey),et(Object.assign(Object.assign({},e.payload),{value:e.value})),null==H||H(Object.assign({eventType:"bar",categoryClicked:null===(i=null===(a=e.tooltipPayload)||void 0===a?void 0:a[0])||void 0===i?void 0:i.dataKey},e.payload))))}let ei=(0,S.i4)(F,B,Z);return s.createElement("div",Object.assign({ref:t,className:(0,i.q)("w-full h-80",q)},Y),s.createElement(c.h,{className:"h-full w-full"},(null==n?void 0:n.length)?s.createElement(g,{data:n,stackOffset:C?"sign":T?"expand":"none",layout:"vertical"===k?"vertical":"horizontal",onClick:eo&&(en||ee)?()=>{et(void 0),er(void 0),null==H||H(null)}:void 0},j?s.createElement(h.q,{className:(0,i.q)("stroke-1","stroke-tremor-border","dark:stroke-dark-tremor-border"),horizontal:"vertical"!==k,vertical:"vertical"===k}):null,"vertical"!==k?s.createElement(p.K,{padding:{left:K,right:K},hide:!R,dataKey:m,interval:A?"preserveStartEnd":M,tick:{transform:"translate(0, 6)"},ticks:A?[n[0][m],n[n.length-1][m]]:void 0,fill:"",stroke:"",className:(0,i.q)("mt-4 text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickLine:!1,axisLine:!1,angle:null==$?void 0:$.angle,dy:null==$?void 0:$.verticalShift,height:null==$?void 0:$.xAxisHeight,minTickGap:V}):s.createElement(p.K,{hide:!R,type:"number",tick:{transform:"translate(-3, 
0)"},domain:ei,fill:"",stroke:"",className:(0,i.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickLine:!1,axisLine:!1,tickFormatter:O,minTickGap:V,allowDecimals:U,angle:null==$?void 0:$.angle,dy:null==$?void 0:$.verticalShift,height:null==$?void 0:$.xAxisHeight}),"vertical"!==k?s.createElement(f.B,{width:P,hide:!_,axisLine:!1,tickLine:!1,type:"number",domain:ei,tick:{transform:"translate(-3, 0)"},fill:"",stroke:"",className:(0,i.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickFormatter:T?e=>"".concat((100*e).toString()," %"):O,allowDecimals:U}):s.createElement(f.B,{width:P,hide:!_,dataKey:m,axisLine:!1,tickLine:!1,ticks:A?[n[0][m],n[n.length-1][m]]:void 0,type:"category",interval:"preserveStartEnd",tick:{transform:"translate(0, 6)"},fill:"",stroke:"",className:(0,i.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content")}),s.createElement(b.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,cursor:{fill:"#d1d5db",opacity:"0.15"},content:L?e=>{let{active:t,payload:n,label:r}=e;return W?s.createElement(W,{payload:null==n?void 0:n.map(e=>{var t;return Object.assign(Object.assign({},e),{color:null!==(t=J.get(e.dataKey))&&void 0!==t?t:o.fr.Gray})}),active:t,label:r}):s.createElement(E.ZP,{active:t,payload:n,label:r,valueFormatter:O,categoryColors:J})}:s.createElement(s.Fragment,null),position:{y:0}}),D?s.createElement(v.D,{verticalAlign:"top",height:X,content:e=>{let{payload:t}=e;return(0,y.Z)({payload:t},J,Q,en,eo?e=>{eo&&(e!==en||ee?(er(e),null==H||H({eventType:"category",categoryClicked:e})):(er(void 0),null==H||H(null)),et(void 0))}:void 0,G)}}):null,u.map(e=>{var t;return s.createElement(d.$,{className:(0,i.q)((0,l.bM)(null!==(t=J.get(e))&&void 0!==t?t:o.fr.Gray,a.K.background).fillColor,H?"cursor-pointer":""),key:e,name:e,type:"linear",stackId:C||T?"a":void 0,dataKey:e,fill:"",isAnimationActive:I,animationDuration:N,shape:e=>((e,t,n,r)=>{let{fillOpacity:o,name:a,payload:i,value:l}=e,{x:c,width:u,y:d,height:p}=e;return"horizontal"===r&&p<0?(d+=p,p=Math.abs(p)):"vertical"===r&&u<0&&(c+=u,u=Math.abs(u)),s.createElement("rect",{x:c,y:d,width:u,height:p,opacity:t||n&&n!==a?(0,S.vZ)(t,Object.assign(Object.assign({},i),{value:l}))?o:.3:o})})(e,ee,en,k),onClick:ea})})):s.createElement(w.Z,{noDataText:z})))});x.displayName="BarChart"},17280:function(e,t,n){n.d(t,{Z:function(){return g}});var r=n(64090);let o=(e,t)=>{let[n,o]=(0,r.useState)(t);(0,r.useEffect)(()=>{let t=()=>{o(window.innerWidth),e()};return t(),window.addEventListener("resize",t),()=>window.removeEventListener("resize",t)},[e,n])};var a=n(69703),i=n(2898),l=n(99250),s=n(65492);let c=e=>{var t=(0,a._T)(e,[]);return r.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"}),r.createElement("path",{d:"M8 12L14 6V18L8 12Z"}))},u=e=>{var t=(0,a._T)(e,[]);return r.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"}),r.createElement("path",{d:"M16 12L10 18V6L16 12Z"}))},d=(0,s.fn)("Legend"),p=e=>{let{name:t,color:n,onClick:o,activeLegend:a}=e,c=!!o;return r.createElement("li",{className:(0,l.q)(d("legendItem"),"group inline-flex items-center px-2 py-0.5 rounded-tremor-small transition 
whitespace-nowrap",c?"cursor-pointer":"cursor-default","text-tremor-content",c?"hover:bg-tremor-background-subtle":"","dark:text-dark-tremor-content",c?"dark:hover:bg-dark-tremor-background-subtle":""),onClick:e=>{e.stopPropagation(),null==o||o(t,n)}},r.createElement("svg",{className:(0,l.q)("flex-none h-2 w-2 mr-1.5",(0,s.bM)(n,i.K.text).textColor,a&&a!==t?"opacity-40":"opacity-100"),fill:"currentColor",viewBox:"0 0 8 8"},r.createElement("circle",{cx:4,cy:4,r:4})),r.createElement("p",{className:(0,l.q)("whitespace-nowrap truncate text-tremor-default","text-tremor-content",c?"group-hover:text-tremor-content-emphasis":"","dark:text-dark-tremor-content",a&&a!==t?"opacity-40":"opacity-100",c?"dark:group-hover:text-dark-tremor-content-emphasis":"")},t))},f=e=>{let{icon:t,onClick:n,disabled:o}=e,[a,i]=r.useState(!1),s=r.useRef(null);return r.useEffect(()=>(a?s.current=setInterval(()=>{null==n||n()},300):clearInterval(s.current),()=>clearInterval(s.current)),[a,n]),(0,r.useEffect)(()=>{o&&(clearInterval(s.current),i(!1))},[o]),r.createElement("button",{type:"button",className:(0,l.q)(d("legendSliderButton"),"w-5 group inline-flex items-center truncate rounded-tremor-small transition",o?"cursor-not-allowed":"cursor-pointer",o?"text-tremor-content-subtle":"text-tremor-content hover:text-tremor-content-emphasis hover:bg-tremor-background-subtle",o?"dark:text-dark-tremor-subtle":"dark:text-dark-tremor dark:hover:text-tremor-content-emphasis dark:hover:bg-dark-tremor-background-subtle"),disabled:o,onClick:e=>{e.stopPropagation(),null==n||n()},onMouseDown:e=>{e.stopPropagation(),i(!0)},onMouseUp:e=>{e.stopPropagation(),i(!1)}},r.createElement(t,{className:"w-full"}))},m=r.forwardRef((e,t)=>{var n,o;let{categories:s,colors:m=i.s,className:g,onClickLegendItem:h,activeLegend:b,enableLegendSlider:v=!1}=e,y=(0,a._T)(e,["categories","colors","className","onClickLegendItem","activeLegend","enableLegendSlider"]),E=r.useRef(null),[w,S]=r.useState(null),[x,O]=r.useState(null),k=r.useRef(null),C=(0,r.useCallback)(()=>{let e=null==E?void 0:E.current;e&&S({left:e.scrollLeft>0,right:e.scrollWidth-e.clientWidth>e.scrollLeft})},[S]),T=(0,r.useCallback)(e=>{var t;let n=null==E?void 0:E.current,r=null!==(t=null==n?void 0:n.clientWidth)&&void 0!==t?t:0;n&&v&&(n.scrollTo({left:"left"===e?n.scrollLeft-r:n.scrollLeft+r,behavior:"smooth"}),setTimeout(()=>{C()},400))},[v,C]);r.useEffect(()=>{let e=e=>{"ArrowLeft"===e?T("left"):"ArrowRight"===e&&T("right")};return x?(e(x),k.current=setInterval(()=>{e(x)},300)):clearInterval(k.current),()=>clearInterval(k.current)},[x,T]);let A=e=>{e.stopPropagation(),"ArrowLeft"!==e.key&&"ArrowRight"!==e.key||(e.preventDefault(),O(e.key))},N=e=>{e.stopPropagation(),O(null)};return r.useEffect(()=>{let e=null==E?void 0:E.current;return v&&(C(),null==e||e.addEventListener("keydown",A),null==e||e.addEventListener("keyup",N)),()=>{null==e||e.removeEventListener("keydown",A),null==e||e.removeEventListener("keyup",N)}},[C,v]),r.createElement("ol",Object.assign({ref:t,className:(0,l.q)(d("root"),"relative overflow-hidden",g)},y),r.createElement("div",{ref:E,tabIndex:0,className:(0,l.q)("h-full flex",v?(null==w?void 0:w.right)||(null==w?void 0:w.left)?"pl-4 pr-12 items-center overflow-auto snap-mandatory [&::-webkit-scrollbar]:hidden [scrollbar-width:none]":"":"flex-wrap")},s.map((e,t)=>r.createElement(p,{key:"item-".concat(t),name:e,color:m[t],onClick:h,activeLegend:b}))),v&&((null==w?void 0:w.right)||(null==w?void 
0:w.left))?r.createElement(r.Fragment,null,r.createElement("div",{className:(0,l.q)("absolute top-0 bottom-0 left-0 w-4 bg-gradient-to-r from-white to-transparent pointer-events-none")}),r.createElement("div",{className:(0,l.q)("absolute top-0 bottom-0 right-10 w-4 bg-gradient-to-r from-transparent to-white pointer-events-none")}),r.createElement("div",{className:(0,l.q)("absolute flex top-0 pr-1 bottom-0 right-0 items-center justify-center h-full bg-tremor-background")},r.createElement(f,{icon:c,onClick:()=>{O(null),T("left")},disabled:!(null==w?void 0:w.left)}),r.createElement(f,{icon:u,onClick:()=>{O(null),T("right")},disabled:!(null==w?void 0:w.right)}))):null)});m.displayName="Legend";let g=(e,t,n,a,i,l)=>{let{payload:s}=e,c=(0,r.useRef)(null);o(()=>{var e,t;n((t=null===(e=c.current)||void 0===e?void 0:e.clientHeight)?Number(t)+20:60)});let u=s.filter(e=>"none"!==e.type);return r.createElement("div",{ref:c,className:"flex items-center justify-end"},r.createElement(m,{categories:u.map(e=>e.value),colors:u.map(e=>t.get(e.value)),onClickLegendItem:i,activeLegend:a,enableLegendSlider:l}))}},30470:function(e,t,n){n.d(t,{ZP:function(){return u}});var r=n(64090),o=n(54942),a=n(2898),i=n(99250),l=n(65492);let s=e=>{let{children:t}=e;return r.createElement("div",{className:(0,i.q)("rounded-tremor-default text-tremor-default border","bg-tremor-background shadow-tremor-dropdown border-tremor-border","dark:bg-dark-tremor-background dark:shadow-dark-tremor-dropdown dark:border-dark-tremor-border")},t)},c=e=>{let{value:t,name:n,color:o}=e;return r.createElement("div",{className:"flex items-center justify-between space-x-8"},r.createElement("div",{className:"flex items-center space-x-2"},r.createElement("span",{className:(0,i.q)("shrink-0 rounded-tremor-full border-2 h-3 w-3","border-tremor-background shadow-tremor-card","dark:border-dark-tremor-background dark:shadow-dark-tremor-card",(0,l.bM)(o,a.K.background).bgColor)}),r.createElement("p",{className:(0,i.q)("text-right whitespace-nowrap","text-tremor-content","dark:text-dark-tremor-content")},n)),r.createElement("p",{className:(0,i.q)("font-medium tabular-nums text-right whitespace-nowrap","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis")},t))},u=e=>{let{active:t,payload:n,label:a,categoryColors:l,valueFormatter:u}=e;if(t&&n){let e=n.filter(e=>"none"!==e.type);return r.createElement(s,null,r.createElement("div",{className:(0,i.q)("border-tremor-border border-b px-4 py-2","dark:border-dark-tremor-border")},r.createElement("p",{className:(0,i.q)("font-medium","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis")},a)),r.createElement("div",{className:(0,i.q)("px-4 py-2 space-y-1")},e.map((e,t)=>{var n;let{value:a,name:i}=e;return r.createElement(c,{key:"id-".concat(t),value:u(a),name:i,color:null!==(n=l.get(i))&&void 0!==n?n:o.fr.Blue})})))}return null}},77448:function(e,t,n){n.d(t,{Z:function(){return p}});var r=n(99250),o=n(64090),a=n(69703);let 
i=(0,n(65492).fn)("Flex"),l={start:"justify-start",end:"justify-end",center:"justify-center",between:"justify-between",around:"justify-around",evenly:"justify-evenly"},s={start:"items-start",end:"items-end",center:"items-center",baseline:"items-baseline",stretch:"items-stretch"},c={row:"flex-row",col:"flex-col","row-reverse":"flex-row-reverse","col-reverse":"flex-col-reverse"},u=o.forwardRef((e,t)=>{let{flexDirection:n="row",justifyContent:u="between",alignItems:d="center",children:p,className:f}=e,m=(0,a._T)(e,["flexDirection","justifyContent","alignItems","children","className"]);return o.createElement("div",Object.assign({ref:t,className:(0,r.q)(i("root"),"flex w-full",c[n],l[u],s[d],f)},m),p)});u.displayName="Flex";var d=n(71801);let p=e=>{let{noDataText:t="No data"}=e;return o.createElement(u,{alignItems:"center",justifyContent:"center",className:(0,r.q)("w-full h-full border border-dashed rounded-tremor-default","border-tremor-border","dark:border-dark-tremor-border")},o.createElement(d.Z,{className:(0,r.q)("text-tremor-content","dark:text-dark-tremor-content")},t))}},36342:function(e,t,n){n.d(t,{FB:function(){return a},i4:function(){return o},me:function(){return r},vZ:function(){return function e(t,n){if(t===n)return!0;if("object"!=typeof t||"object"!=typeof n||null===t||null===n)return!1;let r=Object.keys(t),o=Object.keys(n);if(r.length!==o.length)return!1;for(let a of r)if(!o.includes(a)||!e(t[a],n[a]))return!1;return!0}}});let r=(e,t)=>{let n=new Map;return e.forEach((e,r)=>{n.set(e,t[r])}),n},o=(e,t,n)=>[e?"auto":null!=t?t:0,null!=n?n:"auto"];function a(e,t){let n=[];for(let r of e)if(Object.prototype.hasOwnProperty.call(r,t)&&(n.push(r[t]),n.length>1))return!1;return!0}},5:function(e,t,n){n.d(t,{Z:function(){return f}});var r=n(69703),o=n(64090),a=n(58437),i=n(54942),l=n(2898),s=n(99250),c=n(65492);let u={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-0.5",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-0.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-0.5",fontSize:"text-lg"},xl:{paddingX:"px-4",paddingY:"py-1",fontSize:"text-xl"}},d={xs:{height:"h-4",width:"w-4"},sm:{height:"h-4",width:"w-4"},md:{height:"h-4",width:"w-4"},lg:{height:"h-5",width:"w-5"},xl:{height:"h-6",width:"w-6"}},p=(0,c.fn)("Badge"),f=o.forwardRef((e,t)=>{let{color:n,icon:f,size:m=i.u8.SM,tooltip:g,className:h,children:b}=e,v=(0,r._T)(e,["color","icon","size","tooltip","className","children"]),y=f||null,{tooltipProps:E,getReferenceProps:w}=(0,a.l)();return o.createElement("span",Object.assign({ref:(0,c.lq)([t,E.refs.setReference]),className:(0,s.q)(p("root"),"w-max flex-shrink-0 inline-flex justify-center items-center cursor-default rounded-tremor-full",n?(0,s.q)((0,c.bM)(n,l.K.background).bgColor,(0,c.bM)(n,l.K.text).textColor,"bg-opacity-20 dark:bg-opacity-25"):(0,s.q)("bg-tremor-brand-muted text-tremor-brand-emphasis","dark:bg-dark-tremor-brand-muted dark:text-dark-tremor-brand-emphasis"),u[m].paddingX,u[m].paddingY,u[m].fontSize,h)},w,v),o.createElement(a.Z,Object.assign({text:g},E)),y?o.createElement(y,{className:(0,s.q)(p("icon"),"shrink-0 -ml-1 mr-1.5",d[m].height,d[m].width)}):null,o.createElement("p",{className:(0,s.q)(p("text"),"text-sm whitespace-nowrap")},b))});f.displayName="Badge"},61244:function(e,t,n){n.d(t,{Z:function(){return g}});var r=n(69703),o=n(64090),a=n(58437),i=n(54942),l=n(99250),s=n(65492),c=n(2898);let 
u={xs:{paddingX:"px-1.5",paddingY:"py-1.5"},sm:{paddingX:"px-1.5",paddingY:"py-1.5"},md:{paddingX:"px-2",paddingY:"py-2"},lg:{paddingX:"px-2",paddingY:"py-2"},xl:{paddingX:"px-2.5",paddingY:"py-2.5"}},d={xs:{height:"h-3",width:"w-3"},sm:{height:"h-5",width:"w-5"},md:{height:"h-5",width:"w-5"},lg:{height:"h-7",width:"w-7"},xl:{height:"h-9",width:"w-9"}},p={simple:{rounded:"",border:"",ring:"",shadow:""},light:{rounded:"rounded-tremor-default",border:"",ring:"",shadow:""},shadow:{rounded:"rounded-tremor-default",border:"border",ring:"",shadow:"shadow-tremor-card dark:shadow-dark-tremor-card"},solid:{rounded:"rounded-tremor-default",border:"border-2",ring:"ring-1",shadow:""},outlined:{rounded:"rounded-tremor-default",border:"border",ring:"ring-2",shadow:""}},f=(e,t)=>{switch(e){case"simple":return{textColor:t?(0,s.bM)(t,c.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:"",borderColor:"",ringColor:""};case"light":return{textColor:t?(0,s.bM)(t,c.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,s.bM)(t,c.K.background).bgColor,"bg-opacity-20"):"bg-tremor-brand-muted dark:bg-dark-tremor-brand-muted",borderColor:"",ringColor:""};case"shadow":return{textColor:t?(0,s.bM)(t,c.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,s.bM)(t,c.K.background).bgColor,"bg-opacity-20"):"bg-tremor-background dark:bg-dark-tremor-background",borderColor:"border-tremor-border dark:border-dark-tremor-border",ringColor:""};case"solid":return{textColor:t?(0,s.bM)(t,c.K.text).textColor:"text-tremor-brand-inverted dark:text-dark-tremor-brand-inverted",bgColor:t?(0,l.q)((0,s.bM)(t,c.K.background).bgColor,"bg-opacity-20"):"bg-tremor-brand dark:bg-dark-tremor-brand",borderColor:"border-tremor-brand-inverted dark:border-dark-tremor-brand-inverted",ringColor:"ring-tremor-ring dark:ring-dark-tremor-ring"};case"outlined":return{textColor:t?(0,s.bM)(t,c.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,s.bM)(t,c.K.background).bgColor,"bg-opacity-20"):"bg-tremor-background dark:bg-dark-tremor-background",borderColor:t?(0,s.bM)(t,c.K.ring).borderColor:"border-tremor-brand-subtle dark:border-dark-tremor-brand-subtle",ringColor:t?(0,l.q)((0,s.bM)(t,c.K.ring).ringColor,"ring-opacity-40"):"ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted"}}},m=(0,s.fn)("Icon"),g=o.forwardRef((e,t)=>{let{icon:n,variant:c="simple",tooltip:g,size:h=i.u8.SM,color:b,className:v}=e,y=(0,r._T)(e,["icon","variant","tooltip","size","color","className"]),E=f(c,b),{tooltipProps:w,getReferenceProps:S}=(0,a.l)();return o.createElement("span",Object.assign({ref:(0,s.lq)([t,w.refs.setReference]),className:(0,l.q)(m("root"),"inline-flex flex-shrink-0 items-center",E.bgColor,E.textColor,E.borderColor,E.ringColor,p[c].rounded,p[c].border,p[c].shadow,p[c].ring,u[h].paddingX,u[h].paddingY,v)},S,y),o.createElement(a.Z,Object.assign({text:g},w)),o.createElement(n,{className:(0,l.q)(m("icon"),"shrink-0",d[h].height,d[h].width)}))});g.displayName="Icon"},16450:function(e,t,n){n.d(t,{Z:function(){return A}});var r=n(69703),o=n(58437),a=n(64090),i=n(70444),l=n(89988),s=n(89542),c={disabled:!1},u=a.createContext(null),d="unmounted",p="exited",f="entering",m="entered",g="exiting",h=function(e){function t(t,n){r=e.call(this,t,n)||this;var r,o,a=n&&!n.isMounting?t.enter:t.appear;return 
r.appearStatus=null,t.in?a?(o=p,r.appearStatus=f):o=m:o=t.unmountOnExit||t.mountOnEnter?d:p,r.state={status:o},r.nextCallback=null,r}t.prototype=Object.create(e.prototype),t.prototype.constructor=t,(0,l.Z)(t,e),t.getDerivedStateFromProps=function(e,t){return e.in&&t.status===d?{status:p}:null};var n=t.prototype;return n.componentDidMount=function(){this.updateStatus(!0,this.appearStatus)},n.componentDidUpdate=function(e){var t=null;if(e!==this.props){var n=this.state.status;this.props.in?n!==f&&n!==m&&(t=f):(n===f||n===m)&&(t=g)}this.updateStatus(!1,t)},n.componentWillUnmount=function(){this.cancelNextCallback()},n.getTimeouts=function(){var e,t,n,r=this.props.timeout;return e=t=n=r,null!=r&&"number"!=typeof r&&(e=r.exit,t=r.enter,n=void 0!==r.appear?r.appear:t),{exit:e,enter:t,appear:n}},n.updateStatus=function(e,t){if(void 0===e&&(e=!1),null!==t){if(this.cancelNextCallback(),t===f){if(this.props.unmountOnExit||this.props.mountOnEnter){var n=this.props.nodeRef?this.props.nodeRef.current:s.findDOMNode(this);n&&n.scrollTop}this.performEnter(e)}else this.performExit()}else this.props.unmountOnExit&&this.state.status===p&&this.setState({status:d})},n.performEnter=function(e){var t=this,n=this.props.enter,r=this.context?this.context.isMounting:e,o=this.props.nodeRef?[r]:[s.findDOMNode(this),r],a=o[0],i=o[1],l=this.getTimeouts(),u=r?l.appear:l.enter;if(!e&&!n||c.disabled){this.safeSetState({status:m},function(){t.props.onEntered(a)});return}this.props.onEnter(a,i),this.safeSetState({status:f},function(){t.props.onEntering(a,i),t.onTransitionEnd(u,function(){t.safeSetState({status:m},function(){t.props.onEntered(a,i)})})})},n.performExit=function(){var e=this,t=this.props.exit,n=this.getTimeouts(),r=this.props.nodeRef?void 0:s.findDOMNode(this);if(!t||c.disabled){this.safeSetState({status:p},function(){e.props.onExited(r)});return}this.props.onExit(r),this.safeSetState({status:g},function(){e.props.onExiting(r),e.onTransitionEnd(n.exit,function(){e.safeSetState({status:p},function(){e.props.onExited(r)})})})},n.cancelNextCallback=function(){null!==this.nextCallback&&(this.nextCallback.cancel(),this.nextCallback=null)},n.safeSetState=function(e,t){t=this.setNextCallback(t),this.setState(e,t)},n.setNextCallback=function(e){var t=this,n=!0;return this.nextCallback=function(r){n&&(n=!1,t.nextCallback=null,e(r))},this.nextCallback.cancel=function(){n=!1},this.nextCallback},n.onTransitionEnd=function(e,t){this.setNextCallback(t);var n=this.props.nodeRef?this.props.nodeRef.current:s.findDOMNode(this),r=null==e&&!this.props.addEndListener;if(!n||r){setTimeout(this.nextCallback,0);return}if(this.props.addEndListener){var o=this.props.nodeRef?[this.nextCallback]:[n,this.nextCallback],a=o[0],i=o[1];this.props.addEndListener(a,i)}null!=e&&setTimeout(this.nextCallback,e)},n.render=function(){var e=this.state.status;if(e===d)return null;var t=this.props,n=t.children,r=(t.in,t.mountOnEnter,t.unmountOnExit,t.appear,t.enter,t.exit,t.timeout,t.addEndListener,t.onEnter,t.onEntering,t.onEntered,t.onExit,t.onExiting,t.onExited,t.nodeRef,(0,i.Z)(t,["children","in","mountOnEnter","unmountOnExit","appear","enter","exit","timeout","addEndListener","onEnter","onEntering","onEntered","onExit","onExiting","onExited","nodeRef"]));return a.createElement(u.Provider,{value:null},"function"==typeof n?n(e,r):a.cloneElement(a.Children.only(n),r))},t}(a.Component);function 
b(){}h.contextType=u,h.propTypes={},h.defaultProps={in:!1,mountOnEnter:!1,unmountOnExit:!1,appear:!1,enter:!0,exit:!0,onEnter:b,onEntering:b,onEntered:b,onExit:b,onExiting:b,onExited:b},h.UNMOUNTED=d,h.EXITED=p,h.ENTERING=f,h.ENTERED=m,h.EXITING=g;var v=n(54942),y=n(99250),E=n(65492);let w=e=>{var t=(0,r._T)(e,[]);return a.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"}),a.createElement("path",{fill:"none",d:"M0 0h24v24H0z"}),a.createElement("path",{d:"M18.364 5.636L16.95 7.05A7 7 0 1 0 19 12h2a9 9 0 1 1-2.636-6.364z"}))};var S=n(2898);let x={xs:{height:"h-4",width:"w-4"},sm:{height:"h-5",width:"w-5"},md:{height:"h-5",width:"w-5"},lg:{height:"h-6",width:"w-6"},xl:{height:"h-6",width:"w-6"}},O=e=>"light"!==e?{xs:{paddingX:"px-2.5",paddingY:"py-1.5",fontSize:"text-xs"},sm:{paddingX:"px-4",paddingY:"py-2",fontSize:"text-sm"},md:{paddingX:"px-4",paddingY:"py-2",fontSize:"text-md"},lg:{paddingX:"px-4",paddingY:"py-2.5",fontSize:"text-lg"},xl:{paddingX:"px-4",paddingY:"py-3",fontSize:"text-xl"}}:{xs:{paddingX:"",paddingY:"",fontSize:"text-xs"},sm:{paddingX:"",paddingY:"",fontSize:"text-sm"},md:{paddingX:"",paddingY:"",fontSize:"text-md"},lg:{paddingX:"",paddingY:"",fontSize:"text-lg"},xl:{paddingX:"",paddingY:"",fontSize:"text-xl"}},k=(e,t)=>{switch(e){case"primary":return{textColor:t?(0,E.bM)("white").textColor:"text-tremor-brand-inverted dark:text-dark-tremor-brand-inverted",hoverTextColor:t?(0,E.bM)("white").textColor:"text-tremor-brand-inverted dark:text-dark-tremor-brand-inverted",bgColor:t?(0,E.bM)(t,S.K.background).bgColor:"bg-tremor-brand dark:bg-dark-tremor-brand",hoverBgColor:t?(0,E.bM)(t,S.K.darkBackground).hoverBgColor:"hover:bg-tremor-brand-emphasis dark:hover:bg-dark-tremor-brand-emphasis",borderColor:t?(0,E.bM)(t,S.K.border).borderColor:"border-tremor-brand dark:border-dark-tremor-brand",hoverBorderColor:t?(0,E.bM)(t,S.K.darkBorder).hoverBorderColor:"hover:border-tremor-brand-emphasis dark:hover:border-dark-tremor-brand-emphasis"};case"secondary":return{textColor:t?(0,E.bM)(t,S.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",hoverTextColor:t?(0,E.bM)(t,S.K.text).textColor:"hover:text-tremor-brand-emphasis dark:hover:text-dark-tremor-brand-emphasis",bgColor:(0,E.bM)("transparent").bgColor,hoverBgColor:t?(0,y.q)((0,E.bM)(t,S.K.background).hoverBgColor,"hover:bg-opacity-20 dark:hover:bg-opacity-20"):"hover:bg-tremor-brand-faint dark:hover:bg-dark-tremor-brand-faint",borderColor:t?(0,E.bM)(t,S.K.border).borderColor:"border-tremor-brand dark:border-dark-tremor-brand"};case"light":return{textColor:t?(0,E.bM)(t,S.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",hoverTextColor:t?(0,E.bM)(t,S.K.darkText).hoverTextColor:"hover:text-tremor-brand-emphasis dark:hover:text-dark-tremor-brand-emphasis",bgColor:(0,E.bM)("transparent").bgColor,borderColor:"",hoverBorderColor:""}}},C=(0,E.fn)("Button"),T=e=>{let{loading:t,iconSize:n,iconPosition:r,Icon:o,needMargin:i,transitionState:l}=e,s=i?r===v.zS.Left?(0,y.q)("-ml-1","mr-1.5"):(0,y.q)("-mr-1","ml-1.5"):"",c=(0,y.q)("w-0 h-0"),u={default:c,entering:c,entered:n,exiting:n,exited:c};return t?a.createElement(w,{className:(0,y.q)(C("icon"),"animate-spin shrink-0",s,u.default,u[l]),style:{transition:"width 
150ms"}}):a.createElement(o,{className:(0,y.q)(C("icon"),"shrink-0",n,s)})},A=a.forwardRef((e,t)=>{let{icon:n,iconPosition:i=v.zS.Left,size:l=v.u8.SM,color:s,variant:c="primary",disabled:u,loading:d=!1,loadingText:p,children:f,tooltip:m,className:g}=e,b=(0,r._T)(e,["icon","iconPosition","size","color","variant","disabled","loading","loadingText","children","tooltip","className"]),w=d||u,S=void 0!==n||d,A=d&&p,N=!(!f&&!A),I=(0,y.q)(x[l].height,x[l].width),R="light"!==c?(0,y.q)("rounded-tremor-default border","shadow-tremor-input","dark:shadow-dark-tremor-input"):"",_=k(c,s),P=O(c)[l],{tooltipProps:M,getReferenceProps:L}=(0,o.l)(300);return a.createElement(h,{in:d,timeout:50},e=>a.createElement("button",Object.assign({ref:(0,E.lq)([t,M.refs.setReference]),className:(0,y.q)(C("root"),"flex-shrink-0 inline-flex justify-center items-center group font-medium outline-none",R,P.paddingX,P.paddingY,P.fontSize,_.textColor,_.bgColor,_.borderColor,_.hoverBorderColor,w?"opacity-50 cursor-not-allowed":(0,y.q)(k(c,s).hoverTextColor,k(c,s).hoverBgColor,k(c,s).hoverBorderColor),g),disabled:w},L,b),a.createElement(o.Z,Object.assign({text:m},M)),S&&i!==v.zS.Right?a.createElement(T,{loading:d,iconSize:I,iconPosition:i,Icon:n,transitionState:e,needMargin:N}):null,A||f?a.createElement("span",{className:(0,y.q)(C("text"),"text-sm whitespace-nowrap")},A?p:f):null,S&&i===v.zS.Right?a.createElement(T,{loading:d,iconSize:I,iconPosition:i,Icon:n,transitionState:e,needMargin:N}):null))});A.displayName="Button"},5474:function(e,t,n){n.d(t,{Z:function(){return eD}});var r,o,a=n(69703),i=n(64090),l=n(73832),s=n(10641),c=n(15740),u=n(92381),d=n(39790),p=n(85235),f=n(71679),m=n(37130),g=n(71454),h=n(31820),b=n(36601),v=n(83839),y=n(37700),E=n(88358),w=n(84152),S=n(48803),x=n(72640),O=n(94819),k=n(18318),C=n(67409),T=((r=T||{})[r.Open=0]="Open",r[r.Closed=1]="Closed",r),A=((o=A||{})[o.TogglePopover=0]="TogglePopover",o[o.ClosePopover=1]="ClosePopover",o[o.SetButton=2]="SetButton",o[o.SetButtonId=3]="SetButtonId",o[o.SetPanel=4]="SetPanel",o[o.SetPanelId=5]="SetPanelId",o);let N={0:e=>{let t={...e,popoverState:(0,x.E)(e.popoverState,{0:1,1:0})};return 0===t.popoverState&&(t.__demoMode=!1),t},1:e=>1===e.popoverState?e:{...e,popoverState:1},2:(e,t)=>e.button===t.button?e:{...e,button:t.button},3:(e,t)=>e.buttonId===t.buttonId?e:{...e,buttonId:t.buttonId},4:(e,t)=>e.panel===t.panel?e:{...e,panel:t.panel},5:(e,t)=>e.panelId===t.panelId?e:{...e,panelId:t.panelId}},I=(0,i.createContext)(null);function R(e){let t=(0,i.useContext)(I);if(null===t){let t=Error("<".concat(e," /> is missing a parent component."));throw Error.captureStackTrace&&Error.captureStackTrace(t,R),t}return t}I.displayName="PopoverContext";let _=(0,i.createContext)(null);function P(e){let t=(0,i.useContext)(_);if(null===t){let t=Error("<".concat(e," /> is missing a parent component."));throw Error.captureStackTrace&&Error.captureStackTrace(t,P),t}return t}_.displayName="PopoverAPIContext";let M=(0,i.createContext)(null);function L(){return(0,i.useContext)(M)}M.displayName="PopoverGroupContext";let D=(0,i.createContext)(null);function j(e,t){return(0,x.E)(t.type,N,e,t)}D.displayName="PopoverPanelContext";let F=k.AN.RenderStrategy|k.AN.Static,B=k.AN.RenderStrategy|k.AN.Static,Z=Object.assign((0,k.yV)(function(e,t){var 
n;let{__demoMode:r=!1,...o}=e,a=(0,i.useRef)(null),u=(0,b.T)(t,(0,b.h)(e=>{a.current=e})),d=(0,i.useRef)([]),g=(0,i.useReducer)(j,{__demoMode:r,popoverState:r?0:1,buttons:d,button:null,buttonId:null,panel:null,panelId:null,beforePanelSentinel:(0,i.createRef)(),afterPanelSentinel:(0,i.createRef)()}),[{popoverState:v,button:y,buttonId:w,panel:O,panelId:C,beforePanelSentinel:T,afterPanelSentinel:A},N]=g,R=(0,m.i)(null!=(n=a.current)?n:y),P=(0,i.useMemo)(()=>{if(!y||!O)return!1;for(let e of document.querySelectorAll("body > *"))if(Number(null==e?void 0:e.contains(y))^Number(null==e?void 0:e.contains(O)))return!0;let e=(0,S.GO)(),t=e.indexOf(y),n=(t+e.length-1)%e.length,r=(t+1)%e.length,o=e[n],a=e[r];return!O.contains(o)&&!O.contains(a)},[y,O]),M=(0,p.E)(w),F=(0,p.E)(C),B=(0,i.useMemo)(()=>({buttonId:M,panelId:F,close:()=>N({type:1})}),[M,F,N]),Z=L(),U=null==Z?void 0:Z.registerPopover,z=(0,s.z)(()=>{var e;return null!=(e=null==Z?void 0:Z.isFocusWithinPopoverGroup())?e:(null==R?void 0:R.activeElement)&&((null==y?void 0:y.contains(R.activeElement))||(null==O?void 0:O.contains(R.activeElement)))});(0,i.useEffect)(()=>null==U?void 0:U(B),[U,B]);let[H,G]=(0,l.k)(),W=(0,h.v)({mainTreeNodeRef:null==Z?void 0:Z.mainTreeNodeRef,portals:H,defaultContainers:[y,O]});(0,c.O)(null==R?void 0:R.defaultView,"focus",e=>{var t,n,r,o;e.target!==window&&e.target instanceof HTMLElement&&0===v&&(z()||y&&O&&(W.contains(e.target)||null!=(n=null==(t=T.current)?void 0:t.contains)&&n.call(t,e.target)||null!=(o=null==(r=A.current)?void 0:r.contains)&&o.call(r,e.target)||N({type:1})))},!0),(0,f.O)(W.resolveContainers,(e,t)=>{N({type:1}),(0,S.sP)(t,S.tJ.Loose)||(e.preventDefault(),null==y||y.focus())},0===v);let $=(0,s.z)(e=>{N({type:1});let t=e?e instanceof HTMLElement?e:"current"in e&&e.current instanceof HTMLElement?e.current:y:y;null==t||t.focus()}),V=(0,i.useMemo)(()=>({close:$,isPortalled:P}),[$,P]),q=(0,i.useMemo)(()=>({open:0===v,close:$}),[v,$]);return i.createElement(D.Provider,{value:null},i.createElement(I.Provider,{value:g},i.createElement(_.Provider,{value:V},i.createElement(E.up,{value:(0,x.E)(v,{0:E.ZM.Open,1:E.ZM.Closed})},i.createElement(G,null,(0,k.sY)({ourProps:{ref:u},theirProps:o,slot:q,defaultTag:"div",name:"Popover"}),i.createElement(W.MainTreeNode,null))))))}),{Button:(0,k.yV)(function(e,t){let n=(0,u.M)(),{id:r="headlessui-popover-button-".concat(n),...o}=e,[a,l]=R("Popover.Button"),{isPortalled:c}=P("Popover.Button"),d=(0,i.useRef)(null),p="headlessui-focus-sentinel-".concat((0,u.M)()),f=L(),h=null==f?void 0:f.closeOthers,E=null!==(0,i.useContext)(D);(0,i.useEffect)(()=>{if(!E)return l({type:3,buttonId:r}),()=>{l({type:3,buttonId:null})}},[E,r,l]);let[O]=(0,i.useState)(()=>Symbol()),T=(0,b.T)(d,t,E?null:e=>{if(e)a.buttons.current.push(O);else{let e=a.buttons.current.indexOf(O);-1!==e&&a.buttons.current.splice(e,1)}a.buttons.current.length>1&&console.warn("You are already using a but only 1 is supported."),e&&l({type:2,button:e})}),A=(0,b.T)(d,t),N=(0,m.i)(d),I=(0,s.z)(e=>{var t,n,r;if(E){if(1===a.popoverState)return;switch(e.key){case C.R.Space:case C.R.Enter:e.preventDefault(),null==(n=(t=e.target).click)||n.call(t),l({type:1}),null==(r=a.button)||r.focus()}}else switch(e.key){case C.R.Space:case C.R.Enter:e.preventDefault(),e.stopPropagation(),1===a.popoverState&&(null==h||h(a.buttonId)),l({type:0});break;case C.R.Escape:if(0!==a.popoverState)return null==h?void 
0:h(a.buttonId);if(!d.current||null!=N&&N.activeElement&&!d.current.contains(N.activeElement))return;e.preventDefault(),e.stopPropagation(),l({type:1})}}),_=(0,s.z)(e=>{E||e.key===C.R.Space&&e.preventDefault()}),M=(0,s.z)(t=>{var n,r;(0,w.P)(t.currentTarget)||e.disabled||(E?(l({type:1}),null==(n=a.button)||n.focus()):(t.preventDefault(),t.stopPropagation(),1===a.popoverState&&(null==h||h(a.buttonId)),l({type:0}),null==(r=a.button)||r.focus()))}),j=(0,s.z)(e=>{e.preventDefault(),e.stopPropagation()}),F=0===a.popoverState,B=(0,i.useMemo)(()=>({open:F}),[F]),Z=(0,g.f)(e,d),U=E?{ref:A,type:Z,onKeyDown:I,onClick:M}:{ref:T,id:a.buttonId,type:Z,"aria-expanded":0===a.popoverState,"aria-controls":a.panel?a.panelId:void 0,onKeyDown:I,onKeyUp:_,onClick:M,onMouseDown:j},z=(0,v.l)(),H=(0,s.z)(()=>{let e=a.panel;e&&(0,x.E)(z.current,{[v.N.Forwards]:()=>(0,S.jA)(e,S.TO.First),[v.N.Backwards]:()=>(0,S.jA)(e,S.TO.Last)})===S.fE.Error&&(0,S.jA)((0,S.GO)().filter(e=>"true"!==e.dataset.headlessuiFocusGuard),(0,x.E)(z.current,{[v.N.Forwards]:S.TO.Next,[v.N.Backwards]:S.TO.Previous}),{relativeTo:a.button})});return i.createElement(i.Fragment,null,(0,k.sY)({ourProps:U,theirProps:o,slot:B,defaultTag:"button",name:"Popover.Button"}),F&&!E&&c&&i.createElement(y._,{id:p,features:y.A.Focusable,"data-headlessui-focus-guard":!0,as:"button",type:"button",onFocus:H}))}),Overlay:(0,k.yV)(function(e,t){let n=(0,u.M)(),{id:r="headlessui-popover-overlay-".concat(n),...o}=e,[{popoverState:a},l]=R("Popover.Overlay"),c=(0,b.T)(t),d=(0,E.oJ)(),p=null!==d?(d&E.ZM.Open)===E.ZM.Open:0===a,f=(0,s.z)(e=>{if((0,w.P)(e.currentTarget))return e.preventDefault();l({type:1})}),m=(0,i.useMemo)(()=>({open:0===a}),[a]);return(0,k.sY)({ourProps:{ref:c,id:r,"aria-hidden":!0,onClick:f},theirProps:o,slot:m,defaultTag:"div",features:F,visible:p,name:"Popover.Overlay"})}),Panel:(0,k.yV)(function(e,t){let n=(0,u.M)(),{id:r="headlessui-popover-panel-".concat(n),focus:o=!1,...a}=e,[l,c]=R("Popover.Panel"),{close:p,isPortalled:f}=P("Popover.Panel"),g="headlessui-focus-sentinel-before-".concat((0,u.M)()),h="headlessui-focus-sentinel-after-".concat((0,u.M)()),w=(0,i.useRef)(null),O=(0,b.T)(w,t,e=>{c({type:4,panel:e})}),T=(0,m.i)(w),A=(0,k.Y2)();(0,d.e)(()=>(c({type:5,panelId:r}),()=>{c({type:5,panelId:null})}),[r,c]);let N=(0,E.oJ)(),I=null!==N?(N&E.ZM.Open)===E.ZM.Open:0===l.popoverState,_=(0,s.z)(e=>{var t;if(e.key===C.R.Escape){if(0!==l.popoverState||!w.current||null!=T&&T.activeElement&&!w.current.contains(T.activeElement))return;e.preventDefault(),e.stopPropagation(),c({type:1}),null==(t=l.button)||t.focus()}});(0,i.useEffect)(()=>{var t;e.static||1===l.popoverState&&(null==(t=e.unmount)||t)&&c({type:4,panel:null})},[l.popoverState,e.unmount,e.static,c]),(0,i.useEffect)(()=>{if(l.__demoMode||!o||0!==l.popoverState||!w.current)return;let e=null==T?void 0:T.activeElement;w.current.contains(e)||(0,S.jA)(w.current,S.TO.First)},[l.__demoMode,o,w,l.popoverState]);let M=(0,i.useMemo)(()=>({open:0===l.popoverState,close:p}),[l,p]),L={ref:O,id:r,onKeyDown:_,onBlur:o&&0===l.popoverState?e=>{var t,n,r,o,a;let i=e.relatedTarget;i&&w.current&&(null!=(t=w.current)&&t.contains(i)||(c({type:1}),(null!=(r=null==(n=l.beforePanelSentinel.current)?void 0:n.contains)&&r.call(n,i)||null!=(a=null==(o=l.afterPanelSentinel.current)?void 0:o.contains)&&a.call(o,i))&&i.focus({preventScroll:!0})))}:void 0,tabIndex:-1},j=(0,v.l)(),F=(0,s.z)(()=>{let e=w.current;e&&(0,x.E)(j.current,{[v.N.Forwards]:()=>{var 
t;(0,S.jA)(e,S.TO.First)===S.fE.Error&&(null==(t=l.afterPanelSentinel.current)||t.focus())},[v.N.Backwards]:()=>{var e;null==(e=l.button)||e.focus({preventScroll:!0})}})}),Z=(0,s.z)(()=>{let e=w.current;e&&(0,x.E)(j.current,{[v.N.Forwards]:()=>{var e;if(!l.button)return;let t=(0,S.GO)(),n=t.indexOf(l.button),r=t.slice(0,n+1),o=[...t.slice(n+1),...r];for(let t of o.slice())if("true"===t.dataset.headlessuiFocusGuard||null!=(e=l.panel)&&e.contains(t)){let e=o.indexOf(t);-1!==e&&o.splice(e,1)}(0,S.jA)(o,S.TO.First,{sorted:!1})},[v.N.Backwards]:()=>{var t;(0,S.jA)(e,S.TO.Previous)===S.fE.Error&&(null==(t=l.button)||t.focus())}})});return i.createElement(D.Provider,{value:r},I&&f&&i.createElement(y._,{id:g,ref:l.beforePanelSentinel,features:y.A.Focusable,"data-headlessui-focus-guard":!0,as:"button",type:"button",onFocus:F}),(0,k.sY)({mergeRefs:A,ourProps:L,theirProps:a,slot:M,defaultTag:"div",features:B,visible:I,name:"Popover.Panel"}),I&&f&&i.createElement(y._,{id:h,ref:l.afterPanelSentinel,features:y.A.Focusable,"data-headlessui-focus-guard":!0,as:"button",type:"button",onFocus:Z}))}),Group:(0,k.yV)(function(e,t){let n=(0,i.useRef)(null),r=(0,b.T)(n,t),[o,a]=(0,i.useState)([]),l=(0,h.H)(),c=(0,s.z)(e=>{a(t=>{let n=t.indexOf(e);if(-1!==n){let e=t.slice();return e.splice(n,1),e}return t})}),u=(0,s.z)(e=>(a(t=>[...t,e]),()=>c(e))),d=(0,s.z)(()=>{var e;let t=(0,O.r)(n);if(!t)return!1;let r=t.activeElement;return!!(null!=(e=n.current)&&e.contains(r))||o.some(e=>{var n,o;return(null==(n=t.getElementById(e.buttonId.current))?void 0:n.contains(r))||(null==(o=t.getElementById(e.panelId.current))?void 0:o.contains(r))})}),p=(0,s.z)(e=>{for(let t of o)t.buttonId.current!==e&&t.close()}),f=(0,i.useMemo)(()=>({registerPopover:u,unregisterPopover:c,isFocusWithinPopoverGroup:d,closeOthers:p,mainTreeNodeRef:l.mainTreeNodeRef}),[u,c,d,p,l.mainTreeNodeRef]),m=(0,i.useMemo)(()=>({}),[]);return i.createElement(M.Provider,{value:f},(0,k.sY)({ourProps:{ref:r},theirProps:e,slot:m,defaultTag:"div",name:"Popover.Group"}),i.createElement(l.MainTreeNode,null))})});var U=n(70129),z=n(25163);let H=e=>{var t=(0,a._T)(e,[]);return i.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 20 20",fill:"currentColor"}),i.createElement("path",{fillRule:"evenodd",d:"M6 2a1 1 0 00-1 1v1H4a2 2 0 00-2 2v10a2 2 0 002 2h12a2 2 0 002-2V6a2 2 0 00-2-2h-1V3a1 1 0 10-2 0v1H7V3a1 1 0 00-1-1zm0 5a1 1 0 000 2h8a1 1 0 100-2H6z",clipRule:"evenodd"}))};var G=n(8903),W=n(49492);function $(){return(0,W.Z)(Date.now())}var V=n(32633),q=n(99250),Y=n(91753),K=n(74416),X=n(50295),Q=n(6976),J=n(13256),ee=n(68309),et=n(84120),en=n(27552);function er(e,t){if((0,en.Z)(2,arguments),!t||"object"!==(0,Q.Z)(t))return new Date(NaN);var n=t.years?(0,ee.Z)(t.years):0,r=t.months?(0,ee.Z)(t.months):0,o=t.weeks?(0,ee.Z)(t.weeks):0,a=t.days?(0,ee.Z)(t.days):0,i=t.hours?(0,ee.Z)(t.hours):0,l=t.minutes?(0,ee.Z)(t.minutes):0,s=t.seconds?(0,ee.Z)(t.seconds):0,c=function(e,t){(0,en.Z)(2,arguments);var n=(0,ee.Z)(t);return(0,et.Z)(e,-n)}(e,r+12*n);return new Date((0,J.Z)(c,a+7*o).getTime()-1e3*(s+60*(l+60*i)))}var eo=n(8053),ea=n(68005),ei=n(22893),el=n(65492);let es=(0,el.fn)("DateRangePicker"),ec=(e,t,n,r)=>{var o;if(n&&(e=null===(o=r.get(n))||void 0===o?void 0:o.from),e)return(0,W.Z)(e&&!t?e:(0,K.Z)([e,t]))},eu=(e,t,n,r)=>{var o,a;if(n&&(e=(0,W.Z)(null!==(a=null===(o=r.get(n))||void 0===o?void 0:o.to)&&void 0!==a?a:$())),e)return(0,W.Z)(e&&!t?e:(0,X.Z)([e,t]))},ed=[{value:"tdy",text:"Today",from:$()},{value:"w",text:"Last 7 
days",from:er($(),{days:7})},{value:"t",text:"Last 30 days",from:er($(),{days:30})},{value:"m",text:"Month to Date",from:(0,V.Z)($())},{value:"y",text:"Year to Date",from:(0,eo.Z)($())}],ep=(e,t,n,r)=>{let o=(null==n?void 0:n.code)||"en-US";if(!e&&!t)return"";if(e&&!t)return r?(0,ea.Z)(e,r):e.toLocaleDateString(o,{year:"numeric",month:"short",day:"numeric"});if(e&&t){if(function(e,t){(0,en.Z)(2,arguments);var n=(0,ei.Z)(e),r=(0,ei.Z)(t);return n.getTime()===r.getTime()}(e,t))return r?(0,ea.Z)(e,r):e.toLocaleDateString(o,{year:"numeric",month:"short",day:"numeric"});if(e.getMonth()===t.getMonth()&&e.getFullYear()===t.getFullYear())return r?"".concat((0,ea.Z)(e,r)," - ").concat((0,ea.Z)(t,r)):"".concat(e.toLocaleDateString(o,{month:"short",day:"numeric"})," - \n ").concat(t.getDate(),", ").concat(t.getFullYear());{if(r)return"".concat((0,ea.Z)(e,r)," - ").concat((0,ea.Z)(t,r));let n={year:"numeric",month:"short",day:"numeric"};return"".concat(e.toLocaleDateString(o,n)," - \n ").concat(t.toLocaleDateString(o,n))}}return""};var ef=n(26463);let em=e=>{var t=(0,a._T)(e,[]);return i.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2.5"},t),i.createElement("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M15 19l-7-7 7-7"}))},eg=e=>{var t=(0,a._T)(e,[]);return i.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2.5"},t),i.createElement("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M9 5l7 7-7 7"}))},eh=e=>{var t=(0,a._T)(e,[]);return i.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2.5"}),i.createElement("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M11 19l-7-7 7-7m8 14l-7-7 7-7"}))},eb=e=>{var t=(0,a._T)(e,[]);return i.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2.5"}),i.createElement("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M13 5l7 7-7 7M5 5l7 7-7 7"}))};var ev=n(45503),ey=n(71801);n(5);var eE=n(58437),ew=n(54942),eS=n(2898);let 
ex={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-1",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-1.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-1.5",fontSize:"text-lg"},xl:{paddingX:"px-3.5",paddingY:"py-1.5",fontSize:"text-xl"}},eO={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-0.5",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-0.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-0.5",fontSize:"text-lg"},xl:{paddingX:"px-4",paddingY:"py-1",fontSize:"text-xl"}},ek={xs:{height:"h-4",width:"w-4"},sm:{height:"h-4",width:"w-4"},md:{height:"h-4",width:"w-4"},lg:{height:"h-5",width:"w-5"},xl:{height:"h-6",width:"w-6"}},eC={[ew.wu.Increase]:{bgColor:(0,el.bM)(ew.fr.Emerald,eS.K.background).bgColor,textColor:(0,el.bM)(ew.fr.Emerald,eS.K.text).textColor},[ew.wu.ModerateIncrease]:{bgColor:(0,el.bM)(ew.fr.Emerald,eS.K.background).bgColor,textColor:(0,el.bM)(ew.fr.Emerald,eS.K.text).textColor},[ew.wu.Decrease]:{bgColor:(0,el.bM)(ew.fr.Rose,eS.K.background).bgColor,textColor:(0,el.bM)(ew.fr.Rose,eS.K.text).textColor},[ew.wu.ModerateDecrease]:{bgColor:(0,el.bM)(ew.fr.Rose,eS.K.background).bgColor,textColor:(0,el.bM)(ew.fr.Rose,eS.K.text).textColor},[ew.wu.Unchanged]:{bgColor:(0,el.bM)(ew.fr.Orange,eS.K.background).bgColor,textColor:(0,el.bM)(ew.fr.Orange,eS.K.text).textColor}},eT={[ew.wu.Increase]:e=>{var t=(0,a._T)(e,[]);return i.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",width:"24",height:"24"}),i.createElement("path",{fill:"none",d:"M0 0h24v24H0z"}),i.createElement("path",{fill:"currentColor",d:"M13 7.828V20h-2V7.828l-5.364 5.364-1.414-1.414L12 4l7.778 7.778-1.414 1.414L13 7.828z"}))},[ew.wu.ModerateIncrease]:e=>{var t=(0,a._T)(e,[]);return i.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",width:"24",height:"24"}),i.createElement("path",{fill:"none",d:"M0 0h24v24H0z"}),i.createElement("path",{fill:"currentColor",d:"M16.004 9.414l-8.607 8.607-1.414-1.414L14.589 8H7.004V6h11v11h-2V9.414z"}))},[ew.wu.Decrease]:e=>{var t=(0,a._T)(e,[]);return i.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",width:"24",height:"24"}),i.createElement("path",{fill:"none",d:"M0 0h24v24H0z"}),i.createElement("path",{fill:"currentColor",d:"M13 16.172l5.364-5.364 1.414 1.414L12 20l-7.778-7.778 1.414-1.414L11 16.172V4h2v12.172z"}))},[ew.wu.ModerateDecrease]:e=>{var t=(0,a._T)(e,[]);return i.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",width:"24",height:"24"}),i.createElement("path",{fill:"none",d:"M0 0h24v24H0z"}),i.createElement("path",{fill:"currentColor",d:"M14.59 16.004L5.982 7.397l1.414-1.414 8.607 8.606V7.004h2v11h-11v-2z"}))},[ew.wu.Unchanged]:e=>{var t=(0,a._T)(e,[]);return i.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",width:"24",height:"24"}),i.createElement("path",{fill:"none",d:"M0 0h24v24H0z"}),i.createElement("path",{fill:"currentColor",d:"M16.172 11l-5.364-5.364 1.414-1.414L20 12l-7.778 7.778-1.414-1.414L16.172 
13H4v-2z"}))}},eA=(0,el.fn)("BadgeDelta");i.forwardRef((e,t)=>{let{deltaType:n=ew.wu.Increase,isIncreasePositive:r=!0,size:o=ew.u8.SM,tooltip:l,children:s,className:c}=e,u=(0,a._T)(e,["deltaType","isIncreasePositive","size","tooltip","children","className"]),d=eT[n],p=(0,el.Fo)(n,r),f=s?eO:ex,{tooltipProps:m,getReferenceProps:g}=(0,eE.l)();return i.createElement("span",Object.assign({ref:(0,el.lq)([t,m.refs.setReference]),className:(0,q.q)(eA("root"),"w-max flex-shrink-0 inline-flex justify-center items-center cursor-default rounded-tremor-full bg-opacity-20 dark:bg-opacity-25",eC[p].bgColor,eC[p].textColor,f[o].paddingX,f[o].paddingY,f[o].fontSize,c)},g,u),i.createElement(eE.Z,Object.assign({text:l},m)),i.createElement(d,{className:(0,q.q)(eA("icon"),"shrink-0",s?(0,q.q)("-ml-1 mr-1.5"):ek[o].height,ek[o].width)}),s?i.createElement("p",{className:(0,q.q)(eA("text"),"text-sm whitespace-nowrap")},s):null)}).displayName="BadgeDelta";var eN=n(61244);let eI=e=>{var{onClick:t,icon:n}=e,r=(0,a._T)(e,["onClick","icon"]);return i.createElement("button",Object.assign({type:"button",className:(0,q.q)("flex items-center justify-center p-1 h-7 w-7 outline-none focus:ring-2 transition duration-100 border border-tremor-border dark:border-dark-tremor-border hover:bg-tremor-background-muted dark:hover:bg-dark-tremor-background-muted rounded-tremor-small focus:border-tremor-brand-subtle select-none dark:focus:border-dark-tremor-brand-subtle focus:ring-tremor-brand-muted dark:focus:ring-dark-tremor-brand-muted text-tremor-content-subtle dark:text-dark-tremor-content-subtle hover:text-tremor-content dark:hover:text-dark-tremor-content")},r),i.createElement(eN.Z,{onClick:t,icon:n,variant:"simple",color:"slate",size:"xs"}))};function eR(e){var{mode:t,defaultMonth:n,selected:r,onSelect:o,locale:l,disabled:s,enableYearNavigation:c,classNames:u,weekStartsOn:d=0}=e,p=(0,a._T)(e,["mode","defaultMonth","selected","onSelect","locale","disabled","enableYearNavigation","classNames","weekStartsOn"]);return i.createElement(ef._W,Object.assign({showOutsideDays:!0,mode:t,defaultMonth:n,selected:r,onSelect:o,locale:l,disabled:s,weekStartsOn:d,classNames:Object.assign({months:"flex flex-col sm:flex-row space-y-4 sm:space-x-4 sm:space-y-0",month:"space-y-4",caption:"flex justify-center pt-2 relative items-center",caption_label:"text-tremor-default text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis font-medium",nav:"space-x-1 flex items-center",nav_button:"flex items-center justify-center p-1 h-7 w-7 outline-none focus:ring-2 transition duration-100 border border-tremor-border dark:border-dark-tremor-border hover:bg-tremor-background-muted dark:hover:bg-dark-tremor-background-muted rounded-tremor-small focus:border-tremor-brand-subtle dark:focus:border-dark-tremor-brand-subtle focus:ring-tremor-brand-muted dark:focus:ring-dark-tremor-brand-muted text-tremor-content-subtle dark:text-dark-tremor-content-subtle hover:text-tremor-content dark:hover:text-dark-tremor-content",nav_button_previous:"absolute left-1",nav_button_next:"absolute right-1",table:"w-full border-collapse space-y-1",head_row:"flex",head_cell:"w-9 font-normal text-center text-tremor-content-subtle dark:text-dark-tremor-content-subtle",row:"flex w-full mt-0.5",cell:"text-center p-0 relative focus-within:relative text-tremor-default text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis",day:"h-9 w-9 p-0 hover:bg-tremor-background-subtle dark:hover:bg-dark-tremor-background-subtle outline-tremor-brand dark:outline-dark-tremor-brand 
rounded-tremor-default",day_today:"font-bold",day_selected:"aria-selected:bg-tremor-background-emphasis aria-selected:text-tremor-content-inverted dark:aria-selected:bg-dark-tremor-background-emphasis dark:aria-selected:text-dark-tremor-content-inverted ",day_disabled:"text-tremor-content-subtle dark:text-dark-tremor-content-subtle disabled:hover:bg-transparent",day_outside:"text-tremor-content-subtle dark:text-dark-tremor-content-subtle"},u),components:{IconLeft:e=>{var t=(0,a._T)(e,[]);return i.createElement(em,Object.assign({className:"h-4 w-4"},t))},IconRight:e=>{var t=(0,a._T)(e,[]);return i.createElement(eg,Object.assign({className:"h-4 w-4"},t))},Caption:e=>{var t=(0,a._T)(e,[]);let{goToMonth:n,nextMonth:r,previousMonth:o,currentMonth:s}=(0,ef.HJ)();return i.createElement("div",{className:"flex justify-between items-center"},i.createElement("div",{className:"flex items-center space-x-1"},c&&i.createElement(eI,{onClick:()=>s&&n((0,ev.Z)(s,-1)),icon:eh}),i.createElement(eI,{onClick:()=>o&&n(o),icon:em})),i.createElement(ey.Z,{className:"text-tremor-default tabular-nums capitalize text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis font-medium"},(0,ea.Z)(t.displayMonth,"LLLL yyy",{locale:l})),i.createElement("div",{className:"flex items-center space-x-1"},i.createElement(eI,{onClick:()=>r&&n(r),icon:eg}),c&&i.createElement(eI,{onClick:()=>s&&n((0,ev.Z)(s,1)),icon:eb})))}}},p))}eR.displayName="DateRangePicker",n(95093);var e_=n(27166),eP=n(82985),eM=n(46457);let eL=$(),eD=i.forwardRef((e,t)=>{var n,r;let{value:o,defaultValue:l,onValueChange:s,enableSelect:c=!0,minDate:u,maxDate:d,placeholder:p="Select range",selectPlaceholder:f="Select range",disabled:m=!1,locale:g=eP.Z,enableClear:h=!0,displayFormat:b,children:v,className:y,enableYearNavigation:E=!1,weekStartsOn:w=0,disabledDates:S}=e,x=(0,a._T)(e,["value","defaultValue","onValueChange","enableSelect","minDate","maxDate","placeholder","selectPlaceholder","disabled","locale","enableClear","displayFormat","children","className","enableYearNavigation","weekStartsOn","disabledDates"]),[O,k]=(0,eM.Z)(l,o),[C,T]=(0,i.useState)(!1),[A,N]=(0,i.useState)(!1),I=(0,i.useMemo)(()=>{let e=[];return u&&e.push({before:u}),d&&e.push({after:d}),[...e,...null!=S?S:[]]},[u,d,S]),R=(0,i.useMemo)(()=>{let e=new Map;return v?i.Children.forEach(v,t=>{var n;e.set(t.props.value,{text:null!==(n=(0,Y.qg)(t))&&void 0!==n?n:t.props.value,from:t.props.from,to:t.props.to})}):ed.forEach(t=>{e.set(t.value,{text:t.text,from:t.from,to:eL})}),e},[v]),_=(0,i.useMemo)(()=>{if(v)return(0,Y.sl)(v);let e=new Map;return ed.forEach(t=>e.set(t.value,t.text)),e},[v]),P=(null==O?void 0:O.selectValue)||"",M=ec(null==O?void 0:O.from,u,P,R),L=eu(null==O?void 0:O.to,d,P,R),D=M||L?ep(M,L,g,b):p,j=(0,V.Z)(null!==(r=null!==(n=null!=L?L:M)&&void 0!==n?n:d)&&void 0!==r?r:eL),F=h&&!m;return i.createElement("div",Object.assign({ref:t,className:(0,q.q)("w-full min-w-[10rem] relative flex justify-between text-tremor-default max-w-sm shadow-tremor-input dark:shadow-dark-tremor-input rounded-tremor-default",y)},x),i.createElement(Z,{as:"div",className:(0,q.q)("w-full",c?"rounded-l-tremor-default":"rounded-tremor-default",C&&"ring-2 ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted z-10")},i.createElement("div",{className:"relative w-full"},i.createElement(Z.Button,{onFocus:()=>T(!0),onBlur:()=>T(!1),disabled:m,className:(0,q.q)("w-full outline-none text-left whitespace-nowrap truncate focus:ring-2 transition duration-100 rounded-l-tremor-default flex flex-nowrap border 
pl-3 py-2","rounded-l-tremor-default border-tremor-border text-tremor-content-emphasis focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:text-dark-tremor-content-emphasis dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",c?"rounded-l-tremor-default":"rounded-tremor-default",F?"pr-8":"pr-4",(0,Y.um)((0,Y.Uh)(M||L),m))},i.createElement(H,{className:(0,q.q)(es("calendarIcon"),"flex-none shrink-0 h-5 w-5 -ml-0.5 mr-2","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle"),"aria-hidden":"true"}),i.createElement("p",{className:"truncate"},D)),F&&M?i.createElement("button",{type:"button",className:(0,q.q)("absolute outline-none inset-y-0 right-0 flex items-center transition duration-100 mr-4"),onClick:e=>{e.preventDefault(),null==s||s({}),k({})}},i.createElement(G.Z,{className:(0,q.q)(es("clearIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null),i.createElement(U.u,{className:"absolute z-10 min-w-min left-0",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},i.createElement(Z.Panel,{focus:!0,className:(0,q.q)("divide-y overflow-y-auto outline-none rounded-tremor-default p-3 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},i.createElement(eR,Object.assign({mode:"range",showOutsideDays:!0,defaultMonth:j,selected:{from:M,to:L},onSelect:e=>{null==s||s({from:null==e?void 0:e.from,to:null==e?void 0:e.to}),k({from:null==e?void 0:e.from,to:null==e?void 0:e.to})},locale:g,disabled:I,enableYearNavigation:E,classNames:{day_range_middle:(0,q.q)("!rounded-none aria-selected:!bg-tremor-background-subtle aria-selected:dark:!bg-dark-tremor-background-subtle aria-selected:!text-tremor-content aria-selected:dark:!bg-dark-tremor-background-subtle"),day_range_start:"rounded-r-none rounded-l-tremor-small aria-selected:text-tremor-brand-inverted dark:aria-selected:text-dark-tremor-brand-inverted",day_range_end:"rounded-l-none rounded-r-tremor-small aria-selected:text-tremor-brand-inverted dark:aria-selected:text-dark-tremor-brand-inverted"},weekStartsOn:w},e))))),c&&i.createElement(z.R,{as:"div",className:(0,q.q)("w-48 -ml-px rounded-r-tremor-default",A&&"ring-2 ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted z-10"),value:P,onChange:e=>{let{from:t,to:n}=R.get(e),r=null!=n?n:eL;null==s||s({from:t,to:r,selectValue:e}),k({from:t,to:r,selectValue:e})},disabled:m},e=>{var t;let{value:n}=e;return i.createElement(i.Fragment,null,i.createElement(z.R.Button,{onFocus:()=>N(!0),onBlur:()=>N(!1),className:(0,q.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-r-tremor-default transition duration-100 border px-4 py-2","border-tremor-border shadow-tremor-input text-tremor-content-emphasis focus:border-tremor-brand-subtle","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:text-dark-tremor-content-emphasis dark:focus:border-dark-tremor-brand-subtle",(0,Y.um)((0,Y.Uh)(n),m))},n&&null!==(t=_.get(n))&&void 0!==t?t:f),i.createElement(U.u,{className:"absolute z-10 w-full inset-x-0 right-0",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 
-translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},i.createElement(z.R.Options,{className:(0,q.q)("divide-y overflow-y-auto outline-none border my-1","shadow-tremor-dropdown bg-tremor-background border-tremor-border divide-tremor-border rounded-tremor-default","dark:shadow-dark-tremor-dropdown dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border")},null!=v?v:ed.map(e=>i.createElement(e_.Z,{key:e.value,value:e.value},e.text)))))}))});eD.displayName="DateRangePicker"},47047:function(e,t,n){n.d(t,{Z:function(){return b}});var r=n(69703),o=n(64090);n(50027),n(18174),n(21871);var a=n(41213),i=n(46457),l=n(54518);let s=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 20 20",fill:"currentColor"}),o.createElement("path",{fillRule:"evenodd",d:"M8 4a4 4 0 100 8 4 4 0 000-8zM2 8a6 6 0 1110.89 3.476l4.817 4.817a1 1 0 01-1.414 1.414l-4.816-4.816A6 6 0 012 8z",clipRule:"evenodd"}))};var c=n(8903),u=n(25163),d=n(70129);let p=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",width:"100%",height:"100%",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2",strokeLinecap:"round",strokeLinejoin:"round"},t),o.createElement("line",{x1:"18",y1:"6",x2:"6",y2:"18"}),o.createElement("line",{x1:"6",y1:"6",x2:"18",y2:"18"}))};var f=n(99250),m=n(65492),g=n(91753);let h=(0,m.fn)("MultiSelect"),b=o.forwardRef((e,t)=>{let{defaultValue:n,value:m,onValueChange:b,placeholder:v="Select...",placeholderSearch:y="Search",disabled:E=!1,icon:w,children:S,className:x}=e,O=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","placeholderSearch","disabled","icon","children","className"]),[k,C]=(0,i.Z)(n,m),{reactElementChildren:T,optionsAvailable:A}=(0,o.useMemo)(()=>{let e=o.Children.toArray(S).filter(o.isValidElement);return{reactElementChildren:e,optionsAvailable:(0,g.n0)("",e)}},[S]),[N,I]=(0,o.useState)(""),R=(null!=k?k:[]).length>0,_=(0,o.useMemo)(()=>N?(0,g.n0)(N,T):A,[N,T,A]),P=()=>{I("")};return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:k,value:k,onChange:e=>{null==b||b(e),C(e)},disabled:E,className:(0,f.q)("w-full min-w-[10rem] relative text-tremor-default",x)},O,{multiple:!0}),e=>{let{value:t}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,f.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-1.5","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",w?"p-10 -ml-0.5":"pl-3",(0,g.um)(t.length>0,E))},w&&o.createElement("span",{className:(0,f.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(w,{className:(0,f.q)(h("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("div",{className:"h-6 flex items-center"},t.length>0?o.createElement("div",{className:"flex flex-nowrap overflow-x-scroll [&::-webkit-scrollbar]:hidden [scrollbar-width:none] gap-x-1 mr-5 -ml-1.5 relative"},A.filter(e=>t.includes(e.props.value)).map((e,n)=>{var r;return 
o.createElement("div",{key:n,className:(0,f.q)("max-w-[100px] lg:max-w-[200px] flex justify-center items-center pl-2 pr-1.5 py-1 font-medium","rounded-tremor-small","bg-tremor-background-muted dark:bg-dark-tremor-background-muted","bg-tremor-background-subtle dark:bg-dark-tremor-background-subtle","text-tremor-content-default dark:text-dark-tremor-content-default","text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis")},o.createElement("div",{className:"text-xs truncate "},null!==(r=e.props.children)&&void 0!==r?r:e.props.value),o.createElement("div",{onClick:n=>{n.preventDefault();let r=t.filter(t=>t!==e.props.value);null==b||b(r),C(r)}},o.createElement(p,{className:(0,f.q)(h("clearIconItem"),"cursor-pointer rounded-tremor-full w-3.5 h-3.5 ml-2","text-tremor-content-subtle hover:text-tremor-content","dark:text-dark-tremor-content-subtle dark:hover:text-tremor-content")})))})):o.createElement("span",null,v)),o.createElement("span",{className:(0,f.q)("absolute inset-y-0 right-0 flex items-center mr-2.5")},o.createElement(l.Z,{className:(0,f.q)(h("arrowDownIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),R&&!E?o.createElement("button",{type:"button",className:(0,f.q)("absolute inset-y-0 right-0 flex items-center mr-8"),onClick:e=>{e.preventDefault(),C([]),null==b||b([])}},o.createElement(c.Z,{className:(0,f.q)(h("clearIconAllItems"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,f.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},o.createElement("div",{className:(0,f.q)("flex items-center w-full px-2.5","bg-tremor-background-muted","dark:bg-dark-tremor-background-muted")},o.createElement("span",null,o.createElement(s,{className:(0,f.q)("flex-none w-4 h-4 mr-2","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("input",{name:"search",type:"input",autoComplete:"off",placeholder:y,className:(0,f.q)("w-full focus:outline-none focus:ring-none bg-transparent text-tremor-default py-2","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis"),onKeyDown:e=>{"Space"===e.code&&""!==e.target.value&&e.stopPropagation()},onChange:e=>I(e.target.value),value:N})),o.createElement(a.Z.Provider,Object.assign({},{onBlur:{handleResetSearch:P}},{value:{selectedValue:t}}),_))))})});b.displayName="MultiSelect"},76628:function(e,t,n){n.d(t,{Z:function(){return u}});var r=n(69703);n(50027),n(18174),n(21871);var o=n(41213),a=n(64090),i=n(99250),l=n(65492),s=n(25163);let c=(0,l.fn)("MultiSelectItem"),u=a.forwardRef((e,t)=>{let{value:n,className:u,children:d}=e,p=(0,r._T)(e,["value","className","children"]),{selectedValue:f}=(0,a.useContext)(o.Z),m=(0,l.NZ)(n,f);return a.createElement(s.R.Option,Object.assign({className:(0,i.q)(c("root"),"flex justify-start items-center cursor-default text-tremor-default p-2.5","ui-active:bg-tremor-background-muted 
ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",u),ref:t,key:n,value:n},p),a.createElement("input",{type:"checkbox",className:(0,i.q)(c("checkbox"),"flex-none focus:ring-none focus:outline-none cursor-pointer mr-2.5","accent-tremor-brand","dark:accent-dark-tremor-brand"),checked:m,readOnly:!0}),a.createElement("span",{className:"whitespace-nowrap truncate"},null!=d?d:n))});u.displayName="MultiSelectItem"},95093:function(e,t,n){n.d(t,{Z:function(){return m}});var r=n(69703),o=n(64090),a=n(54518),i=n(8903),l=n(99250),s=n(65492),c=n(91753),u=n(25163),d=n(70129),p=n(46457);let f=(0,s.fn)("Select"),m=o.forwardRef((e,t)=>{let{defaultValue:n,value:s,onValueChange:m,placeholder:g="Select...",disabled:h=!1,icon:b,enableClear:v=!0,children:y,className:E}=e,w=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","disabled","icon","enableClear","children","className"]),[S,x]=(0,p.Z)(n,s),O=(0,o.useMemo)(()=>{let e=o.Children.toArray(y).filter(o.isValidElement);return(0,c.sl)(e)},[y]);return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:S,value:S,onChange:e=>{null==m||m(e),x(e)},disabled:h,className:(0,l.q)("w-full min-w-[10rem] relative text-tremor-default",E)},w),e=>{var t;let{value:n}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,l.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-2","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",b?"p-10 -ml-0.5":"pl-3",(0,c.um)((0,c.Uh)(n),h))},b&&o.createElement("span",{className:(0,l.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(b,{className:(0,l.q)(f("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("span",{className:"w-[90%] block truncate"},n&&null!==(t=O.get(n))&&void 0!==t?t:g),o.createElement("span",{className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-3")},o.createElement(a.Z,{className:(0,l.q)(f("arrowDownIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),v&&S?o.createElement("button",{type:"button",className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-8"),onClick:e=>{e.preventDefault(),x(""),null==m||m("")}},o.createElement(i.Z,{className:(0,l.q)(f("clearIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,l.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border 
dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},y)))})});m.displayName="Select"},27166:function(e,t,n){n.d(t,{Z:function(){return s}});var r=n(69703),o=n(64090),a=n(25163),i=n(99250);let l=(0,n(65492).fn)("SelectItem"),s=o.forwardRef((e,t)=>{let{value:n,icon:s,className:c,children:u}=e,d=(0,r._T)(e,["value","icon","className","children"]);return o.createElement(a.R.Option,Object.assign({className:(0,i.q)(l("root"),"flex justify-start items-center cursor-default text-tremor-default px-2.5 py-2.5","ui-active:bg-tremor-background-muted ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong ui-selected:bg-tremor-background-muted text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",c),ref:t,key:n,value:n},d),s&&o.createElement(s,{className:(0,i.q)(l("icon"),"flex-none w-5 h-5 mr-1.5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}),o.createElement("span",{className:"whitespace-nowrap truncate"},null!=u?u:n))});s.displayName="SelectItem"},12224:function(e,t,n){n.d(t,{Z:function(){return N}});var r=n(69703),o=n(64090),a=n(83891),i=n(20044),l=n(10641),s=n(92381),c=n(71454),u=n(36601),d=n(37700),p=n(84152),f=n(34797),m=n(18318),g=n(71014),h=n(67409),b=n(39790);let v=(0,o.createContext)(null),y=Object.assign((0,m.yV)(function(e,t){let n=(0,s.M)(),{id:r="headlessui-label-".concat(n),passive:a=!1,...i}=e,l=function e(){let t=(0,o.useContext)(v);if(null===t){let t=Error("You used a