From 5c9473874c00c8d205788d12a3d35f0cf2136b84 Mon Sep 17 00:00:00 2001 From: Tanaro Laptop Date: Sat, 20 Jan 2024 01:50:27 +0100 Subject: [PATCH 001/543] change max_tokens type to int --- litellm/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/main.py b/litellm/main.py index 2fef048a6..407e851f5 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -366,7 +366,7 @@ def completion( n: Optional[int] = None, stream: Optional[bool] = None, stop=None, - max_tokens: Optional[float] = None, + max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, From b69cb2c773ce0fb06835dd7594329cbe6ca88556 Mon Sep 17 00:00:00 2001 From: TanaroSch Date: Tue, 6 Feb 2024 11:19:28 +0100 Subject: [PATCH 002/543] change max_tokens float to int --- docs/my-website/docs/completion/input.md | 2 +- litellm/main.py | 4 ++-- litellm/types/completion.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/my-website/docs/completion/input.md b/docs/my-website/docs/completion/input.md index 676e4d232..445fd072b 100644 --- a/docs/my-website/docs/completion/input.md +++ b/docs/my-website/docs/completion/input.md @@ -73,7 +73,7 @@ def completion( n: Optional[int] = None, stream: Optional[bool] = None, stop=None, - max_tokens: Optional[float] = None, + max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, diff --git a/litellm/main.py b/litellm/main.py index 407e851f5..fb7574b06 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -143,7 +143,7 @@ async def acompletion( n: Optional[int] = None, stream: Optional[bool] = None, stop=None, - max_tokens: Optional[float] = None, + max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, @@ -1884,7 +1884,7 @@ def batch_completion( n: Optional[int] = None, stream: Optional[bool] = None, stop=None, - max_tokens: Optional[float] = None, + max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, diff --git a/litellm/types/completion.py b/litellm/types/completion.py index 5eac90575..3ce441880 100644 --- a/litellm/types/completion.py +++ b/litellm/types/completion.py @@ -12,7 +12,7 @@ class CompletionRequest(BaseModel): n: Optional[int] = None stream: Optional[bool] = None stop: Optional[dict] = None - max_tokens: Optional[float] = None + max_tokens: Optional[int] = None presence_penalty: Optional[float] = None frequency_penalty: Optional[float] = None logit_bias: Optional[dict] = None From 872ff6176d506dfb4f7e2ee8d22505c98b96c6c6 Mon Sep 17 00:00:00 2001 From: Lucca Zenobio Date: Wed, 20 Mar 2024 15:22:23 -0300 Subject: [PATCH 003/543] updates --- litellm/llms/prompt_templates/factory.py | 6 +++--- litellm/utils.py | 5 ++++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index b23f10315..abe340e7d 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -604,13 +604,13 @@ def convert_to_anthropic_tool_result(message: dict) -> str: def convert_to_anthropic_tool_invoke(tool_calls: list) -> str: invokes = "" for tool in tool_calls: - if tool["type"] != "function": + if tool.type != "function": continue - 
tool_name = tool["function"]["name"] + tool_name = tool.function.name parameters = "".join( f"<{param}>{val}\n" - for param, val in json.loads(tool["function"]["arguments"]).items() + for param, val in json.loads(tool.function.arguments).items() ) invokes += ( "\n" diff --git a/litellm/utils.py b/litellm/utils.py index a8c003181..57327473d 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -271,7 +271,10 @@ class Message(OpenAIObject): if tool_calls is not None: self.tool_calls = [] for tool_call in tool_calls: - self.tool_calls.append(ChatCompletionMessageToolCall(**tool_call)) + if isinstance(tool_call, dict): + self.tool_calls.append(ChatCompletionMessageToolCall(**tool_call)) + else: + self.tool_calls.append(tool_call) if logprobs is not None: self._logprobs = logprobs From 0c0780be83c7a4559684d44c538fea6a5b07cad3 Mon Sep 17 00:00:00 2001 From: Lucca Zenobio Date: Thu, 21 Mar 2024 10:43:27 -0300 Subject: [PATCH 004/543] extra headers --- litellm/llms/bedrock.py | 15 ++++++++++++++- litellm/llms/prompt_templates/factory.py | 9 +++++---- litellm/main.py | 1 + litellm/utils.py | 2 +- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py index 0f52d3abc..a32cee381 100644 --- a/litellm/llms/bedrock.py +++ b/litellm/llms/bedrock.py @@ -495,6 +495,15 @@ class AmazonStabilityConfig: } +def add_custom_header(headers): + """Closure to capture the headers and add them.""" + def callback(request, **kwargs): + """Actual callback function that Boto3 will call.""" + for header_name, header_value in headers.items(): + request.headers.add_header(header_name, header_value) + return callback + + def init_bedrock_client( region_name=None, aws_access_key_id: Optional[str] = None, @@ -504,12 +513,12 @@ def init_bedrock_client( aws_session_name: Optional[str] = None, aws_profile_name: Optional[str] = None, aws_role_name: Optional[str] = None, + extra_headers: Optional[dict] = None, timeout: Optional[int] = None, ): # check for custom AWS_REGION_NAME and use it if not passed to init_bedrock_client litellm_aws_region_name = get_secret("AWS_REGION_NAME", None) standard_aws_region_name = get_secret("AWS_REGION", None) - ## CHECK IS 'os.environ/' passed in # Define the list of parameters to check params_to_check = [ @@ -618,6 +627,8 @@ def init_bedrock_client( endpoint_url=endpoint_url, config=config, ) + if extra_headers: + client.meta.events.register('before-sign.bedrock-runtime.*', add_custom_header(extra_headers)) return client @@ -677,6 +688,7 @@ def completion( litellm_params=None, logger_fn=None, timeout=None, + extra_headers: Optional[dict] = None, ): exception_mapping_worked = False try: @@ -704,6 +716,7 @@ def completion( aws_role_name=aws_role_name, aws_session_name=aws_session_name, aws_profile_name=aws_profile_name, + extra_headers=extra_headers, timeout=timeout, ) diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index abe340e7d..a09b988a5 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -604,13 +604,14 @@ def convert_to_anthropic_tool_result(message: dict) -> str: def convert_to_anthropic_tool_invoke(tool_calls: list) -> str: invokes = "" for tool in tool_calls: - if tool.type != "function": + tool = dict(tool) + if tool["type"] != "function": continue - - tool_name = tool.function.name + tool_function = dict(tool["function"]) + tool_name = tool_function["name"] parameters = "".join( f"<{param}>{val}\n" - for param, val in 
json.loads(tool.function.arguments).items() + for param, val in json.loads(tool_function["arguments"]).items() ) invokes += ( "\n" diff --git a/litellm/main.py b/litellm/main.py index 724190391..dceaf9bf5 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -1749,6 +1749,7 @@ def completion( logger_fn=logger_fn, encoding=encoding, logging_obj=logging, + extra_headers=extra_headers, timeout=timeout, ) diff --git a/litellm/utils.py b/litellm/utils.py index 57327473d..4124ea437 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -5146,7 +5146,7 @@ def get_supported_openai_params(model: str, custom_llm_provider: str): """ if custom_llm_provider == "bedrock": if model.startswith("anthropic.claude-3"): - return litellm.AmazonAnthropicClaude3Config().get_supported_openai_params() + return litellm.AmazonAnthropicClaude3Config().get_supported_openai_params() + ["extra_headers"] elif model.startswith("anthropic"): return litellm.AmazonAnthropicConfig().get_supported_openai_params() elif model.startswith("ai21"): From cda78a5da0b2d705fb5ac56193c7508dc1ba7c4f Mon Sep 17 00:00:00 2001 From: Lucca Zenobio Date: Mon, 25 Mar 2024 13:08:17 -0300 Subject: [PATCH 005/543] update --- litellm/llms/bedrock.py | 2 +- litellm/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py index a32cee381..d13301910 100644 --- a/litellm/llms/bedrock.py +++ b/litellm/llms/bedrock.py @@ -128,7 +128,7 @@ class AmazonAnthropicClaude3Config: } def get_supported_openai_params(self): - return ["max_tokens", "tools", "tool_choice", "stream"] + return ["max_tokens", "tools", "tool_choice", "stream", "extra_headers"] def map_openai_params(self, non_default_params: dict, optional_params: dict): for param, value in non_default_params.items(): diff --git a/litellm/utils.py b/litellm/utils.py index 4124ea437..57327473d 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -5146,7 +5146,7 @@ def get_supported_openai_params(model: str, custom_llm_provider: str): """ if custom_llm_provider == "bedrock": if model.startswith("anthropic.claude-3"): - return litellm.AmazonAnthropicClaude3Config().get_supported_openai_params() + ["extra_headers"] + return litellm.AmazonAnthropicClaude3Config().get_supported_openai_params() elif model.startswith("anthropic"): return litellm.AmazonAnthropicConfig().get_supported_openai_params() elif model.startswith("ai21"): From a9e2ef62125c462cc62d824f3d90bbc1d0366dfe Mon Sep 17 00:00:00 2001 From: Lucca Zenobio Date: Mon, 29 Apr 2024 10:05:30 -0300 Subject: [PATCH 006/543] test --- litellm/tests/test_bedrock_completion.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/litellm/tests/test_bedrock_completion.py b/litellm/tests/test_bedrock_completion.py index ca2ffea5f..2aab8a3b4 100644 --- a/litellm/tests/test_bedrock_completion.py +++ b/litellm/tests/test_bedrock_completion.py @@ -207,6 +207,25 @@ def test_completion_bedrock_claude_sts_client_auth(): # test_completion_bedrock_claude_sts_client_auth() +def test_bedrock_extra_headers(): + try: + litellm.set_verbose = True + response: ModelResponse = completion( + model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0", + messages=messages, + max_tokens=10, + temperature=0.78, + extra_headers={"x-key": "x_key_value"} + ) + # Add any assertions here to check the response + assert len(response.choices) > 0 + assert len(response.choices[0].message.content) > 0 + except RateLimitError: + pass + except Exception as e: + pytest.fail(f"Error occurred: {e}") + + def 
test_bedrock_claude_3(): try: litellm.set_verbose = True From 318b4813f2b45ecf98bd46104d560393768b832c Mon Sep 17 00:00:00 2001 From: mogith-pn <143642606+mogith-pn@users.noreply.github.com> Date: Tue, 30 Apr 2024 22:38:33 +0530 Subject: [PATCH 007/543] Clarifai-LiteLLM integration (#1) * intg v1 clarifai-litellm * Added more community models and testcase * Clarifai-updated markdown docs --- cookbook/liteLLM_clarifai_Demo.ipynb | 151 ++++++++++++++ docs/my-website/docs/providers/clarifai.md | 177 +++++++++++++++++ litellm/__init__.py | 70 +++++++ litellm/llms/clarifai.py | 216 +++++++++++++++++++++ litellm/llms/prompt_templates/factory.py | 3 + litellm/main.py | 50 +++++ litellm/tests/test_clarifai_completion.py | 67 +++++++ 7 files changed, 734 insertions(+) create mode 100644 cookbook/liteLLM_clarifai_Demo.ipynb create mode 100644 docs/my-website/docs/providers/clarifai.md create mode 100644 litellm/llms/clarifai.py create mode 100644 litellm/tests/test_clarifai_completion.py diff --git a/cookbook/liteLLM_clarifai_Demo.ipynb b/cookbook/liteLLM_clarifai_Demo.ipynb new file mode 100644 index 000000000..4e3b4dbb0 --- /dev/null +++ b/cookbook/liteLLM_clarifai_Demo.ipynb @@ -0,0 +1,151 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# LiteLLM Clarifai \n", + "This notebook walks you through on how to use liteLLM integration of Clarifai and call LLM model from clarifai with response in openAI output format." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pre-Requisites" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#install necessary packages\n", + "!pip install litellm\n", + "!pip install clarifai" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To obtain Clarifai Personal Access Token follow the steps mentioned in the [link](https://docs.clarifai.com/clarifai-basics/authentication/personal-access-tokens/)" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "## Set Clarifai Credentials\n", + "import os\n", + "os.environ[\"CLARIFAI_API_KEY\"]= \"YOUR_CLARIFAI_PAT\" # Clarifai PAT" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Mistral-large" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import litellm\n", + "\n", + "litellm.set_verbose=False" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mistral large response : ModelResponse(id='chatcmpl-6eed494d-7ae2-4870-b9c2-6a64d50a6151', choices=[Choices(finish_reason='stop', index=1, message=Message(content=\"In the grand tapestry of time, where tales unfold,\\nLies the chronicle of ages, a sight to behold.\\nA tale of empires rising, and kings of old,\\nOf civilizations lost, and stories untold.\\n\\nOnce upon a yesterday, in a time so vast,\\nHumans took their first steps, casting shadows in the past.\\nFrom the cradle of mankind, a journey they embarked,\\nThrough stone and bronze and iron, their skills they sharpened and marked.\\n\\nEgyptians built pyramids, reaching for the skies,\\nWhile Greeks sought wisdom, truth, in philosophies that lie.\\nRoman legions marched, their empire to expand,\\nAnd in the East, the Silk Road joined the world, hand in hand.\\n\\nThe Middle Ages came, with knights in shining armor,\\nFeudal 
lords and serfs, a time of both clamor and calm order.\\nThen Renaissance bloomed, like a flower in the sun,\\nA rebirth of art and science, a new age had begun.\\n\\nAcross the vast oceans, explorers sailed with courage bold,\\nDiscovering new lands, stories of adventure, untold.\\nIndustrial Revolution churned, progress in its wake,\\nMachines and factories, a whole new world to make.\\n\\nTwo World Wars raged, a testament to man's strife,\\nYet from the ashes rose hope, a renewed will for life.\\nInto the modern era, technology took flight,\\nConnecting every corner, bathed in digital light.\\n\\nHistory, a symphony, a melody of time,\\nA testament to human will, resilience so sublime.\\nIn every page, a lesson, in every tale, a guide,\\nFor understanding our past, shapes our future's tide.\", role='assistant'))], created=1713896412, model='https://api.clarifai.com/v2/users/mistralai/apps/completion/models/mistral-large/outputs', object='chat.completion', system_fingerprint=None, usage=Usage(prompt_tokens=13, completion_tokens=338, total_tokens=351))\n" + ] + } + ], + "source": [ + "from litellm import completion\n", + "\n", + "messages = [{\"role\": \"user\",\"content\": \"\"\"Write a poem about history?\"\"\"}]\n", + "response=completion(\n", + " model=\"clarifai/mistralai.completion.mistral-large\",\n", + " messages=messages,\n", + " )\n", + "\n", + "print(f\"Mistral large response : {response}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Claude-2.1 " + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Claude-2.1 response : ModelResponse(id='chatcmpl-d126c919-4db4-4aa3-ac8f-7edea41e0b93', choices=[Choices(finish_reason='stop', index=1, message=Message(content=\" Here's a poem I wrote about history:\\n\\nThe Tides of Time\\n\\nThe tides of time ebb and flow,\\nCarrying stories of long ago.\\nFigures and events come into light,\\nShaping the future with all their might.\\n\\nKingdoms rise, empires fall, \\nLeaving traces that echo down every hall.\\nRevolutions bring change with a fiery glow,\\nToppling structures from long ago.\\n\\nExplorers traverse each ocean and land,\\nSeeking treasures they don't understand.\\nWhile artists and writers try to make their mark,\\nHoping their works shine bright in the dark.\\n\\nThe cycle repeats again and again,\\nAs humanity struggles to learn from its pain.\\nThough the players may change on history's stage,\\nThe themes stay the same from age to age.\\n\\nWar and peace, life and death,\\nLove and strife with every breath.\\nThe tides of time continue their dance,\\nAs we join in, by luck or by chance.\\n\\nSo we study the past to light the way forward, \\nHeeding warnings from stories told and heard.\\nThe future unfolds from this unending flow -\\nWhere the tides of time ultimately go.\", role='assistant'))], created=1713896579, model='https://api.clarifai.com/v2/users/anthropic/apps/completion/models/claude-2_1/outputs', object='chat.completion', system_fingerprint=None, usage=Usage(prompt_tokens=12, completion_tokens=232, total_tokens=244))\n" + ] + } + ], + "source": [ + "from litellm import completion\n", + "\n", + "messages = [{\"role\": \"user\",\"content\": \"\"\"Write a poem about history?\"\"\"}]\n", + "response=completion(\n", + " model=\"clarifai/anthropic.completion.claude-2_1\",\n", + " messages=messages,\n", + " )\n", + "\n", + "print(f\"Claude-2.1 response : {response}\")" + ] + }, + { 
+ "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.10" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/my-website/docs/providers/clarifai.md b/docs/my-website/docs/providers/clarifai.md new file mode 100644 index 000000000..acc8c54be --- /dev/null +++ b/docs/my-website/docs/providers/clarifai.md @@ -0,0 +1,177 @@ + +# Clarifai +Anthropic, OpenAI, Mistral, Llama and Gemini LLMs are Supported on Clarifai. + +## Pre-Requisites + +`pip install clarifai` + +`pip install litellm` + +## Required Environment Variables +To obtain your Clarifai Personal access token follow this [link](https://docs.clarifai.com/clarifai-basics/authentication/personal-access-tokens/). Optionally the PAT can also be passed in `completion` function. + +```python +os.environ["CALRIFAI_API_KEY"] = "YOUR_CLARIFAI_PAT" # CLARIFAI_PAT +``` + +## Usage + +```python +import os +from litellm import completion + +os.environ["CLARIFAI_API_KEY"] = "" + +response = completion( + model="clarifai/mistralai.completion.mistral-large", + messages=[{ "content": "Tell me a joke about physics?","role": "user"}] +) +``` + +**Output** +```json +{ + "id": "chatcmpl-572701ee-9ab2-411c-ac75-46c1ba18e781", + "choices": [ + { + "finish_reason": "stop", + "index": 1, + "message": { + "content": "Sure, here's a physics joke for you:\n\nWhy can't you trust an atom?\n\nBecause they make up everything!", + "role": "assistant" + } + } + ], + "created": 1714410197, + "model": "https://api.clarifai.com/v2/users/mistralai/apps/completion/models/mistral-large/outputs", + "object": "chat.completion", + "system_fingerprint": null, + "usage": { + "prompt_tokens": 14, + "completion_tokens": 24, + "total_tokens": 38 + } + } +``` + +## Clarifai models +liteLLM supports non-streaming requests to all models on [Clarifai community](https://clarifai.com/explore/models?filterData=%5B%7B%22field%22%3A%22use_cases%22%2C%22value%22%3A%5B%22llm%22%5D%7D%5D&page=1&perPage=24) + +Example Usage - Note: liteLLM supports all models deployed on Clarifai + +## Llama LLMs +| Model Name | Function Call | +---------------------------|---------------------------------| +| clarifai/meta.Llama-2.llama2-7b-chat | `completion('clarifai/meta.Llama-2.llama2-7b-chat', messages)` +| clarifai/meta.Llama-2.llama2-13b-chat | `completion('clarifai/meta.Llama-2.llama2-13b-chat', messages)` +| clarifai/meta.Llama-2.llama2-70b-chat | `completion('clarifai/meta.Llama-2.llama2-70b-chat', messages)` | +| clarifai/meta.Llama-2.codeLlama-70b-Python | `completion('clarifai/meta.Llama-2.codeLlama-70b-Python', messages)`| +| clarifai/meta.Llama-2.codeLlama-70b-Instruct | `completion('clarifai/meta.Llama-2.codeLlama-70b-Instruct', messages)` | + +## Mistal LLMs +| Model Name | Function Call | +|---------------------------------------------|------------------------------------------------------------------------| +| clarifai/mistralai.completion.mixtral-8x22B | `completion('clarifai/mistralai.completion.mixtral-8x22B', messages)` | +| clarifai/mistralai.completion.mistral-large | `completion('clarifai/mistralai.completion.mistral-large', messages)` | +| 
clarifai/mistralai.completion.mistral-medium | `completion('clarifai/mistralai.completion.mistral-medium', messages)` | +| clarifai/mistralai.completion.mistral-small | `completion('clarifai/mistralai.completion.mistral-small', messages)` | +| clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1 | `completion('clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1', messages)` +| clarifai/mistralai.completion.mistral-7B-OpenOrca | `completion('clarifai/mistralai.completion.mistral-7B-OpenOrca', messages)` | +| clarifai/mistralai.completion.openHermes-2-mistral-7B | `completion('clarifai/mistralai.completion.openHermes-2-mistral-7B', messages)` | + + +## Jurassic LLMs +| Model Name | Function Call | +|-----------------------------------------------|---------------------------------------------------------------------| +| clarifai/ai21.complete.Jurassic2-Grande | `completion('clarifai/ai21.complete.Jurassic2-Grande', messages)` | +| clarifai/ai21.complete.Jurassic2-Grande-Instruct | `completion('clarifai/ai21.complete.Jurassic2-Grande-Instruct', messages)` | +| clarifai/ai21.complete.Jurassic2-Jumbo-Instruct | `completion('clarifai/ai21.complete.Jurassic2-Jumbo-Instruct', messages)` | +| clarifai/ai21.complete.Jurassic2-Jumbo | `completion('clarifai/ai21.complete.Jurassic2-Jumbo', messages)` | +| clarifai/ai21.complete.Jurassic2-Large | `completion('clarifai/ai21.complete.Jurassic2-Large', messages)` | + +## Wizard LLMs + +| Model Name | Function Call | +|-----------------------------------------------|---------------------------------------------------------------------| +| clarifai/wizardlm.generate.wizardCoder-Python-34B | `completion('clarifai/wizardlm.generate.wizardCoder-Python-34B', messages)` | +| clarifai/wizardlm.generate.wizardLM-70B | `completion('clarifai/wizardlm.generate.wizardLM-70B', messages)` | +| clarifai/wizardlm.generate.wizardLM-13B | `completion('clarifai/wizardlm.generate.wizardLM-13B', messages)` | +| clarifai/wizardlm.generate.wizardCoder-15B | `completion('clarifai/wizardlm.generate.wizardCoder-15B', messages)` | + +## Anthropic models + +| Model Name | Function Call | +|-----------------------------------------------|---------------------------------------------------------------------| +| clarifai/anthropic.completion.claude-v1 | `completion('clarifai/anthropic.completion.claude-v1', messages)` | +| clarifai/anthropic.completion.claude-instant-1_2 | `completion('clarifai/anthropic.completion.claude-instant-1_2', messages)` | +| clarifai/anthropic.completion.claude-instant | `completion('clarifai/anthropic.completion.claude-instant', messages)` | +| clarifai/anthropic.completion.claude-v2 | `completion('clarifai/anthropic.completion.claude-v2', messages)` | +| clarifai/anthropic.completion.claude-2_1 | `completion('clarifai/anthropic.completion.claude-2_1', messages)` | +| clarifai/anthropic.completion.claude-3-opus | `completion('clarifai/anthropic.completion.claude-3-opus', messages)` | +| clarifai/anthropic.completion.claude-3-sonnet | `completion('clarifai/anthropic.completion.claude-3-sonnet', messages)` | + +## OpenAI GPT LLMs + +| Model Name | Function Call | +|-----------------------------------------------|---------------------------------------------------------------------| +| clarifai/openai.chat-completion.GPT-4 | `completion('clarifai/openai.chat-completion.GPT-4', messages)` | +| clarifai/openai.chat-completion.GPT-3_5-turbo | `completion('clarifai/openai.chat-completion.GPT-3_5-turbo', messages)` | +| 
clarifai/openai.chat-completion.gpt-4-turbo | `completion('clarifai/openai.chat-completion.gpt-4-turbo', messages)` |
| clarifai/openai.completion.gpt-3_5-turbo-instruct | `completion('clarifai/openai.completion.gpt-3_5-turbo-instruct', messages)` |

## GCP LLMs

| Model Name | Function Call |
|-----------------------------------------------|---------------------------------------------------------------------|
| clarifai/gcp.generate.gemini-1_5-pro | `completion('clarifai/gcp.generate.gemini-1_5-pro', messages)` |
| clarifai/gcp.generate.imagen-2 | `completion('clarifai/gcp.generate.imagen-2', messages)` |
| clarifai/gcp.generate.code-gecko | `completion('clarifai/gcp.generate.code-gecko', messages)` |
| clarifai/gcp.generate.code-bison | `completion('clarifai/gcp.generate.code-bison', messages)` |
| clarifai/gcp.generate.text-bison | `completion('clarifai/gcp.generate.text-bison', messages)` |
| clarifai/gcp.generate.gemma-2b-it | `completion('clarifai/gcp.generate.gemma-2b-it', messages)` |
| clarifai/gcp.generate.gemma-7b-it | `completion('clarifai/gcp.generate.gemma-7b-it', messages)` |
| clarifai/gcp.generate.gemini-pro | `completion('clarifai/gcp.generate.gemini-pro', messages)` |
| clarifai/gcp.generate.gemma-1_1-7b-it | `completion('clarifai/gcp.generate.gemma-1_1-7b-it', messages)` |

## Cohere LLMs
| Model Name | Function Call |
|-----------------------------------------------|---------------------------------------------------------------------|
| clarifai/cohere.generate.cohere-generate-command | `completion('clarifai/cohere.generate.cohere-generate-command', messages)` |
| clarifai/cohere.generate.command-r-plus | `completion('clarifai/cohere.generate.command-r-plus', messages)` |

## Databricks LLMs

| Model Name | Function Call |
|---------------------------------------------------|---------------------------------------------------------------------|
| clarifai/databricks.drbx.dbrx-instruct | `completion('clarifai/databricks.drbx.dbrx-instruct', messages)` |
| clarifai/databricks.Dolly-v2.dolly-v2-12b | `completion('clarifai/databricks.Dolly-v2.dolly-v2-12b', messages)` |

## Microsoft LLMs

| Model Name | Function Call |
|---------------------------------------------------|---------------------------------------------------------------------|
| clarifai/microsoft.text-generation.phi-2 | `completion('clarifai/microsoft.text-generation.phi-2', messages)` |
| clarifai/microsoft.text-generation.phi-1_5 | `completion('clarifai/microsoft.text-generation.phi-1_5', messages)` |

## Salesforce models

| Model Name | Function Call |
|-----------------------------------------------------------|-------------------------------------------------------------------------------|
| clarifai/salesforce.blip.general-english-image-caption-blip-2 | `completion('clarifai/salesforce.blip.general-english-image-caption-blip-2', messages)` |
| clarifai/salesforce.xgen.xgen-7b-8k-instruct | `completion('clarifai/salesforce.xgen.xgen-7b-8k-instruct', messages)` |


## Other top-performing LLMs

| Model Name | Function Call |
|---------------------------------------------------|---------------------------------------------------------------------|
| clarifai/deci.decilm.deciLM-7B-instruct | `completion('clarifai/deci.decilm.deciLM-7B-instruct', messages)` |
| clarifai/upstage.solar.solar-10_7b-instruct | `completion('clarifai/upstage.solar.solar-10_7b-instruct', messages)` |
| clarifai/openchat.openchat.openchat-3_5-1210 | 
`completion('clarifai/openchat.openchat.openchat-3_5-1210', messages)` | +| clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B | `completion('clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B', messages)` | +| clarifai/fblgit.una-cybertron.una-cybertron-7b-v2 | `completion('clarifai/fblgit.una-cybertron.una-cybertron-7b-v2', messages)` | +| clarifai/tiiuae.falcon.falcon-40b-instruct | `completion('clarifai/tiiuae.falcon.falcon-40b-instruct', messages)` | +| clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat | `completion('clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat', messages)` | +| clarifai/bigcode.code.StarCoder | `completion('clarifai/bigcode.code.StarCoder', messages)` | +| clarifai/mosaicml.mpt.mpt-7b-instruct | `completion('clarifai/mosaicml.mpt.mpt-7b-instruct', messages)` | diff --git a/litellm/__init__.py b/litellm/__init__.py index 5ef78dce4..8523ee577 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -49,6 +49,7 @@ azure_key: Optional[str] = None anthropic_key: Optional[str] = None replicate_key: Optional[str] = None cohere_key: Optional[str] = None +clarifai_key: Optional[str] = None maritalk_key: Optional[str] = None ai21_key: Optional[str] = None openrouter_key: Optional[str] = None @@ -366,6 +367,73 @@ replicate_models: List = [ "replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad", ] +clarifai_models: List = [ + 'clarifai/meta.Llama-3.Llama-3-8B-Instruct', + 'clarifai/gcp.generate.gemma-1_1-7b-it', + 'clarifai/mistralai.completion.mixtral-8x22B', + 'clarifai/cohere.generate.command-r-plus', + 'clarifai/databricks.drbx.dbrx-instruct', + 'clarifai/mistralai.completion.mistral-large', + 'clarifai/mistralai.completion.mistral-medium', + 'clarifai/mistralai.completion.mistral-small', + 'clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1', + 'clarifai/gcp.generate.gemma-2b-it', + 'clarifai/gcp.generate.gemma-7b-it', + 'clarifai/deci.decilm.deciLM-7B-instruct', + 'clarifai/mistralai.completion.mistral-7B-Instruct', + 'clarifai/gcp.generate.gemini-pro', + 'clarifai/anthropic.completion.claude-v1', + 'clarifai/anthropic.completion.claude-instant-1_2', + 'clarifai/anthropic.completion.claude-instant', + 'clarifai/anthropic.completion.claude-v2', + 'clarifai/anthropic.completion.claude-2_1', + 'clarifai/meta.Llama-2.codeLlama-70b-Python', + 'clarifai/meta.Llama-2.codeLlama-70b-Instruct', + 'clarifai/openai.completion.gpt-3_5-turbo-instruct', + 'clarifai/meta.Llama-2.llama2-7b-chat', + 'clarifai/meta.Llama-2.llama2-13b-chat', + 'clarifai/meta.Llama-2.llama2-70b-chat', + 'clarifai/openai.chat-completion.gpt-4-turbo', + 'clarifai/microsoft.text-generation.phi-2', + 'clarifai/meta.Llama-2.llama2-7b-chat-vllm', + 'clarifai/upstage.solar.solar-10_7b-instruct', + 'clarifai/openchat.openchat.openchat-3_5-1210', + 'clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B', + 'clarifai/gcp.generate.text-bison', + 'clarifai/meta.Llama-2.llamaGuard-7b', + 'clarifai/fblgit.una-cybertron.una-cybertron-7b-v2', + 'clarifai/openai.chat-completion.GPT-4', + 'clarifai/openai.chat-completion.GPT-3_5-turbo', + 'clarifai/ai21.complete.Jurassic2-Grande', + 'clarifai/ai21.complete.Jurassic2-Grande-Instruct', + 'clarifai/ai21.complete.Jurassic2-Jumbo-Instruct', + 'clarifai/ai21.complete.Jurassic2-Jumbo', + 'clarifai/ai21.complete.Jurassic2-Large', + 'clarifai/cohere.generate.cohere-generate-command', + 'clarifai/wizardlm.generate.wizardCoder-Python-34B', + 'clarifai/wizardlm.generate.wizardLM-70B', + 
'clarifai/tiiuae.falcon.falcon-40b-instruct', + 'clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat', + 'clarifai/gcp.generate.code-gecko', + 'clarifai/gcp.generate.code-bison', + 'clarifai/mistralai.completion.mistral-7B-OpenOrca', + 'clarifai/mistralai.completion.openHermes-2-mistral-7B', + 'clarifai/wizardlm.generate.wizardLM-13B', + 'clarifai/huggingface-research.zephyr.zephyr-7B-alpha', + 'clarifai/wizardlm.generate.wizardCoder-15B', + 'clarifai/microsoft.text-generation.phi-1_5', + 'clarifai/databricks.Dolly-v2.dolly-v2-12b', + 'clarifai/bigcode.code.StarCoder', + 'clarifai/salesforce.xgen.xgen-7b-8k-instruct', + 'clarifai/mosaicml.mpt.mpt-7b-instruct', + 'clarifai/anthropic.completion.claude-3-opus', + 'clarifai/anthropic.completion.claude-3-sonnet', + 'clarifai/gcp.generate.gemini-1_5-pro', + 'clarifai/gcp.generate.imagen-2', + 'clarifai/salesforce.blip.general-english-image-caption-blip-2', +] + + huggingface_models: List = [ "meta-llama/Llama-2-7b-hf", "meta-llama/Llama-2-7b-chat-hf", @@ -470,6 +538,7 @@ provider_list: List = [ "text-completion-openai", "cohere", "cohere_chat", + "clarifai", "anthropic", "replicate", "huggingface", @@ -608,6 +677,7 @@ from .llms.anthropic import AnthropicConfig from .llms.anthropic_text import AnthropicTextConfig from .llms.replicate import ReplicateConfig from .llms.cohere import CohereConfig +from .llms.clarifai import ClarifaiConfig from .llms.ai21 import AI21Config from .llms.together_ai import TogetherAIConfig from .llms.cloudflare import CloudflareConfig diff --git a/litellm/llms/clarifai.py b/litellm/llms/clarifai.py new file mode 100644 index 000000000..2a7d77c61 --- /dev/null +++ b/litellm/llms/clarifai.py @@ -0,0 +1,216 @@ +import os, types, traceback +import json +import requests +import time +from typing import Callable, Optional +from litellm.utils import ModelResponse, Usage, Choices, Message +import litellm +import httpx +from .prompt_templates.factory import prompt_factory, custom_prompt + + +class ClarifaiError(Exception): + def __init__(self, status_code, message, url): + self.status_code = status_code + self.message = message + self.request = httpx.Request( + method="POST", url=url + ) + self.response = httpx.Response(status_code=status_code, request=self.request) + super().__init__( + self.message + ) + +class ClarifaiConfig: + """ + Reference: https://clarifai.com/meta/Llama-2/models/llama2-70b-chat + TODO fill in the details + """ + max_tokens: Optional[int] = None + temperature: Optional[int] = None + top_k: Optional[int] = None + + def __init__( + self, + max_tokens: Optional[int] = None, + temperature: Optional[int] = None, + top_k: Optional[int] = None, + ) -> None: + locals_ = locals() + for key, value in locals_.items(): + if key != "self" and value is not None: + setattr(self.__class__, key, value) + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + +def validate_environment(api_key): + headers = { + "accept": "application/json", + "content-type": "application/json", + } + if api_key: + headers["Authorization"] = f"Bearer {api_key}" + return headers + +def completions_to_model(payload): + # if payload["n"] != 1: + # raise HTTPException( + # status_code=422, + # detail="Only one generation is supported. 
Please set candidate_count to 1.", + # ) + + params = {} + if temperature := payload.get("temperature"): + params["temperature"] = temperature + if max_tokens := payload.get("max_tokens"): + params["max_tokens"] = max_tokens + return { + "inputs": [{"data": {"text": {"raw": payload["prompt"]}}}], + "model": {"output_info": {"params": params}}, +} + +def convert_model_to_url(model: str, api_base: str): + user_id, app_id, model_id = model.split(".") + return f"{api_base}/users/{user_id}/apps/{app_id}/models/{model_id}/outputs" + +def get_prompt_model_name(url: str): + clarifai_model_name = url.split("/")[-2] + if "claude" in clarifai_model_name: + return "anthropic", clarifai_model_name.replace("_", ".") + if ("llama" in clarifai_model_name)or ("mistral" in clarifai_model_name): + return "", "meta-llama/llama-2-chat" + else: + return "", clarifai_model_name + +def completion( + model: str, + messages: list, + api_base: str, + model_response: ModelResponse, + print_verbose: Callable, + encoding, + api_key, + logging_obj, + custom_prompt_dict={}, + optional_params=None, + litellm_params=None, + logger_fn=None, +): + headers = validate_environment(api_key) + model = convert_model_to_url(model, api_base) + prompt = " ".join(message["content"] for message in messages) # TODO + + ## Load Config + config = litellm.ClarifaiConfig.get_config() + for k, v in config.items(): + if ( + k not in optional_params + ): + optional_params[k] = v + + custom_llm_provider, orig_model_name = get_prompt_model_name(model) + if custom_llm_provider == "anthropic": + prompt = prompt_factory( + model=orig_model_name, + messages=messages, + api_key=api_key, + custom_llm_provider="clarifai" + ) + else: + prompt = prompt_factory( + model=orig_model_name, + messages=messages, + api_key=api_key, + custom_llm_provider=custom_llm_provider + ) + # print(prompt); exit(0) + + data = { + "prompt": prompt, + **optional_params, + } + data = completions_to_model(data) + + + ## LOGGING + logging_obj.pre_call( + input=prompt, + api_key=api_key, + additional_args={ + "complete_input_dict": data, + "headers": headers, + "api_base": api_base, + }, + ) + + ## COMPLETION CALL + response = requests.post( + model, + headers=headers, + data=json.dumps(data), + ) + # print(response.content); exit() + """ + {"status":{"code":10000,"description":"Ok","req_id":"d914cf7e097487997910650cde954a37"},"outputs":[{"id":"c2baa668174b4547bd4d2e9f8996198d","status":{"code":10000,"description":"Ok"},"created_at":"2024-02-07T10:57:52.917990493Z","model":{"id":"GPT-4","name":"GPT-4","created_at":"2023-06-08T17:40:07.964967Z","modified_at":"2023-12-04T11:39:54.587604Z","app_id":"chat-completion","model_version":{"id":"5d7a50b44aec4a01a9c492c5a5fcf387","created_at":"2023-11-09T19:57:56.961259Z","status":{"code":21100,"description":"Model is trained and 
ready"},"completed_at":"2023-11-09T20:00:48.933172Z","visibility":{"gettable":50},"app_id":"chat-completion","user_id":"openai","metadata":{}},"user_id":"openai","model_type_id":"text-to-text","visibility":{"gettable":50},"toolkits":[],"use_cases":[],"languages":[],"languages_full":[],"check_consents":[],"workflow_recommended":false,"image":{"url":"https://data.clarifai.com/small/users/openai/apps/chat-completion/inputs/image/34326a9914d361bb93ae8e5381689755","hosted":{"prefix":"https://data.clarifai.com","suffix":"users/openai/apps/chat-completion/inputs/image/34326a9914d361bb93ae8e5381689755","sizes":["small"],"crossorigin":"use-credentials"}}},"input":{"id":"fba1f22a332743f083ddae0a7eb443ae","data":{"text":{"raw":"what\'s the weather in SF","url":"https://samples.clarifai.com/placeholder.gif"}}},"data":{"text":{"raw":"As an AI, I\'m unable to provide real-time information or updates. Please check a reliable weather website or app for the current weather in San Francisco.","text_info":{"encoding":"UnknownTextEnc"}}}}]} + """ + if response.status_code != 200: + raise ClarifaiError(status_code=response.status_code, message=response.text, url=model) + if "stream" in optional_params and optional_params["stream"] == True: + return response.iter_lines() + else: + logging_obj.post_call( + input=prompt, + api_key=api_key, + original_response=response.text, + additional_args={"complete_input_dict": data}, + ) + ## RESPONSE OBJECT + completion_response = response.json() + # print(completion_response) + try: + choices_list = [] + for idx, item in enumerate(completion_response["outputs"]): + if len(item["data"]["text"]["raw"]) > 0: + message_obj = Message(content=item["data"]["text"]["raw"]) + else: + message_obj = Message(content=None) + choice_obj = Choices( + finish_reason="stop", + index=idx + 1, #check + message=message_obj, + ) + choices_list.append(choice_obj) + model_response["choices"] = choices_list + except Exception as e: + raise ClarifaiError( + message=traceback.format_exc(), status_code=response.status_code, url=model + ) + + # Calculate Usage + prompt_tokens = len(encoding.encode(prompt)) + completion_tokens = len( + encoding.encode(model_response["choices"][0]["message"].get("content")) + ) + model_response["model"] = model + model_response["usage"] = Usage( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens, + ) + return model_response \ No newline at end of file diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index 8afda252a..14f1018b9 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -1306,6 +1306,9 @@ def prompt_factory( return anthropic_pt(messages=messages) elif "mistral." 
in model: return mistral_instruct_pt(messages=messages) + elif custom_llm_provider == "clarifai": + if "claude" in model: + return anthropic_pt(messages=messages) elif custom_llm_provider == "perplexity": for message in messages: message.pop("name", None) diff --git a/litellm/main.py b/litellm/main.py index 593fc7eae..334fe7b6e 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -53,6 +53,7 @@ from .llms import ( ollama, ollama_chat, cloudflare, + clarifai, cohere, cohere_chat, petals, @@ -1150,6 +1151,55 @@ def completion( ) response = model_response + elif ("clarifai" in model + or custom_llm_provider == "clarifai" + or model in litellm.clarifai_models + ): + clarifai_key = None + clarifai_key = ( + api_key + or litellm.clarifai_key + or litellm.api_key + or get_secret("CLARIFAI_API_KEY") + or get_secret("CLARIFAI_API_TOKEN") + ) + + api_base = ( + api_base + or litellm.api_base + or get_secret("CLARIFAI_API_BASE") + or "https://api.clarifai.com/v2" + ) + + custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict + model_response = clarifai.completion( + model=model, + messages=messages, + api_base=api_base, + model_response=model_response, + print_verbose=print_verbose, + optional_params=optional_params, + litellm_params=litellm_params, + logger_fn=logger_fn, + encoding=encoding, # for calculating input/output tokens + api_key=clarifai_key, + logging_obj=logging, + custom_prompt_dict=custom_prompt_dict, + ) + + if "stream" in optional_params and optional_params["stream"] == True: + # don't try to access stream object, + + model_response = CustomStreamWrapper(model_response, model, logging_obj=logging, custom_llm_provider="replicate") + + if optional_params.get("stream", False) or acompletion == True: + ## LOGGING + logging.post_call( + input=messages, + api_key=clarifai_key, + original_response=model_response, + ) + response = model_response elif custom_llm_provider == "anthropic": api_key = ( diff --git a/litellm/tests/test_clarifai_completion.py b/litellm/tests/test_clarifai_completion.py new file mode 100644 index 000000000..2c2626398 --- /dev/null +++ b/litellm/tests/test_clarifai_completion.py @@ -0,0 +1,67 @@ +import sys, os +import traceback +from dotenv import load_dotenv + +load_dotenv() +import os, io + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import pytest +import litellm +from litellm import embedding, completion, completion_cost, Timeout, ModelResponse +from litellm import RateLimitError + +# litellm.num_retries = 3 +litellm.cache = None +litellm.success_callback = [] +user_message = "Write a short poem about the sky" +messages = [{"content": user_message, "role": "user"}] + +@pytest.fixture(autouse=True) +def reset_callbacks(): + print("\npytest fixture - resetting callbacks") + litellm.success_callback = [] + litellm._async_success_callback = [] + litellm.failure_callback = [] + litellm.callbacks = [] + +def test_completion_clarifai_claude_2_1(): + print("calling clarifai claude completion") + import os + + clarifai_pat = os.environ["CLARIFAI_API_KEY"] + + try: + response = completion( + model="clarifai/anthropic.completion.claude-2_1", + messages=messages, + max_tokens=10, + temperature=0.1, + ) + print(response) + + except RateLimitError: + pass + + except Exception as e: + pytest.fail(f"Error occured: {e}") + + +def test_completion_clarifai_mistral_large(): + try: + litellm.set_verbose = True + response: ModelResponse = completion( + model="clarifai/mistralai.completion.mistral-small", + 
messages=messages,
            max_tokens=10,
            temperature=0.78,
        )
        # Add any assertions here to check the response
        assert len(response.choices) > 0
        assert len(response.choices[0].message.content) > 0
    except RateLimitError:
        pass
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")

From 64d229caaa6e57f7dd169d82c5dc6a06d3aae5b1 Mon Sep 17 00:00:00 2001
From: ffreemt
Date: Thu, 2 May 2024 19:30:01 +0800
Subject: [PATCH 008/543] Add return_exceptions to litellm.batch_completion for
 optionally returning exceptions and partial results instead of throwing
 exceptions

---
 litellm/main.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/litellm/main.py b/litellm/main.py
index 51ec95401..11ab0a0b9 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -2303,6 +2303,7 @@ def batch_completion(
         user (str, optional): The user string for generating completions. Defaults to "".
         deployment_id (optional): The deployment ID for generating completions. Defaults to None.
         request_timeout (int, optional): The request timeout for generating completions. Defaults to None.
+        return_exceptions (bool): Whether to return exceptions and partial results when exceptions occur. Defaults to False.

     Returns:
         list: A list of completion results.
@@ -2361,7 +2362,17 @@ def batch_completion(
             completions.append(future)

     # Retrieve the results from the futures
-    results = [future.result() for future in completions]
+    # results = [future.result() for future in completions]
+    if return_exceptions:
+        results = []
+        for future in completions:
+            try:
+                results.append(future.result())
+            except Exception as exc:
+                results.append(exc)
+    else:
+        results = [future.result() for future in completions]
+
     return results

From ff8d1bc68cad51e323558a9ca5033ec0cb1c91ce Mon Sep 17 00:00:00 2001
From: jinno
Date: Thu, 2 May 2024 21:39:51 +0900
Subject: [PATCH 009/543] fix(exceptions.py): import openai Exceptions

---
 litellm/exceptions.py | 47 +++++++++++++++++--------------------------
 1 file changed, 18 insertions(+), 29 deletions(-)

diff --git a/litellm/exceptions.py b/litellm/exceptions.py
index d8b0a7c55..7c3471acf 100644
--- a/litellm/exceptions.py
+++ b/litellm/exceptions.py
@@ -9,25 +9,12 @@
 ## LiteLLM versions of the OpenAI Exception Types
-from openai import (
-    AuthenticationError,
-    BadRequestError,
-    NotFoundError,
-    RateLimitError,
-    APIStatusError,
-    OpenAIError,
-    APIError,
-    APITimeoutError,
-    APIConnectionError,
-    APIResponseValidationError,
-    UnprocessableEntityError,
-    PermissionDeniedError,
-)
+import openai
 import httpx
 from typing import Optional


-class AuthenticationError(AuthenticationError):  # type: ignore
+class AuthenticationError(openai.AuthenticationError):  # type: ignore
     def __init__(self, message, llm_provider, model, response: httpx.Response):
         self.status_code = 401
         self.message = message
@@ -39,7 +26,7 @@ class AuthenticationError(AuthenticationError):  # type: ignore


 # raise when invalid models passed, example gpt-8
-class NotFoundError(NotFoundError):  # type: ignore
+class NotFoundError(openai.NotFoundError):  # type: ignore
     def __init__(self, message, model, llm_provider, response: httpx.Response):
         self.status_code = 404
         self.message = message
@@ -50,7 +37,7 @@ class NotFoundError(NotFoundError):  # type: ignore
     )  # Call the base class constructor with the parameters it needs


-class BadRequestError(BadRequestError):  # type: ignore
+class BadRequestError(openai.BadRequestError):  # type: ignore
     def __init__(
         self, message, model, llm_provider, response: Optional[httpx.Response] = 
None ): @@ -69,7 +56,7 @@ class BadRequestError(BadRequestError): # type: ignore ) # Call the base class constructor with the parameters it needs -class UnprocessableEntityError(UnprocessableEntityError): # type: ignore +class UnprocessableEntityError(openai.UnprocessableEntityError): # type: ignore def __init__(self, message, model, llm_provider, response: httpx.Response): self.status_code = 422 self.message = message @@ -80,7 +67,7 @@ class UnprocessableEntityError(UnprocessableEntityError): # type: ignore ) # Call the base class constructor with the parameters it needs -class Timeout(APITimeoutError): # type: ignore +class Timeout(openai.APITimeoutError): # type: ignore def __init__(self, message, model, llm_provider): request = httpx.Request(method="POST", url="https://api.openai.com/v1") super().__init__( @@ -96,7 +83,7 @@ class Timeout(APITimeoutError): # type: ignore return str(self.message) -class PermissionDeniedError(PermissionDeniedError): # type:ignore +class PermissionDeniedError(openai.PermissionDeniedError): # type:ignore def __init__(self, message, llm_provider, model, response: httpx.Response): self.status_code = 403 self.message = message @@ -107,7 +94,7 @@ class PermissionDeniedError(PermissionDeniedError): # type:ignore ) # Call the base class constructor with the parameters it needs -class RateLimitError(RateLimitError): # type: ignore +class RateLimitError(openai.RateLimitError): # type: ignore def __init__(self, message, llm_provider, model, response: httpx.Response): self.status_code = 429 self.message = message @@ -148,7 +135,7 @@ class ContentPolicyViolationError(BadRequestError): # type: ignore ) # Call the base class constructor with the parameters it needs -class ServiceUnavailableError(APIStatusError): # type: ignore +class ServiceUnavailableError(openai.APIStatusError): # type: ignore def __init__(self, message, llm_provider, model, response: httpx.Response): self.status_code = 503 self.message = message @@ -160,7 +147,7 @@ class ServiceUnavailableError(APIStatusError): # type: ignore # raise this when the API returns an invalid response object - https://github.com/openai/openai-python/blob/1be14ee34a0f8e42d3f9aa5451aa4cb161f1781f/openai/api_requestor.py#L401 -class APIError(APIError): # type: ignore +class APIError(openai.APIError): # type: ignore def __init__( self, status_code, message, llm_provider, model, request: httpx.Request ): @@ -172,7 +159,7 @@ class APIError(APIError): # type: ignore # raised if an invalid request (not get, delete, put, post) is made -class APIConnectionError(APIConnectionError): # type: ignore +class APIConnectionError(openai.APIConnectionError): # type: ignore def __init__(self, message, llm_provider, model, request: httpx.Request): self.message = message self.llm_provider = llm_provider @@ -182,7 +169,7 @@ class APIConnectionError(APIConnectionError): # type: ignore # raised if an invalid request (not get, delete, put, post) is made -class APIResponseValidationError(APIResponseValidationError): # type: ignore +class APIResponseValidationError(openai.APIResponseValidationError): # type: ignore def __init__(self, message, llm_provider, model): self.message = message self.llm_provider = llm_provider @@ -192,7 +179,7 @@ class APIResponseValidationError(APIResponseValidationError): # type: ignore super().__init__(response=response, body=None, message=message) -class OpenAIError(OpenAIError): # type: ignore +class OpenAIError(openai.OpenAIError): # type: ignore def __init__(self, original_exception): self.status_code = 
original_exception.http_status super().__init__( @@ -214,12 +201,14 @@ class BudgetExceededError(Exception): ## DEPRECATED ## -class InvalidRequestError(BadRequestError): # type: ignore - def __init__(self, message, model, llm_provider): +class InvalidRequestError(openai.BadRequestError): # type: ignore + def __init__( + self, message, model, llm_provider, response: Optional[httpx.Response] = None + ): self.status_code = 400 self.message = message self.model = model self.llm_provider = llm_provider super().__init__( - self.message, f"{self.model}" + self.message, response=response, body=None ) # Call the base class constructor with the parameters it needs From 6cec252b076012fd3ea5a8b06d603e05c38dd789 Mon Sep 17 00:00:00 2001 From: Lunik Date: Thu, 2 May 2024 23:12:48 +0200 Subject: [PATCH 010/543] =?UTF-8?q?=E2=9C=A8=20feat:=20Add=20Azure=20Conte?= =?UTF-8?q?nt-Safety=20Proxy=20hooks?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lunik --- litellm/proxy/hooks/azure_content_safety.py | 157 ++++++++++++++++++++ litellm/proxy/proxy_server.py | 17 +++ 2 files changed, 174 insertions(+) create mode 100644 litellm/proxy/hooks/azure_content_safety.py diff --git a/litellm/proxy/hooks/azure_content_safety.py b/litellm/proxy/hooks/azure_content_safety.py new file mode 100644 index 000000000..161e35cde --- /dev/null +++ b/litellm/proxy/hooks/azure_content_safety.py @@ -0,0 +1,157 @@ +from litellm.integrations.custom_logger import CustomLogger +from litellm.caching import DualCache +from litellm.proxy._types import UserAPIKeyAuth +import litellm, traceback, sys, uuid +from fastapi import HTTPException +from litellm._logging import verbose_proxy_logger + + +class _PROXY_AzureContentSafety( + CustomLogger +): # https://docs.litellm.ai/docs/observability/custom_callback#callback-class + # Class variables or attributes + + def __init__(self, endpoint, api_key, thresholds=None): + try: + from azure.ai.contentsafety.aio import ContentSafetyClient + from azure.core.credentials import AzureKeyCredential + from azure.ai.contentsafety.models import TextCategory + from azure.ai.contentsafety.models import AnalyzeTextOptions + from azure.core.exceptions import HttpResponseError + except Exception as e: + raise Exception( + f"\033[91mAzure Content-Safety not installed, try running 'pip install azure-ai-contentsafety' to fix this error: {e}\n{traceback.format_exc()}\033[0m" + ) + self.endpoint = endpoint + self.api_key = api_key + self.text_category = TextCategory + self.analyze_text_options = AnalyzeTextOptions + self.azure_http_error = HttpResponseError + + self.thresholds = self._configure_thresholds(thresholds) + + self.client = ContentSafetyClient( + self.endpoint, AzureKeyCredential(self.api_key) + ) + + def _configure_thresholds(self, thresholds=None): + default_thresholds = { + self.text_category.HATE: 6, + self.text_category.SELF_HARM: 6, + self.text_category.SEXUAL: 6, + self.text_category.VIOLENCE: 6, + } + + if thresholds is None: + return default_thresholds + + for key, default in default_thresholds.items(): + if key not in thresholds: + thresholds[key] = default + + return thresholds + + def print_verbose(self, print_statement): + try: + verbose_proxy_logger.debug(print_statement) + if litellm.set_verbose: + print(print_statement) # noqa + except: + pass + + def _severity(self, severity): + if severity >= 6: + return "high" + elif severity >= 4: + return "medium" + elif severity >= 2: + return "low" + else: + return "safe" + + def 
_compute_result(self, response): + result = {} + + category_severity = { + item.category: item.severity for item in response.categories_analysis + } + for category in self.text_category: + severity = category_severity.get(category) + if severity is not None: + result[category] = { + "filtered": severity >= self.thresholds[category], + "severity": self._severity(severity), + } + + return result + + async def test_violation(self, content: str, source: str = None): + self.print_verbose(f"Testing Azure Content-Safety for: {content}") + + # Construct a request + request = self.analyze_text_options(text=content) + + # Analyze text + try: + response = await self.client.analyze_text(request) + except self.azure_http_error as e: + self.print_verbose( + f"Error in Azure Content-Safety: {traceback.format_exc()}" + ) + traceback.print_exc() + raise + + result = self._compute_result(response) + self.print_verbose(f"Azure Content-Safety Result: {result}") + + for key, value in result.items(): + if value["filtered"]: + raise HTTPException( + status_code=400, + detail={ + "error": "Violated content safety policy", + "source": source, + "category": key, + "severity": value["severity"], + }, + ) + + async def async_pre_call_hook( + self, + user_api_key_dict: UserAPIKeyAuth, + cache: DualCache, + data: dict, + call_type: str, # "completion", "embeddings", "image_generation", "moderation" + ): + self.print_verbose(f"Inside Azure Content-Safety Pre-Call Hook") + try: + if call_type == "completion" and "messages" in data: + for m in data["messages"]: + if "content" in m and isinstance(m["content"], str): + await self.test_violation(content=m["content"], source="input") + + except HTTPException as e: + raise e + except Exception as e: + traceback.print_exc() + + async def async_post_call_success_hook( + self, + user_api_key_dict: UserAPIKeyAuth, + response, + ): + self.print_verbose(f"Inside Azure Content-Safety Post-Call Hook") + if isinstance(response, litellm.ModelResponse) and isinstance( + response.choices[0], litellm.utils.Choices + ): + await self.test_violation( + content=response.choices[0].message.content, source="output" + ) + + async def async_post_call_streaming_hook( + self, + user_api_key_dict: UserAPIKeyAuth, + response: str, + ): + self.print_verbose(f"Inside Azure Content-Safety Call-Stream Hook") + await self.test_violation(content=response, source="output") diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 9cc871966..9c74659cc 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -2235,6 +2235,23 @@ class ProxyConfig: batch_redis_obj = _PROXY_BatchRedisRequests() imported_list.append(batch_redis_obj) + elif ( + isinstance(callback, str) + and callback == "azure_content_safety" + ): + from litellm.proxy.hooks.azure_content_safety import ( + _PROXY_AzureContentSafety, + ) + + azure_content_safety_params = litellm_settings["azure_content_safety_params"] + for k, v in azure_content_safety_params.items(): + if v is not None and isinstance(v, str) and v.startswith("os.environ/"): + azure_content_safety_params[k] = litellm.get_secret(v) + + azure_content_safety_obj = _PROXY_AzureContentSafety( + **azure_content_safety_params, + ) + imported_list.append(azure_content_safety_obj) else: imported_list.append( get_instance_fn( From 406c9820d1950cf60256a50989212bd6d470cd3e Mon Sep 17 00:00:00 2001 From: Lunik Date: Thu, 2 May 2024 23:28:21 +0200 Subject: [PATCH 011/543] =?UTF-8?q?=E2=9E=95=20feat:=20Add=20python=20requ?= 
=?UTF-8?q?irements=20for=20Azure=20Content-Safety=20callback?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lunik --- requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/requirements.txt b/requirements.txt index fbf7a28c7..7e2fa3c18 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,6 +26,8 @@ fastapi-sso==0.10.0 # admin UI, SSO pyjwt[crypto]==2.8.0 python-multipart==0.0.9 # admin UI Pillow==10.3.0 +azure-ai-contentsafety==1.0.0 # for azure content safety +azure-identity==1.15.0 # for azure content safety ### LITELLM PACKAGE DEPENDENCIES python-dotenv==1.0.0 # for env From cdb39e90ce5c38ef471cf315a77b6bcc97b6edad Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Thu, 2 May 2024 14:42:20 -0700 Subject: [PATCH 012/543] Improve mocking in test_proxy_exception_mapping Mock the calls to the backend and assert that the correct parameters are passed to the backend. --- litellm/tests/test_proxy_exception_mapping.py | 71 +++++++++++++++---- 1 file changed, 59 insertions(+), 12 deletions(-) diff --git a/litellm/tests/test_proxy_exception_mapping.py b/litellm/tests/test_proxy_exception_mapping.py index 0cc7b0d30..ccd071d01 100644 --- a/litellm/tests/test_proxy_exception_mapping.py +++ b/litellm/tests/test_proxy_exception_mapping.py @@ -1,6 +1,8 @@ # test that the proxy actually does exception mapping to the OpenAI format import sys, os +from unittest import mock +import json from dotenv import load_dotenv load_dotenv() @@ -12,13 +14,30 @@ sys.path.insert( import pytest import litellm, openai from fastapi.testclient import TestClient -from fastapi import FastAPI +from fastapi import Response from litellm.proxy.proxy_server import ( router, save_worker_config, initialize, ) # Replace with the actual module where your FastAPI router is defined +invalid_authentication_error_response = Response( + status_code=401, + content=json.dumps({"error": "Invalid Authentication"}), +) +context_length_exceeded_error_response_dict = { + "error": { + "message": "AzureException - Error code: 400 - {'error': {'message': \"This model's maximum context length is 4096 tokens. However, your messages resulted in 10007 tokens. 
Please reduce the length of the messages.\", 'type': 'invalid_request_error', 'param': 'messages', 'code': 'context_length_exceeded'}}", + "type": None, + "param": None, + "code": 400, + }, +} +context_length_exceeded_error_response = Response( + status_code=400, + content=json.dumps(context_length_exceeded_error_response_dict), +) + @pytest.fixture def client(): @@ -60,7 +79,11 @@ def test_chat_completion_exception(client): # raise openai.AuthenticationError -def test_chat_completion_exception_azure(client): +@mock.patch( + "litellm.proxy.proxy_server.llm_router.acompletion", + return_value=invalid_authentication_error_response, +) +def test_chat_completion_exception_azure(mock_acompletion, client): try: # Your test data test_data = { @@ -73,6 +96,15 @@ def test_chat_completion_exception_azure(client): response = client.post("/chat/completions", json=test_data) + mock_acompletion.assert_called_once_with( + **test_data, + litellm_call_id=mock.ANY, + litellm_logging_obj=mock.ANY, + request_timeout=mock.ANY, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) + json_response = response.json() print("keys in json response", json_response.keys()) assert json_response.keys() == {"error"} @@ -90,12 +122,21 @@ def test_chat_completion_exception_azure(client): # raise openai.AuthenticationError -def test_embedding_auth_exception_azure(client): +@mock.patch( + "litellm.proxy.proxy_server.llm_router.aembedding", + return_value=invalid_authentication_error_response, +) +def test_embedding_auth_exception_azure(mock_aembedding, client): try: # Your test data test_data = {"model": "azure-embedding", "input": ["hi"]} response = client.post("/embeddings", json=test_data) + mock_aembedding.assert_called_once_with( + **test_data, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) print("Response from proxy=", response) json_response = response.json() @@ -204,7 +245,11 @@ def test_embedding_exception_any_model(client): # raise openai.BadRequestError -def test_chat_completion_exception_azure_context_window(client): +@mock.patch( + "litellm.proxy.proxy_server.llm_router.acompletion", + return_value=context_length_exceeded_error_response, +) +def test_chat_completion_exception_azure_context_window(mock_acompletion, client): try: # Your test data test_data = { @@ -219,20 +264,22 @@ def test_chat_completion_exception_azure_context_window(client): response = client.post("/chat/completions", json=test_data) print("got response from server", response) + mock_acompletion.assert_called_once_with( + **test_data, + litellm_call_id=mock.ANY, + litellm_logging_obj=mock.ANY, + request_timeout=mock.ANY, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) + json_response = response.json() print("keys in json response", json_response.keys()) assert json_response.keys() == {"error"} - assert json_response == { - "error": { - "message": "AzureException - Error code: 400 - {'error': {'message': \"This model's maximum context length is 4096 tokens. However, your messages resulted in 10007 tokens. 
Please reduce the length of the messages.\", 'type': 'invalid_request_error', 'param': 'messages', 'code': 'context_length_exceeded'}}", - "type": None, - "param": None, - "code": 400, - } - } + assert json_response == context_length_exceeded_error_response_dict # make an openai client to call _make_status_error_from_response openai_client = openai.OpenAI(api_key="anything") From a7ec1772b1457594d3af48cdcb0a382279b841c7 Mon Sep 17 00:00:00 2001 From: ffreemt Date: Fri, 3 May 2024 11:28:38 +0800 Subject: [PATCH 013/543] Add litellm\tests\test_batch_completion_return_exceptions.py --- .gitignore | 2 ++ litellm/main.py | 3 +- ...test_batch_completion_return_exceptions.py | 29 +++++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 litellm/tests/test_batch_completion_return_exceptions.py diff --git a/.gitignore b/.gitignore index abc4ecb0c..50085bd29 100644 --- a/.gitignore +++ b/.gitignore @@ -52,3 +52,5 @@ litellm/proxy/_new_secret_config.yaml litellm/proxy/_new_secret_config.yaml litellm/proxy/_super_secret_config.yaml litellm/proxy/_super_secret_config.yaml +.python-version +litellm/llms/tokenizers/9b5ad71b2ce5302211f9c61530b329a4922fc6a4 diff --git a/litellm/main.py b/litellm/main.py index 11ab0a0b9..8fc07b9bf 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -2143,7 +2143,7 @@ def completion( """ assume input to custom LLM api bases follow this format: resp = requests.post( - api_base, + api_base, json={ 'model': 'meta-llama/Llama-2-13b-hf', # model name 'params': { @@ -2280,6 +2280,7 @@ def batch_completion( deployment_id=None, request_timeout: Optional[int] = None, timeout: Optional[int] = 600, + return_exceptions: bool = False, # Optional liteLLM function params **kwargs, ): diff --git a/litellm/tests/test_batch_completion_return_exceptions.py b/litellm/tests/test_batch_completion_return_exceptions.py new file mode 100644 index 000000000..b44146993 --- /dev/null +++ b/litellm/tests/test_batch_completion_return_exceptions.py @@ -0,0 +1,29 @@ +"""Test batch_completion's return_exceptions.""" +import pytest +import litellm + +msg1 = [{"role": "user", "content": "hi 1"}] +msg2 = [{"role": "user", "content": "hi 2"}] + + +def test_batch_completion_return_exceptions_default(): + """Test batch_completion's return_exceptions.""" + with pytest.raises(Exception): + _ = litellm.batch_completion( + model="gpt-3.5-turbo", + messages=[msg1, msg2], + api_key="sk_xxx", # deliberately set invalid key + # return_exceptions=False, + ) + + +def test_batch_completion_return_exceptions_true(): + """Test batch_completion's return_exceptions.""" + res = litellm.batch_completion( + model="gpt-3.5-turbo", + messages=[msg1, msg2], + api_key="sk_xxx", # deliberately set invalid key + return_exceptions=True, + ) + + assert isinstance(res[0], litellm.exceptions.AuthenticationError) From 723ef9963e9c9468d98a25ab5aa8ed3f2499ccac Mon Sep 17 00:00:00 2001 From: mogith-pn <143642606+mogith-pn@users.noreply.github.com> Date: Fri, 3 May 2024 14:03:38 +0000 Subject: [PATCH 014/543] Clarifai - Added streaming and async completion support --- cookbook/liteLLM_clarifai_Demo.ipynb | 38 +++- litellm/llms/clarifai.py | 206 +++++++++++++++++----- litellm/main.py | 9 +- litellm/tests/test_clarifai_completion.py | 28 ++- litellm/tests/test_streaming.py | 3 +- litellm/utils.py | 28 +++ 6 files changed, 259 insertions(+), 53 deletions(-) diff --git a/cookbook/liteLLM_clarifai_Demo.ipynb b/cookbook/liteLLM_clarifai_Demo.ipynb index 4e3b4dbb0..40ef2fcf9 100644 --- 
a/cookbook/liteLLM_clarifai_Demo.ipynb +++ b/cookbook/liteLLM_clarifai_Demo.ipynb @@ -119,6 +119,42 @@ "print(f\"Claude-2.1 response : {response}\")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### OpenAI GPT-4 (Streaming)\n", + "Though clarifai doesn't support streaming, still you can call stream and get the response in standard StreamResponse format of liteLLM" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ModelResponse(id='chatcmpl-40ae19af-3bf0-4eb4-99f2-33aec3ba84af', choices=[StreamingChoices(finish_reason=None, index=0, delta=Delta(content=\"In the quiet corners of time's grand hall,\\nLies the tale of rise and fall.\\nFrom ancient ruins to modern sprawl,\\nHistory, the greatest story of them all.\\n\\nEmpires have risen, empires have decayed,\\nThrough the eons, memories have stayed.\\nIn the book of time, history is laid,\\nA tapestry of events, meticulously displayed.\\n\\nThe pyramids of Egypt, standing tall,\\nThe Roman Empire's mighty sprawl.\\nFrom Alexander's conquest, to the Berlin Wall,\\nHistory, a silent witness to it all.\\n\\nIn the shadow of the past we tread,\\nWhere once kings and prophets led.\\nTheir stories in our hearts are spread,\\nEchoes of their words, in our minds are read.\\n\\nBattles fought and victories won,\\nActs of courage under the sun.\\nTales of love, of deeds done,\\nIn history's grand book, they all run.\\n\\nHeroes born, legends made,\\nIn the annals of time, they'll never fade.\\nTheir triumphs and failures all displayed,\\nIn the eternal march of history's parade.\\n\\nThe ink of the past is forever dry,\\nBut its lessons, we cannot deny.\\nIn its stories, truths lie,\\nIn its wisdom, we rely.\\n\\nHistory, a mirror to our past,\\nA guide for the future vast.\\nThrough its lens, we're ever cast,\\nIn the drama of life, forever vast.\", role='assistant', function_call=None, tool_calls=None), logprobs=None)], created=1714744515, model='https://api.clarifai.com/v2/users/openai/apps/chat-completion/models/GPT-4/outputs', object='chat.completion.chunk', system_fingerprint=None)\n", + "ModelResponse(id='chatcmpl-40ae19af-3bf0-4eb4-99f2-33aec3ba84af', choices=[StreamingChoices(finish_reason='stop', index=0, delta=Delta(content=None, role=None, function_call=None, tool_calls=None), logprobs=None)], created=1714744515, model='https://api.clarifai.com/v2/users/openai/apps/chat-completion/models/GPT-4/outputs', object='chat.completion.chunk', system_fingerprint=None)\n" + ] + } + ], + "source": [ + "from litellm import completion\n", + "\n", + "messages = [{\"role\": \"user\",\"content\": \"\"\"Write a poem about history?\"\"\"}]\n", + "response = completion(\n", + " model=\"clarifai/openai.chat-completion.GPT-4\",\n", + " messages=messages,\n", + " stream=True,\n", + " api_key = \"c75cc032415e45368be331fdd2c06db0\")\n", + "\n", + "for chunk in response:\n", + " print(chunk)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -143,7 +179,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.10" + "version": "3.10.13" } }, "nbformat": 4, diff --git a/litellm/llms/clarifai.py b/litellm/llms/clarifai.py index 2a7d77c61..e07a8d9e8 100644 --- a/litellm/llms/clarifai.py +++ b/litellm/llms/clarifai.py @@ -3,9 +3,10 @@ import json import requests import time from typing import Callable, Optional -from litellm.utils import ModelResponse, Usage, Choices, Message +from 
litellm.utils import ModelResponse, Usage, Choices, Message, CustomStreamWrapper import litellm import httpx +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler from .prompt_templates.factory import prompt_factory, custom_prompt @@ -84,6 +85,63 @@ def completions_to_model(payload): "inputs": [{"data": {"text": {"raw": payload["prompt"]}}}], "model": {"output_info": {"params": params}}, } + +def process_response( + model, + prompt, + response, + model_response, + api_key, + data, + encoding, + logging_obj + ): + logging_obj.post_call( + input=prompt, + api_key=api_key, + original_response=response.text, + additional_args={"complete_input_dict": data}, + ) + ## RESPONSE OBJECT + try: + completion_response = response.json() + except Exception: + raise ClarifaiError( + message=response.text, status_code=response.status_code, url=model + ) + # print(completion_response) + try: + choices_list = [] + for idx, item in enumerate(completion_response["outputs"]): + if len(item["data"]["text"]["raw"]) > 0: + message_obj = Message(content=item["data"]["text"]["raw"]) + else: + message_obj = Message(content=None) + choice_obj = Choices( + finish_reason="stop", + index=idx + 1, #check + message=message_obj, + ) + choices_list.append(choice_obj) + model_response["choices"] = choices_list + + except Exception as e: + raise ClarifaiError( + message=traceback.format_exc(), status_code=response.status_code, url=model + ) + + # Calculate Usage + prompt_tokens = len(encoding.encode(prompt)) + completion_tokens = len( + encoding.encode(model_response["choices"][0]["message"].get("content")) + ) + model_response["model"] = model + model_response["usage"] = Usage( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens, + ) + return model_response def convert_model_to_url(model: str, api_base: str): user_id, app_id, model_id = model.split(".") @@ -98,6 +156,40 @@ def get_prompt_model_name(url: str): else: return "", clarifai_model_name +async def async_completion( + model: str, + prompt: str, + api_base: str, + custom_prompt_dict: dict, + model_response: ModelResponse, + print_verbose: Callable, + encoding, + api_key, + logging_obj, + data=None, + optional_params=None, + litellm_params=None, + logger_fn=None, + headers={}): + + async_handler = AsyncHTTPHandler( + timeout=httpx.Timeout(timeout=600.0, connect=5.0) + ) + response = await async_handler.post( + api_base, headers=headers, data=json.dumps(data) + ) + + return process_response( + model=model, + prompt=prompt, + response=response, + model_response=model_response, + api_key=api_key, + data=data, + encoding=encoding, + logging_obj=logging_obj, + ) + def completion( model: str, messages: list, @@ -108,6 +200,7 @@ def completion( api_key, logging_obj, custom_prompt_dict={}, + acompletion=False, optional_params=None, litellm_params=None, logger_fn=None, @@ -158,59 +251,78 @@ def completion( "api_base": api_base, }, ) - - ## COMPLETION CALL - response = requests.post( + if acompletion==True: + return async_completion( + model=model, + prompt=prompt, + api_base=api_base, + custom_prompt_dict=custom_prompt_dict, + model_response=model_response, + print_verbose=print_verbose, + encoding=encoding, + api_key=api_key, + logging_obj=logging_obj, + data=data, + optional_params=optional_params, + litellm_params=litellm_params, + logger_fn=logger_fn, + headers=headers, + ) + else: + ## COMPLETION CALL + response = requests.post( model, headers=headers, data=json.dumps(data), ) # 
print(response.content); exit() - """ - {"status":{"code":10000,"description":"Ok","req_id":"d914cf7e097487997910650cde954a37"},"outputs":[{"id":"c2baa668174b4547bd4d2e9f8996198d","status":{"code":10000,"description":"Ok"},"created_at":"2024-02-07T10:57:52.917990493Z","model":{"id":"GPT-4","name":"GPT-4","created_at":"2023-06-08T17:40:07.964967Z","modified_at":"2023-12-04T11:39:54.587604Z","app_id":"chat-completion","model_version":{"id":"5d7a50b44aec4a01a9c492c5a5fcf387","created_at":"2023-11-09T19:57:56.961259Z","status":{"code":21100,"description":"Model is trained and ready"},"completed_at":"2023-11-09T20:00:48.933172Z","visibility":{"gettable":50},"app_id":"chat-completion","user_id":"openai","metadata":{}},"user_id":"openai","model_type_id":"text-to-text","visibility":{"gettable":50},"toolkits":[],"use_cases":[],"languages":[],"languages_full":[],"check_consents":[],"workflow_recommended":false,"image":{"url":"https://data.clarifai.com/small/users/openai/apps/chat-completion/inputs/image/34326a9914d361bb93ae8e5381689755","hosted":{"prefix":"https://data.clarifai.com","suffix":"users/openai/apps/chat-completion/inputs/image/34326a9914d361bb93ae8e5381689755","sizes":["small"],"crossorigin":"use-credentials"}}},"input":{"id":"fba1f22a332743f083ddae0a7eb443ae","data":{"text":{"raw":"what\'s the weather in SF","url":"https://samples.clarifai.com/placeholder.gif"}}},"data":{"text":{"raw":"As an AI, I\'m unable to provide real-time information or updates. Please check a reliable weather website or app for the current weather in San Francisco.","text_info":{"encoding":"UnknownTextEnc"}}}}]} - """ + if response.status_code != 200: raise ClarifaiError(status_code=response.status_code, message=response.text, url=model) + if "stream" in optional_params and optional_params["stream"] == True: - return response.iter_lines() - else: - logging_obj.post_call( - input=prompt, - api_key=api_key, - original_response=response.text, - additional_args={"complete_input_dict": data}, - ) - ## RESPONSE OBJECT - completion_response = response.json() - # print(completion_response) - try: - choices_list = [] - for idx, item in enumerate(completion_response["outputs"]): - if len(item["data"]["text"]["raw"]) > 0: - message_obj = Message(content=item["data"]["text"]["raw"]) - else: - message_obj = Message(content=None) - choice_obj = Choices( - finish_reason="stop", - index=idx + 1, #check - message=message_obj, - ) - choices_list.append(choice_obj) - model_response["choices"] = choices_list - except Exception as e: - raise ClarifaiError( - message=traceback.format_exc(), status_code=response.status_code, url=model + completion_stream = response.iter_lines() + stream_response = CustomStreamWrapper( + completion_stream=completion_stream, + model=model, + custom_llm_provider="clarifai", + logging_obj=logging_obj, ) + return stream_response + + else: + return process_response( + model=model, + prompt=prompt, + response=response, + model_response=model_response, + api_key=api_key, + data=data, + encoding=encoding, + logging_obj=logging_obj) + - # Calculate Usage - prompt_tokens = len(encoding.encode(prompt)) - completion_tokens = len( - encoding.encode(model_response["choices"][0]["message"].get("content")) - ) - model_response["model"] = model - model_response["usage"] = Usage( - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - total_tokens=prompt_tokens + completion_tokens, - ) - return model_response \ No newline at end of file +class ModelResponseIterator: + def __init__(self, model_response): + 
self.model_response = model_response
+        self.is_done = False
+
+    # Sync iterator
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.is_done:
+            raise StopIteration
+        self.is_done = True
+        return self.model_response
+
+    # Async iterator
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        if self.is_done:
+            raise StopAsyncIteration
+        self.is_done = True
+        return self.model_response
\ No newline at end of file
diff --git a/litellm/main.py b/litellm/main.py
index 0bc802a9c..396ac1779 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -1185,6 +1185,7 @@ def completion(
             print_verbose=print_verbose,
             optional_params=optional_params,
             litellm_params=litellm_params,
+            acompletion=acompletion,
             logger_fn=logger_fn,
             encoding=encoding,  # for calculating input/output tokens
             api_key=clarifai_key,
@@ -1194,8 +1195,12 @@
 
         if "stream" in optional_params and optional_params["stream"] == True:
             # don't try to access stream object,
-
-            model_response = CustomStreamWrapper(model_response, model, logging_obj=logging, custom_llm_provider="replicate")
+            ## LOGGING
+            logging.post_call(
+                input=messages,
+                api_key=api_key,
+                original_response=model_response,
+            )
 
         if optional_params.get("stream", False) or acompletion == True:
             ## LOGGING
diff --git a/litellm/tests/test_clarifai_completion.py b/litellm/tests/test_clarifai_completion.py
index 2c2626398..347e513bc 100644
--- a/litellm/tests/test_clarifai_completion.py
+++ b/litellm/tests/test_clarifai_completion.py
@@ -1,6 +1,7 @@
 import sys, os
 import traceback
 from dotenv import load_dotenv
+import asyncio, logging
 
 load_dotenv()
 import os, io
@@ -10,7 +11,7 @@ sys.path.insert(
 )  # Adds the parent directory to the system path
 import pytest
 import litellm
-from litellm import embedding, completion, completion_cost, Timeout, ModelResponse
+from litellm import embedding, completion, acompletion, acreate, completion_cost, Timeout, ModelResponse
 from litellm import RateLimitError
 
 # litellm.num_retries = 3
@@ -65,3 +66,28 @@ def test_completion_clarifai_mistral_large():
         pass
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
+
+@pytest.mark.asyncio
+def test_async_completion_clarifai():
+    import asyncio
+
+    litellm.set_verbose = True
+
+    async def test_get_response():
+        user_message = "Hello, how are you?"
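+        # Note: this exercises the async path added in this patch. acompletion()
+        # forwards acompletion=True into clarifai's completion(), which dispatches
+        # to the new async_completion helper, so CLARIFAI_API_KEY must be set in
+        # the environment for this live call to succeed.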
+ messages = [{"content": user_message, "role": "user"}] + try: + response = await acompletion( + model="clarifai/openai.chat-completion.GPT-4", + messages=messages, + timeout=10, + api_key=os.getenv("CLARIFAI_API_KEY"), + ) + print(f"response: {response}") + except litellm.Timeout as e: + pass + except Exception as e: + pytest.fail(f"An exception occurred: {e}") + + + asyncio.run(test_get_response()) diff --git a/litellm/tests/test_streaming.py b/litellm/tests/test_streaming.py index d0d8a720a..bb9e0e16b 100644 --- a/litellm/tests/test_streaming.py +++ b/litellm/tests/test_streaming.py @@ -391,8 +391,7 @@ def test_completion_claude_stream(): print(f"completion_response: {complete_response}") except Exception as e: pytest.fail(f"Error occurred: {e}") - - + # test_completion_claude_stream() def test_completion_claude_2_stream(): litellm.set_verbose = True diff --git a/litellm/utils.py b/litellm/utils.py index e5f7f9d11..56518f9f9 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -2807,6 +2807,7 @@ def client(original_function): ) else: return result + return result # Prints Exactly what was passed to litellm function - don't execute any logic here - it should just print @@ -2910,6 +2911,7 @@ def client(original_function): model_response_object=ModelResponse(), stream=kwargs.get("stream", False), ) + if kwargs.get("stream", False) == True: cached_result = CustomStreamWrapper( completion_stream=cached_result, @@ -9905,6 +9907,27 @@ class CustomStreamWrapper: return {"text": "", "is_finished": False} except Exception as e: raise e + + def handle_clarifai_completion_chunk(self, chunk): + try: + if isinstance(chunk, dict): + parsed_response = chunk + if isinstance(chunk, (str, bytes)): + if isinstance(chunk, bytes): + parsed_response = chunk.decode("utf-8") + else: + parsed_response = chunk + data_json = json.loads(parsed_response) + text = data_json.get("outputs", "")[0].get("data", "").get("text", "").get("raw","") + prompt_tokens = len(encoding.encode(data_json.get("outputs", "")[0].get("input","").get("data", "").get("text", "").get("raw",""))) + completion_tokens = len(encoding.encode(text)) + return { + "text": text, + "is_finished": True, + } + except: + traceback.print_exc() + return "" def model_response_creator(self): model_response = ModelResponse(stream=True, model=self.model) @@ -9949,6 +9972,11 @@ class CustomStreamWrapper: completion_obj["content"] = response_obj["text"] if response_obj["is_finished"]: self.received_finish_reason = response_obj["finish_reason"] + elif ( + self.custom_llm_provider and self.custom_llm_provider == "clarifai" + ): + response_obj = self.handle_clarifai_completion_chunk(chunk) + completion_obj["content"] = response_obj["text"] elif self.model == "replicate" or self.custom_llm_provider == "replicate": response_obj = self.handle_replicate_chunk(chunk) completion_obj["content"] = response_obj["text"] From 540a35ed5e96ac6e0408277d68f36191a2603bf7 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 08:48:11 -0700 Subject: [PATCH 015/543] fix update router logic --- litellm/router.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/litellm/router.py b/litellm/router.py index 7acf75e8e..9638db548 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -2669,13 +2669,18 @@ class Router: "cooldown_time", ] + _existing_router_settings = self.get_settings() for var in kwargs: if var in _allowed_settings: if var in _int_settings: _casted_value = int(kwargs[var]) setattr(self, var, _casted_value) else: - if var == 
"routing_strategy": + # only run routing strategy init if it has changed + if ( + var == "routing_strategy" + and _existing_router_settings["routing_strategy"] != kwargs[var] + ): self.routing_strategy_init( routing_strategy=kwargs[var], routing_strategy_args=kwargs.get( From 0b729046087a3646ecf7c5573fb6d4cf861e520a Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 09:00:32 -0700 Subject: [PATCH 016/543] fix(lowest_latency.py): fix the size of the latency list to 10 by default (can be modified) --- litellm/router_strategy/lowest_latency.py | 37 +++++++- litellm/tests/test_lowest_latency_routing.py | 92 +++++++++++++++++++- 2 files changed, 124 insertions(+), 5 deletions(-) diff --git a/litellm/router_strategy/lowest_latency.py b/litellm/router_strategy/lowest_latency.py index 80dee5e67..5f0f15aac 100644 --- a/litellm/router_strategy/lowest_latency.py +++ b/litellm/router_strategy/lowest_latency.py @@ -31,6 +31,7 @@ class LiteLLMBase(BaseModel): class RoutingArgs(LiteLLMBase): ttl: int = 1 * 60 * 60 # 1 hour lowest_latency_buffer: float = 0 + max_latency_list_size: int = 10 class LowestLatencyLoggingHandler(CustomLogger): @@ -103,7 +104,18 @@ class LowestLatencyLoggingHandler(CustomLogger): request_count_dict[id] = {} ## Latency - request_count_dict[id].setdefault("latency", []).append(final_value) + if ( + len(request_count_dict[id].get("latency", [])) + < self.routing_args.max_latency_list_size + ): + request_count_dict[id].setdefault("latency", []).append(final_value) + else: + request_count_dict[id]["latency"] = request_count_dict[id][ + "latency" + ][: self.routing_args.max_latency_list_size - 1] + [final_value] + + if precise_minute not in request_count_dict[id]: + request_count_dict[id][precise_minute] = {} if precise_minute not in request_count_dict[id]: request_count_dict[id][precise_minute] = {} @@ -170,8 +182,17 @@ class LowestLatencyLoggingHandler(CustomLogger): if id not in request_count_dict: request_count_dict[id] = {} - ## Latency - request_count_dict[id].setdefault("latency", []).append(1000.0) + ## Latency - give 1000s penalty for failing + if ( + len(request_count_dict[id].get("latency", [])) + < self.routing_args.max_latency_list_size + ): + request_count_dict[id].setdefault("latency", []).append(1000.0) + else: + request_count_dict[id]["latency"] = request_count_dict[id][ + "latency" + ][: self.routing_args.max_latency_list_size - 1] + [1000.0] + self.router_cache.set_cache( key=latency_key, value=request_count_dict, @@ -242,7 +263,15 @@ class LowestLatencyLoggingHandler(CustomLogger): request_count_dict[id] = {} ## Latency - request_count_dict[id].setdefault("latency", []).append(final_value) + if ( + len(request_count_dict[id].get("latency", [])) + < self.routing_args.max_latency_list_size + ): + request_count_dict[id].setdefault("latency", []).append(final_value) + else: + request_count_dict[id]["latency"] = request_count_dict[id][ + "latency" + ][: self.routing_args.max_latency_list_size - 1] + [final_value] if precise_minute not in request_count_dict[id]: request_count_dict[id][precise_minute] = {} diff --git a/litellm/tests/test_lowest_latency_routing.py b/litellm/tests/test_lowest_latency_routing.py index 2f0aaee91..4da879208 100644 --- a/litellm/tests/test_lowest_latency_routing.py +++ b/litellm/tests/test_lowest_latency_routing.py @@ -7,7 +7,7 @@ import traceback from dotenv import load_dotenv load_dotenv() -import os +import os, copy sys.path.insert( 0, os.path.abspath("../..") @@ -20,6 +20,96 @@ from litellm.caching import DualCache ### 
UNIT TESTS FOR LATENCY ROUTING ### +@pytest.mark.parametrize("sync_mode", [True, False]) +@pytest.mark.asyncio +async def test_latency_memory_leak(sync_mode): + """ + Test to make sure there's no memory leak caused by lowest latency routing + + - make 10 calls -> check memory + - make 11th call -> no change in memory + """ + test_cache = DualCache() + model_list = [] + lowest_latency_logger = LowestLatencyLoggingHandler( + router_cache=test_cache, model_list=model_list + ) + model_group = "gpt-3.5-turbo" + deployment_id = "1234" + kwargs = { + "litellm_params": { + "metadata": { + "model_group": "gpt-3.5-turbo", + "deployment": "azure/chatgpt-v-2", + }, + "model_info": {"id": deployment_id}, + } + } + start_time = time.time() + response_obj = {"usage": {"total_tokens": 50}} + time.sleep(5) + end_time = time.time() + for _ in range(10): + if sync_mode: + lowest_latency_logger.log_success_event( + response_obj=response_obj, + kwargs=kwargs, + start_time=start_time, + end_time=end_time, + ) + else: + await lowest_latency_logger.async_log_success_event( + response_obj=response_obj, + kwargs=kwargs, + start_time=start_time, + end_time=end_time, + ) + latency_key = f"{model_group}_map" + cache_value = copy.deepcopy( + test_cache.get_cache(key=latency_key) + ) # MAKE SURE NO MEMORY LEAK IN CACHING OBJECT + + if sync_mode: + lowest_latency_logger.log_success_event( + response_obj=response_obj, + kwargs=kwargs, + start_time=start_time, + end_time=end_time, + ) + else: + await lowest_latency_logger.async_log_success_event( + response_obj=response_obj, + kwargs=kwargs, + start_time=start_time, + end_time=end_time, + ) + new_cache_value = test_cache.get_cache(key=latency_key) + # Assert that the size of the cache doesn't grow unreasonably + assert get_size(new_cache_value) <= get_size( + cache_value + ), f"Memory leak detected in function call! new_cache size={get_size(new_cache_value)}, old cache size={get_size(cache_value)}" + + +def get_size(obj, seen=None): + # From https://goshippo.com/blog/measure-real-size-any-python-object/ + # Recursively finds size of objects + size = sys.getsizeof(obj) + if seen is None: + seen = set() + obj_id = id(obj) + if obj_id in seen: + return 0 + seen.add(obj_id) + if isinstance(obj, dict): + size += sum([get_size(v, seen) for v in obj.values()]) + size += sum([get_size(k, seen) for k in obj.keys()]) + elif hasattr(obj, "__dict__"): + size += get_size(obj.__dict__, seen) + elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)): + size += sum([get_size(i, seen) for i in obj]) + return size + + def test_latency_updated(): test_cache = DualCache() model_list = [] From a1814a3e4ccf8bcb90aebbb8d9c4cc27435e0e17 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 09:13:37 -0700 Subject: [PATCH 017/543] test - num callbacks on proxy --- tests/test_callbacks_on_proxy.py | 79 ++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 tests/test_callbacks_on_proxy.py diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py new file mode 100644 index 000000000..6effc7464 --- /dev/null +++ b/tests/test_callbacks_on_proxy.py @@ -0,0 +1,79 @@ +# What this tests ? 
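+# (Context: num_callbacks is read from /health/readiness, which computes it as
+# the combined length of litellm.callbacks, litellm.input_callback,
+# litellm.success_callback and litellm.failure_callback.)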
+## Makes sure the number of callbacks on the proxy don't increase over time +## Num callbacks should be a fixed number at t=0 and t=10, t=20 +""" +PROD TEST - DO NOT Delete this Test +""" + +import pytest +import asyncio +import aiohttp +import os +import dotenv +from dotenv import load_dotenv +import pytest + +load_dotenv() + + +async def config_update(session): + url = "http://0.0.0.0:4000/config/update" + headers = {"Authorization": "Bearer sk-1234", "Content-Type": "application/json"} + data = { + "router_settings": { + "routing_strategy": ["latency-based-routing"], + }, + } + + async with session.post(url, headers=headers, json=data) as response: + status = response.status + response_text = await response.text() + + print(response_text) + print() + + if status != 200: + raise Exception(f"Request did not return a 200 status code: {status}") + return await response.json() + + +async def get_active_callbacks(session): + url = "http://0.0.0.0:4000/health/readiness" + headers = { + "Content-Type": "application/json", + } + + async with session.get(url, headers=headers) as response: + status = response.status + response_text = await response.text() + print("response from /health/readiness") + print(response_text) + print() + + if status != 200: + raise Exception(f"Request did not return a 200 status code: {status}") + + _json_response = await response.json() + + _num_callbacks = _json_response["num_callbacks"] + print("current number of callbacks: ", _num_callbacks) + return _num_callbacks + + +@pytest.mark.asyncio +async def test_add_model_run_health(): + """ """ + import uuid + + async with aiohttp.ClientSession() as session: + num_callbacks_1 = await get_active_callbacks(session=session) + + await asyncio.sleep(30) + + num_callbacks_2 = await get_active_callbacks(session=session) + + await asyncio.sleep(30) + + num_callbacks_3 = await get_active_callbacks(session=session) + + assert num_callbacks_1 == num_callbacks_2 == num_callbacks_3 From 23d334fe60d7731555365bd9f3dc68f2038388e8 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 09:14:32 -0700 Subject: [PATCH 018/543] proxy - return num callbacks on /health/readiness --- litellm/proxy/proxy_server.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 26987f478..984417757 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -9208,7 +9208,19 @@ async def health_readiness(): """ try: # get success callback + _num_callbacks = 0 + try: + _num_callbacks = ( + len(litellm.callbacks) + + len(litellm.input_callback) + + len(litellm.failure_callback) + + len(litellm.success_callback) + ) + except: + _num_callbacks = 0 + success_callback_names = [] + try: # this was returning a JSON of the values in some of the callbacks # all we need is the callback name, hence we do str(callback) @@ -9236,13 +9248,13 @@ async def health_readiness(): # check DB if prisma_client is not None: # if db passed in, check if it's connected db_health_status = _db_health_readiness_check() - return { "status": "healthy", "db": "connected", "cache": cache_type, "litellm_version": version, "success_callbacks": success_callback_names, + "num_callbacks": _num_callbacks, **db_health_status, } else: @@ -9252,6 +9264,7 @@ async def health_readiness(): "cache": cache_type, "litellm_version": version, "success_callbacks": success_callback_names, + "num_callbacks": _num_callbacks, } except Exception as e: raise HTTPException(status_code=503, 
detail=f"Service Unhealthy ({str(e)})") From 3677d56e9e1ed5e0b28c1efcde919ab5f79cae1a Mon Sep 17 00:00:00 2001 From: Vince Loewe Date: Fri, 3 May 2024 17:42:50 +0100 Subject: [PATCH 019/543] Lunary: Fix tool calling --- litellm/integrations/lunary.py | 36 ++++++++++++++++++++++++------ litellm/tests/test_lunary.py | 40 ++++++++++++++++++++++++++++++++-- 2 files changed, 67 insertions(+), 9 deletions(-) diff --git a/litellm/integrations/lunary.py b/litellm/integrations/lunary.py index 6ddf2ca59..6b23f0987 100644 --- a/litellm/integrations/lunary.py +++ b/litellm/integrations/lunary.py @@ -4,7 +4,6 @@ from datetime import datetime, timezone import traceback import dotenv import importlib -import sys import packaging @@ -18,13 +17,33 @@ def parse_usage(usage): "prompt": usage["prompt_tokens"] if "prompt_tokens" in usage else 0, } +def parse_tool_calls(tool_calls): + if tool_calls is None: + return None + + def clean_tool_call(tool_call): + + serialized = { + "type": tool_call.type, + "id": tool_call.id, + "function": { + "name": tool_call.function.name, + "arguments": tool_call.function.arguments, + } + } + + return serialized + + return [clean_tool_call(tool_call) for tool_call in tool_calls] + def parse_messages(input): + if input is None: return None def clean_message(message): - # if is strin, return as is + # if is string, return as is if isinstance(message, str): return message @@ -38,9 +57,7 @@ def parse_messages(input): # Only add tool_calls and function_call to res if they are set if message.get("tool_calls"): - serialized["tool_calls"] = message.get("tool_calls") - if message.get("function_call"): - serialized["function_call"] = message.get("function_call") + serialized["tool_calls"] = parse_tool_calls(message.get("tool_calls")) return serialized @@ -93,8 +110,13 @@ class LunaryLogger: print_verbose(f"Lunary Logging - Logging request for model {model}") litellm_params = kwargs.get("litellm_params", {}) + optional_params = kwargs.get("optional_params", {}) metadata = litellm_params.get("metadata", {}) or {} + if optional_params: + # merge into extra + extra = {**extra, **optional_params} + tags = litellm_params.pop("tags", None) or [] if extra: @@ -104,7 +126,7 @@ class LunaryLogger: # keep only serializable types for param, value in extra.items(): - if not isinstance(value, (str, int, bool, float)): + if not isinstance(value, (str, int, bool, float)) and param != "tools": try: extra[param] = str(value) except: @@ -140,7 +162,7 @@ class LunaryLogger: metadata=metadata, runtime="litellm", tags=tags, - extra=extra, + params=extra, ) self.lunary_client.track_event( diff --git a/litellm/tests/test_lunary.py b/litellm/tests/test_lunary.py index cbf9364af..c9a8afd57 100644 --- a/litellm/tests/test_lunary.py +++ b/litellm/tests/test_lunary.py @@ -11,7 +11,6 @@ litellm.failure_callback = ["lunary"] litellm.success_callback = ["lunary"] litellm.set_verbose = True - def test_lunary_logging(): try: response = completion( @@ -59,9 +58,46 @@ def test_lunary_logging_with_metadata(): except Exception as e: print(e) +#test_lunary_logging_with_metadata() -# test_lunary_logging_with_metadata() +def test_lunary_with_tools(): + import litellm + + messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}] + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": 
"The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + }, + } + ] + + response = litellm.completion( + model="gpt-3.5-turbo-1106", + messages=messages, + tools=tools, + tool_choice="auto", # auto is default, but we'll be explicit + ) + + response_message = response.choices[0].message + print("\nLLM Response:\n", response.choices[0].message) + + +#test_lunary_with_tools() def test_lunary_logging_with_streaming_and_metadata(): try: From fe6e46546649f7368510ae0ba984e83cb16c75ae Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 09:59:59 -0700 Subject: [PATCH 020/543] test - num callbacks on proxy should not increase --- tests/test_callbacks_on_proxy.py | 79 ++++++++++++++++++++++++++++++-- 1 file changed, 75 insertions(+), 4 deletions(-) diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py index 6effc7464..01183cf6e 100644 --- a/tests/test_callbacks_on_proxy.py +++ b/tests/test_callbacks_on_proxy.py @@ -16,12 +16,13 @@ import pytest load_dotenv() -async def config_update(session): +async def config_update(session, routing_strategy=None): url = "http://0.0.0.0:4000/config/update" headers = {"Authorization": "Bearer sk-1234", "Content-Type": "application/json"} + print("routing_strategy: ", routing_strategy) data = { "router_settings": { - "routing_strategy": ["latency-based-routing"], + "routing_strategy": routing_strategy, }, } @@ -60,9 +61,41 @@ async def get_active_callbacks(session): return _num_callbacks +async def get_current_routing_strategy(session): + url = "http://0.0.0.0:4000/get/config/callbacks" + headers = { + "Content-Type": "application/json", + "Authorization": "Bearer sk-1234", + } + + async with session.get(url, headers=headers) as response: + status = response.status + response_text = await response.text() + print(response_text) + print() + + if status != 200: + raise Exception(f"Request did not return a 200 status code: {status}") + + _json_response = await response.json() + print("JSON response: ", _json_response) + + router_settings = _json_response["router_settings"] + print("Router settings: ", router_settings) + routing_strategy = router_settings["routing_strategy"] + return routing_strategy + + @pytest.mark.asyncio -async def test_add_model_run_health(): - """ """ +async def test_check_num_callbacks(): + """ + Test 1: num callbacks should NOT increase over time + -> check current callbacks + -> sleep for 30s + -> check current callbacks + -> sleep for 30s + -> check current callbacks + """ import uuid async with aiohttp.ClientSession() as session: @@ -72,8 +105,46 @@ async def test_add_model_run_health(): num_callbacks_2 = await get_active_callbacks(session=session) + assert num_callbacks_1 == num_callbacks_2 + await asyncio.sleep(30) num_callbacks_3 = await get_active_callbacks(session=session) assert num_callbacks_1 == num_callbacks_2 == num_callbacks_3 + + +@pytest.mark.asyncio +async def test_check_num_callbacks_on_lowest_latency(): + """ + Test 1: num callbacks should NOT increase over time + -> Update to lowest latency + -> check current callbacks + -> sleep for 30s + -> check current callbacks + -> sleep for 30s + -> check current callbacks + -> update back to original routing-strategy + """ + import uuid + + async with aiohttp.ClientSession() as session: + + original_routing_strategy = await get_current_routing_strategy(session=session) + await config_update(session=session, routing_strategy="latency-based-routing") + 
+ num_callbacks_1 = await get_active_callbacks(session=session) + + await asyncio.sleep(30) + + num_callbacks_2 = await get_active_callbacks(session=session) + + assert num_callbacks_1 == num_callbacks_2 + + await asyncio.sleep(30) + + num_callbacks_3 = await get_active_callbacks(session=session) + + assert num_callbacks_1 == num_callbacks_2 == num_callbacks_3 + + await config_update(session=session, routing_strategy=original_routing_strategy) From 9ba5685722c1858437268f5d01b400727a974d80 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 10:05:06 -0700 Subject: [PATCH 021/543] test active callbacks on proxy --- tests/test_callbacks_on_proxy.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py index 01183cf6e..a4d31587d 100644 --- a/tests/test_callbacks_on_proxy.py +++ b/tests/test_callbacks_on_proxy.py @@ -100,6 +100,9 @@ async def test_check_num_callbacks(): async with aiohttp.ClientSession() as session: num_callbacks_1 = await get_active_callbacks(session=session) + assert ( + num_callbacks_1 > 0 + ) # /health/readiness returns 0 when some calculation goes wrong await asyncio.sleep(30) @@ -134,6 +137,9 @@ async def test_check_num_callbacks_on_lowest_latency(): await config_update(session=session, routing_strategy="latency-based-routing") num_callbacks_1 = await get_active_callbacks(session=session) + assert ( + num_callbacks_1 > 0 + ) # /health/readiness returns 0 when some calculation goes wrong await asyncio.sleep(30) From 2dd9d2f704028be562f7fd1cbd4709300e3f5c47 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 10:09:57 -0700 Subject: [PATCH 022/543] test(test_amazing_vertex_completion.py): try-except api errors --- .../tests/test_amazing_vertex_completion.py | 36 ------------------- litellm/utils.py | 21 +++++++---- 2 files changed, 15 insertions(+), 42 deletions(-) diff --git a/litellm/tests/test_amazing_vertex_completion.py b/litellm/tests/test_amazing_vertex_completion.py index 05eece834..1d79653ea 100644 --- a/litellm/tests/test_amazing_vertex_completion.py +++ b/litellm/tests/test_amazing_vertex_completion.py @@ -548,42 +548,6 @@ def test_gemini_pro_vision_base64(): def test_gemini_pro_function_calling(): - load_vertex_ai_credentials() - tools = [ - { - "type": "function", - "function": { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. 
San Francisco, CA", - }, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, - }, - "required": ["location"], - }, - }, - } - ] - - messages = [ - { - "role": "user", - "content": "What's the weather like in Boston today in fahrenheit?", - } - ] - completion = litellm.completion( - model="gemini-pro", messages=messages, tools=tools, tool_choice="auto" - ) - print(f"completion: {completion}") - if hasattr(completion.choices[0].message, "tool_calls") and isinstance( - completion.choices[0].message.tool_calls, list - ): - assert len(completion.choices[0].message.tool_calls) == 1 try: load_vertex_ai_credentials() tools = [ diff --git a/litellm/utils.py b/litellm/utils.py index ec296e9dc..80d26f58b 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -3974,12 +3974,10 @@ def calculage_img_tokens( tile_tokens = (base_tokens * 2) * tiles_needed_high_res total_tokens = base_tokens + tile_tokens return total_tokens - + def create_pretrained_tokenizer( - identifier: str, - revision="main", - auth_token: Optional[str] = None + identifier: str, revision="main", auth_token: Optional[str] = None ): """ Creates a tokenizer from an existing file on a HuggingFace repository to be used with `token_counter`. @@ -3993,7 +3991,9 @@ def create_pretrained_tokenizer( dict: A dictionary with the tokenizer and its type. """ - tokenizer = Tokenizer.from_pretrained(identifier, revision=revision, auth_token=auth_token) + tokenizer = Tokenizer.from_pretrained( + identifier, revision=revision, auth_token=auth_token + ) return {"type": "huggingface_tokenizer", "tokenizer": tokenizer} @@ -9001,7 +9001,16 @@ def exception_type( request=original_exception.request, ) elif custom_llm_provider == "azure": - if "This model's maximum context length is" in error_str: + if "Internal server error" in error_str: + exception_mapping_worked = True + raise APIError( + status_code=500, + message=f"AzureException - {original_exception.message}", + llm_provider="azure", + model=model, + request=httpx.Request(method="POST", url="https://openai.com/"), + ) + elif "This model's maximum context length is" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( message=f"AzureException - {original_exception.message}", From e7405f105c886bad2ad6ed7a1eed0b47af29dcd0 Mon Sep 17 00:00:00 2001 From: Lunik Date: Fri, 3 May 2024 20:50:37 +0200 Subject: [PATCH 023/543] =?UTF-8?q?=E2=9C=85=20ci:=20Add=20tests?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lunik --- litellm/proxy/hooks/azure_content_safety.py | 8 +- litellm/tests/test_azure_content_safety.py | 267 ++++++++++++++++++++ 2 files changed, 271 insertions(+), 4 deletions(-) create mode 100644 litellm/tests/test_azure_content_safety.py diff --git a/litellm/proxy/hooks/azure_content_safety.py b/litellm/proxy/hooks/azure_content_safety.py index 161e35cde..d0d23363b 100644 --- a/litellm/proxy/hooks/azure_content_safety.py +++ b/litellm/proxy/hooks/azure_content_safety.py @@ -36,10 +36,10 @@ class _PROXY_AzureContentSafety( def _configure_thresholds(self, thresholds=None): default_thresholds = { - self.text_category.HATE: 6, - self.text_category.SELF_HARM: 6, - self.text_category.SEXUAL: 6, - self.text_category.VIOLENCE: 6, + self.text_category.HATE: 4, + self.text_category.SELF_HARM: 4, + self.text_category.SEXUAL: 4, + self.text_category.VIOLENCE: 4, } if thresholds is None: diff --git a/litellm/tests/test_azure_content_safety.py b/litellm/tests/test_azure_content_safety.py new file mode 100644 
index 000000000..f7d9d8268 --- /dev/null +++ b/litellm/tests/test_azure_content_safety.py @@ -0,0 +1,267 @@ +# What is this? +## Unit test for azure content safety +import sys, os, asyncio, time, random +from datetime import datetime +import traceback +from dotenv import load_dotenv +from fastapi import HTTPException + +load_dotenv() +import os + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import pytest +import litellm +from litellm.proxy.hooks.azure_content_safety import _PROXY_AzureContentSafety +from litellm import Router, mock_completion +from litellm.proxy.utils import ProxyLogging +from litellm.proxy._types import UserAPIKeyAuth +from litellm.caching import DualCache + + +@pytest.mark.asyncio +async def test_strict_input_filtering_01(): + """ + - have a response with a filtered input + - call the pre call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 2}, + ) + + data = { + "messages": [ + {"role": "system", "content": "You are an helpfull assistant"}, + {"role": "user", "content": "Fuck yourself you stupid bitch"}, + ] + } + + with pytest.raises(HTTPException) as exc_info: + await azure_content_safety.async_pre_call_hook( + user_api_key_dict=UserAPIKeyAuth(), + cache=DualCache(), + data=data, + call_type="completion", + ) + + assert exc_info.value.detail["source"] == "input" + assert exc_info.value.detail["category"] == "Hate" + assert exc_info.value.detail["severity"] == "low" + + +@pytest.mark.asyncio +async def test_strict_input_filtering_02(): + """ + - have a response with a filtered input + - call the pre call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 2}, + ) + + data = { + "messages": [ + {"role": "system", "content": "You are an helpfull assistant"}, + {"role": "user", "content": "Hello how are you ?"}, + ] + } + + await azure_content_safety.async_pre_call_hook( + user_api_key_dict=UserAPIKeyAuth(), + cache=DualCache(), + data=data, + call_type="completion", + ) + + +@pytest.mark.asyncio +async def test_loose_input_filtering_01(): + """ + - have a response with a filtered input + - call the pre call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 8}, + ) + + data = { + "messages": [ + {"role": "system", "content": "You are an helpfull assistant"}, + {"role": "user", "content": "Fuck yourself you stupid bitch"}, + ] + } + + await azure_content_safety.async_pre_call_hook( + user_api_key_dict=UserAPIKeyAuth(), + cache=DualCache(), + data=data, + call_type="completion", + ) + + +@pytest.mark.asyncio +async def test_loose_input_filtering_02(): + """ + - have a response with a filtered input + - call the pre call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 8}, + ) + + data = { + "messages": [ + {"role": "system", "content": "You are an helpfull assistant"}, + {"role": "user", "content": "Hello how are you ?"}, + ] + } + + await azure_content_safety.async_pre_call_hook( + user_api_key_dict=UserAPIKeyAuth(), + cache=DualCache(), + data=data, + 
call_type="completion", + ) + + +@pytest.mark.asyncio +async def test_strict_output_filtering_01(): + """ + - have a response with a filtered output + - call the post call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 2}, + ) + + response = mock_completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": "You are a song writer expert. You help users to write songs about any topic in any genre.", + }, + { + "role": "user", + "content": "Help me write a rap text song. Add some insults to make it more credible.", + }, + ], + mock_response="I'm the king of the mic, you're just a fucking dick. Don't fuck with me your stupid bitch.", + ) + + with pytest.raises(HTTPException) as exc_info: + await azure_content_safety.async_post_call_success_hook( + user_api_key_dict=UserAPIKeyAuth(), response=response + ) + + assert exc_info.value.detail["source"] == "output" + assert exc_info.value.detail["category"] == "Hate" + assert exc_info.value.detail["severity"] == "low" + + +@pytest.mark.asyncio +async def test_strict_output_filtering_02(): + """ + - have a response with a filtered output + - call the post call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 2}, + ) + + response = mock_completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": "You are a song writer expert. You help users to write songs about any topic in any genre.", + }, + { + "role": "user", + "content": "Help me write a rap text song. Add some insults to make it more credible.", + }, + ], + mock_response="I'm unable to help with you with hate speech", + ) + + await azure_content_safety.async_post_call_success_hook( + user_api_key_dict=UserAPIKeyAuth(), response=response + ) + + +@pytest.mark.asyncio +async def test_loose_output_filtering_01(): + """ + - have a response with a filtered output + - call the post call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 8}, + ) + + response = mock_completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": "You are a song writer expert. You help users to write songs about any topic in any genre.", + }, + { + "role": "user", + "content": "Help me write a rap text song. Add some insults to make it more credible.", + }, + ], + mock_response="I'm the king of the mic, you're just a fucking dick. Don't fuck with me your stupid bitch.", + ) + + await azure_content_safety.async_post_call_success_hook( + user_api_key_dict=UserAPIKeyAuth(), response=response + ) + + +@pytest.mark.asyncio +async def test_loose_output_filtering_02(): + """ + - have a response with a filtered output + - call the post call hook + """ + azure_content_safety = _PROXY_AzureContentSafety( + endpoint=os.getenv("AZURE_CONTENT_SAFETY_ENDPOINT"), + api_key=os.getenv("AZURE_CONTENT_SAFETY_API_KEY"), + thresholds={"Hate": 8}, + ) + + response = mock_completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": "You are a song writer expert. You help users to write songs about any topic in any genre.", + }, + { + "role": "user", + "content": "Help me write a rap text song. 
Add some insults to make it more credible.", + }, + ], + mock_response="I'm unable to help with you with hate speech", + ) + + await azure_content_safety.async_post_call_success_hook( + user_api_key_dict=UserAPIKeyAuth(), response=response + ) From 9ba9b3891fdba07d040808eaf25d7cf732027565 Mon Sep 17 00:00:00 2001 From: Lunik Date: Fri, 3 May 2024 20:51:40 +0200 Subject: [PATCH 024/543] =?UTF-8?q?=E2=9A=A1=EF=B8=8F=20perf:=20Remove=20t?= =?UTF-8?q?est=20violation=20on=20each=20stream=20chunk?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lunik --- litellm/proxy/hooks/azure_content_safety.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/litellm/proxy/hooks/azure_content_safety.py b/litellm/proxy/hooks/azure_content_safety.py index d0d23363b..2cea05c69 100644 --- a/litellm/proxy/hooks/azure_content_safety.py +++ b/litellm/proxy/hooks/azure_content_safety.py @@ -148,10 +148,10 @@ class _PROXY_AzureContentSafety( content=response.choices[0].message.content, source="output" ) - async def async_post_call_streaming_hook( - self, - user_api_key_dict: UserAPIKeyAuth, - response: str, - ): - self.print_verbose(f"Inside Azure Content-Safety Call-Stream Hook") - await self.test_violation(content=response, source="output") + #async def async_post_call_streaming_hook( + # self, + # user_api_key_dict: UserAPIKeyAuth, + # response: str, + #): + # self.print_verbose(f"Inside Azure Content-Safety Call-Stream Hook") + # await self.test_violation(content=response, source="output") From defc205348713c77861d487357ba849e4c700c47 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 12:39:21 -0700 Subject: [PATCH 025/543] test(test_alangfuse.py): fix test --- litellm/tests/test_alangfuse.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/litellm/tests/test_alangfuse.py b/litellm/tests/test_alangfuse.py index 29718d474..fd968c6a7 100644 --- a/litellm/tests/test_alangfuse.py +++ b/litellm/tests/test_alangfuse.py @@ -559,7 +559,15 @@ def test_langfuse_existing_trace_id(): new_langfuse_trace = langfuse_client.get_trace(id=trace_id) - assert dict(initial_langfuse_trace) == dict(new_langfuse_trace) + initial_langfuse_trace_dict = dict(initial_langfuse_trace) + initial_langfuse_trace_dict.pop("updatedAt") + initial_langfuse_trace_dict.pop("timestamp") + + new_langfuse_trace_dict = dict(new_langfuse_trace) + new_langfuse_trace_dict.pop("updatedAt") + new_langfuse_trace_dict.pop("timestamp") + + assert initial_langfuse_trace_dict == new_langfuse_trace_dict def test_langfuse_logging_tool_calling(): From b2a0502383fbcda2ef00afbe9aae319ef3a2f1f5 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 12:42:20 -0700 Subject: [PATCH 026/543] =?UTF-8?q?bump:=20version=201.35.36=20=E2=86=92?= =?UTF-8?q?=201.35.37?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 72651f148..b0caa9089 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.35.36" +version = "1.35.37" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -80,7 +80,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.35.36" +version = "1.35.37" version_files = [ 
"pyproject.toml:^version" ] From fdc9856652364d138deeaade517ae36eecd787e5 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 13:33:43 -0700 Subject: [PATCH 027/543] UI - set DB Exceptions webhook_url --- ui/litellm-dashboard/src/components/settings.tsx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ui/litellm-dashboard/src/components/settings.tsx b/ui/litellm-dashboard/src/components/settings.tsx index 53de36286..092f7bb14 100644 --- a/ui/litellm-dashboard/src/components/settings.tsx +++ b/ui/litellm-dashboard/src/components/settings.tsx @@ -106,7 +106,8 @@ const Settings: React.FC = ({ "llm_exceptions": "LLM Exceptions", "llm_too_slow": "LLM Responses Too Slow", "llm_requests_hanging": "LLM Requests Hanging", - "budget_alerts": "Budget Alerts (API Keys, Users)" + "budget_alerts": "Budget Alerts (API Keys, Users)", + "db_exceptions": "Database Exceptions (Read/Write)", } useEffect(() => { From 776f541f6ceca750383ecb399083e175c192c1cb Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:04:38 -0700 Subject: [PATCH 028/543] fix bug where slack would get inserting several times --- litellm/proxy/proxy_server.py | 40 +++++++++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 26987f478..763094fde 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -8708,11 +8708,11 @@ async def update_config(config_info: ConfigYAML): # overwrite existing settings with updated values if k == "alert_to_webhook_url": # check if slack is already enabled. if not, enable it - if "slack" not in _existing_settings: - if "alerting" not in _existing_settings: + if "alerting" not in _existing_settings: + _existing_settings["alerting"] = ["slack"] + elif isinstance(_existing_settings["alerting"], list): + if "slack" not in _existing_settings["alerting"]: _existing_settings["alerting"] = ["slack"] - elif isinstance(_existing_settings["alerting"], list): - _existing_settings["alerting"].append("slack") _existing_settings[k] = v config["general_settings"] = _existing_settings @@ -9197,6 +9197,37 @@ def _db_health_readiness_check(): return db_health_cache +@router.get( + "/active/callbacks", + tags=["health"], + dependencies=[Depends(user_api_key_auth)], +) +async def active_callbacks(): + _alerting = str(general_settings.get("alerting")) + # get success callback + success_callback_names = [] + try: + # this was returning a JSON of the values in some of the callbacks + # all we need is the callback name, hence we do str(callback) + success_callback_names = [str(x) for x in litellm.success_callback] + except: + # don't let this block the /health/readiness response, if we can't convert to str -> return litellm.success_callback + success_callback_names = litellm.success_callback + + _num_callbacks = ( + len(litellm.callbacks) + + len(litellm.input_callback) + + len(litellm.failure_callback) + + len(litellm.success_callback) + ) + + return { + "alerting": _alerting, + "success_callbacks": success_callback_names, + "num_callbacks": _num_callbacks, + } + + @router.get( "/health/readiness", tags=["health"], @@ -9206,6 +9237,7 @@ async def health_readiness(): """ Unprotected endpoint for checking if worker can receive requests """ + global general_settings try: # get success callback success_callback_names = [] From e9eead2f257fd739ec8b7c371ca4994d9780d9a7 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:22:15 -0700 Subject: [PATCH 
029/543] test - size of callbacks, alerts --- tests/test_callbacks_on_proxy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py index a4d31587d..29ca62b80 100644 --- a/tests/test_callbacks_on_proxy.py +++ b/tests/test_callbacks_on_proxy.py @@ -39,7 +39,7 @@ async def config_update(session, routing_strategy=None): async def get_active_callbacks(session): - url = "http://0.0.0.0:4000/health/readiness" + url = "http://0.0.0.0:4000/active/callbacks" headers = { "Content-Type": "application/json", } @@ -47,7 +47,7 @@ async def get_active_callbacks(session): async with session.get(url, headers=headers) as response: status = response.status response_text = await response.text() - print("response from /health/readiness") + print("response from /active/callbacks") print(response_text) print() From 3997ea64427c0f1396d6f12782629745242f4be3 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:24:01 -0700 Subject: [PATCH 030/543] fix - return num callbacks in /active/callbacks --- litellm/proxy/proxy_server.py | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index f5c234963..c9d4d3247 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -9203,6 +9203,10 @@ def _db_health_readiness_check(): dependencies=[Depends(user_api_key_auth)], ) async def active_callbacks(): + """ + Returns a list of active callbacks on litellm.callbacks, litellm.input_callback, litellm.failure_callback, litellm.success_callback + """ + global proxy_logging_obj _alerting = str(general_settings.get("alerting")) # get success callback success_callback_names = [] @@ -9219,12 +9223,27 @@ async def active_callbacks(): + len(litellm.input_callback) + len(litellm.failure_callback) + len(litellm.success_callback) + + len(litellm._async_failure_callback) + + len(litellm._async_success_callback) + + len(litellm._async_input_callback) ) + alerting = proxy_logging_obj.alerting + _num_alerting = 0 + if alerting and isinstance(alerting, list): + _num_alerting = len(alerting) + return { "alerting": _alerting, - "success_callbacks": success_callback_names, + "litellm.callbacks": litellm.callbacks, + "litellm.input_callback": litellm.input_callback, + "litellm.failure_callback": litellm.failure_callback, + "litellm.success_callback": success_callback_names, + "litellm._async_success_callback": litellm._async_success_callback, + "litellm._async_failure_callback": litellm._async_failure_callback, + "litellm._async_input_callback": litellm._async_input_callback, "num_callbacks": _num_callbacks, + "num_alerting": _num_alerting, } @@ -9240,17 +9259,6 @@ async def health_readiness(): global general_settings try: # get success callback - _num_callbacks = 0 - try: - _num_callbacks = ( - len(litellm.callbacks) - + len(litellm.input_callback) - + len(litellm.failure_callback) - + len(litellm.success_callback) - ) - except: - _num_callbacks = 0 - success_callback_names = [] try: @@ -9286,7 +9294,6 @@ async def health_readiness(): "cache": cache_type, "litellm_version": version, "success_callbacks": success_callback_names, - "num_callbacks": _num_callbacks, **db_health_status, } else: @@ -9296,7 +9303,6 @@ async def health_readiness(): "cache": cache_type, "litellm_version": version, "success_callbacks": success_callback_names, - "num_callbacks": _num_callbacks, } except Exception as e: raise 
HTTPException(status_code=503, detail=f"Service Unhealthy ({str(e)})") From e5311d35f24a172f59a8f1cea438d5b5109cacfd Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:32:13 -0700 Subject: [PATCH 031/543] fix test len active callbacks --- tests/test_callbacks_on_proxy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py index 29ca62b80..b5de5df25 100644 --- a/tests/test_callbacks_on_proxy.py +++ b/tests/test_callbacks_on_proxy.py @@ -42,6 +42,7 @@ async def get_active_callbacks(session): url = "http://0.0.0.0:4000/active/callbacks" headers = { "Content-Type": "application/json", + "Authorization": "Bearer sk-1234", } async with session.get(url, headers=headers) as response: From a369867e0208dbd356a2acdffaa79a110e02a28b Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:46:44 -0700 Subject: [PATCH 032/543] test - num alerts on callbacks --- tests/test_callbacks_on_proxy.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py index b5de5df25..c10b18ed1 100644 --- a/tests/test_callbacks_on_proxy.py +++ b/tests/test_callbacks_on_proxy.py @@ -24,6 +24,12 @@ async def config_update(session, routing_strategy=None): "router_settings": { "routing_strategy": routing_strategy, }, + "general_settings": { + "alert_to_webhook_url": { + "llm_exceptions": "https://hooks.slack.com/services/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" + }, + "alert_types": ["llm_exceptions", "db_exceptions"], + }, } async with session.post(url, headers=headers, json=data) as response: @@ -58,8 +64,10 @@ async def get_active_callbacks(session): _json_response = await response.json() _num_callbacks = _json_response["num_callbacks"] + _num_alerts = _json_response["num_alerting"] print("current number of callbacks: ", _num_callbacks) - return _num_callbacks + print("current number of alerts: ", _num_alerts) + return _num_callbacks, _num_alerts async def get_current_routing_strategy(session): @@ -100,20 +108,20 @@ async def test_check_num_callbacks(): import uuid async with aiohttp.ClientSession() as session: - num_callbacks_1 = await get_active_callbacks(session=session) + num_callbacks_1, _ = await get_active_callbacks(session=session) assert ( num_callbacks_1 > 0 ) # /health/readiness returns 0 when some calculation goes wrong await asyncio.sleep(30) - num_callbacks_2 = await get_active_callbacks(session=session) + num_callbacks_2, _ = await get_active_callbacks(session=session) assert num_callbacks_1 == num_callbacks_2 await asyncio.sleep(30) - num_callbacks_3 = await get_active_callbacks(session=session) + num_callbacks_3, _ = await get_active_callbacks(session=session) assert num_callbacks_1 == num_callbacks_2 == num_callbacks_3 @@ -137,21 +145,23 @@ async def test_check_num_callbacks_on_lowest_latency(): original_routing_strategy = await get_current_routing_strategy(session=session) await config_update(session=session, routing_strategy="latency-based-routing") - num_callbacks_1 = await get_active_callbacks(session=session) + num_callbacks_1, num_alerts_1 = await get_active_callbacks(session=session) assert ( num_callbacks_1 > 0 ) # /health/readiness returns 0 when some calculation goes wrong await asyncio.sleep(30) - num_callbacks_2 = await get_active_callbacks(session=session) + num_callbacks_2, num_alerts_2 = await get_active_callbacks(session=session) assert num_callbacks_1 == num_callbacks_2 await 
asyncio.sleep(30) - num_callbacks_3 = await get_active_callbacks(session=session) + num_callbacks_3, num_alerts_3 = await get_active_callbacks(session=session) assert num_callbacks_1 == num_callbacks_2 == num_callbacks_3 + assert num_alerts_1 == num_alerts_2 == num_alerts_3 + await config_update(session=session, routing_strategy=original_routing_strategy) From ab27866b6a025680188efab51d17aa9274d3f86e Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:58:11 -0700 Subject: [PATCH 033/543] fix test slack alerting len --- litellm/proxy/proxy_server.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index c9d4d3247..dfc6db6ba 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -9235,13 +9235,19 @@ async def active_callbacks(): return { "alerting": _alerting, - "litellm.callbacks": litellm.callbacks, - "litellm.input_callback": litellm.input_callback, - "litellm.failure_callback": litellm.failure_callback, - "litellm.success_callback": success_callback_names, - "litellm._async_success_callback": litellm._async_success_callback, - "litellm._async_failure_callback": litellm._async_failure_callback, - "litellm._async_input_callback": litellm._async_input_callback, + "litellm.callbacks": [str(x) for x in litellm.callbacks], + "litellm.input_callback": [str(x) for x in litellm.input_callback], + "litellm.failure_callback": [str(x) for x in litellm.failure_callback], + "litellm.success_callback": [str(x) for x in litellm.success_callback], + "litellm._async_success_callback": [ + str(x) for x in litellm._async_success_callback + ], + "litellm._async_failure_callback": [ + str(x) for x in litellm._async_failure_callback + ], + "litellm._async_input_callback": [ + str(x) for x in litellm._async_input_callback + ], "num_callbacks": _num_callbacks, "num_alerting": _num_alerting, } From bae5d3601db52be88a97e6e90b97951387b12094 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 15:08:55 -0700 Subject: [PATCH 034/543] feat - add unit tests for slack alerting --- litellm/tests/test_alerting.py | 134 +++++++++++++++++++-------------- 1 file changed, 78 insertions(+), 56 deletions(-) diff --git a/litellm/tests/test_alerting.py b/litellm/tests/test_alerting.py index a74e25910..5d6e068b7 100644 --- a/litellm/tests/test_alerting.py +++ b/litellm/tests/test_alerting.py @@ -3,7 +3,7 @@ import sys import os -import io, asyncio, httpx +import io, asyncio from datetime import datetime, timedelta # import logging @@ -17,61 +17,6 @@ import asyncio from unittest.mock import patch, MagicMock from litellm.caching import DualCache from litellm.integrations.slack_alerting import SlackAlerting -from litellm.proxy._types import UserAPIKeyAuth -from litellm.proxy.proxy_server import HTTPException - - -@pytest.mark.parametrize("exception_type", ["llm-exception", "non-llm-exception"]) -@pytest.mark.asyncio -async def test_slack_alerting_llm_exceptions(exception_type, monkeypatch): - """ - Test if non-llm exception -> No request - Test if llm exception -> Request triggered - """ - _pl = ProxyLogging(user_api_key_cache=DualCache()) - _pl.update_values( - alerting=["slack"], - alerting_threshold=100, - redis_cache=None, - alert_types=["llm_exceptions"], - ) - - async def mock_alerting_handler(message, level, alert_type): - global exception_type - - if exception_type == "llm-exception": - pass - elif exception_type == "non-llm-exception": - pytest.fail("Function should not 
have been called") - - monkeypatch.setattr(_pl, "alerting_handler", mock_alerting_handler) - - if exception_type == "llm-exception": - await _pl.post_call_failure_hook( - original_exception=litellm.APIError( - status_code=500, - message="This is a test exception", - llm_provider="openai", - model="gpt-3.5-turbo", - request=httpx.Request( - method="completion", url="https://github.com/BerriAI/litellm" - ), - ), - user_api_key_dict=UserAPIKeyAuth(), - ) - - await asyncio.sleep(2) - - elif exception_type == "non-llm-exception": - await _pl.post_call_failure_hook( - original_exception=HTTPException( - status_code=400, - detail={"error": "this is a test exception"}, - ), - user_api_key_dict=UserAPIKeyAuth(), - ) - - await asyncio.sleep(2) @pytest.mark.asyncio @@ -149,3 +94,80 @@ def test_init(): assert slack_no_alerting.alerting == [] print("passed testing slack alerting init") + + +from unittest.mock import patch, AsyncMock +from datetime import datetime, timedelta + + +@pytest.fixture +def slack_alerting(): + return SlackAlerting(alerting_threshold=1) + + +# Test for hanging LLM responses +@pytest.mark.asyncio +async def test_response_taking_too_long_hanging(slack_alerting): + request_data = { + "model": "test_model", + "messages": "test_messages", + "litellm_status": "running", + } + with patch.object(slack_alerting, "send_alert", new=AsyncMock()) as mock_send_alert: + await slack_alerting.response_taking_too_long( + type="hanging_request", request_data=request_data + ) + mock_send_alert.assert_awaited_once() + + +# Test for slow LLM responses +@pytest.mark.asyncio +async def test_response_taking_too_long_callback(slack_alerting): + start_time = datetime.now() + end_time = start_time + timedelta(seconds=301) + kwargs = {"model": "test_model", "messages": "test_messages", "litellm_params": {}} + with patch.object(slack_alerting, "send_alert", new=AsyncMock()) as mock_send_alert: + await slack_alerting.response_taking_too_long_callback( + kwargs, None, start_time, end_time + ) + mock_send_alert.assert_awaited_once() + + +# Test for budget crossed +@pytest.mark.asyncio +async def test_budget_alerts_crossed(slack_alerting): + user_max_budget = 100 + user_current_spend = 101 + with patch.object(slack_alerting, "send_alert", new=AsyncMock()) as mock_send_alert: + await slack_alerting.budget_alerts( + "user_budget", user_max_budget, user_current_spend + ) + mock_send_alert.assert_awaited_once() + + +# Test for budget crossed again (should not fire alert 2nd time) +@pytest.mark.asyncio +async def test_budget_alerts_crossed_again(slack_alerting): + user_max_budget = 100 + user_current_spend = 101 + with patch.object(slack_alerting, "send_alert", new=AsyncMock()) as mock_send_alert: + await slack_alerting.budget_alerts( + "user_budget", user_max_budget, user_current_spend + ) + mock_send_alert.assert_awaited_once() + mock_send_alert.reset_mock() + await slack_alerting.budget_alerts( + "user_budget", user_max_budget, user_current_spend + ) + mock_send_alert.assert_not_awaited() + + +# Test for send_alert - should be called once +@pytest.mark.asyncio +async def test_send_alert(slack_alerting): + with patch.object( + slack_alerting.async_http_handler, "post", new=AsyncMock() + ) as mock_post: + mock_post.return_value.status_code = 200 + await slack_alerting.send_alert("Test message", "Low", "budget_alerts") + mock_post.assert_awaited_once() From 5b39f8e282e399726849ae56169610cacee31213 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 15:27:32 -0700 Subject: [PATCH 035/543] 
feat(proxy_server.py): return api base in response headers Closes https://github.com/BerriAI/litellm/issues/2631 --- litellm/proxy/proxy_server.py | 3 ++ litellm/tests/test_alerting.py | 14 ++++++++ litellm/types/router.py | 2 ++ litellm/utils.py | 59 +++++++++++++++++++++++++++------- 4 files changed, 67 insertions(+), 11 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 26987f478..eb349b2d5 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -3658,6 +3658,7 @@ async def chat_completion( hidden_params = getattr(response, "_hidden_params", {}) or {} model_id = hidden_params.get("model_id", None) or "" cache_key = hidden_params.get("cache_key", None) or "" + api_base = hidden_params.get("api_base", None) or "" # Post Call Processing if llm_router is not None: @@ -3670,6 +3671,7 @@ async def chat_completion( custom_headers = { "x-litellm-model-id": model_id, "x-litellm-cache-key": cache_key, + "x-litellm-model-api-base": api_base, } selected_data_generator = select_data_generator( response=response, user_api_key_dict=user_api_key_dict @@ -3682,6 +3684,7 @@ async def chat_completion( fastapi_response.headers["x-litellm-model-id"] = model_id fastapi_response.headers["x-litellm-cache-key"] = cache_key + fastapi_response.headers["x-litellm-model-api-base"] = api_base ### CALL HOOKS ### - modify outgoing data response = await proxy_logging_obj.post_call_success_hook( diff --git a/litellm/tests/test_alerting.py b/litellm/tests/test_alerting.py index a74e25910..40c75b86b 100644 --- a/litellm/tests/test_alerting.py +++ b/litellm/tests/test_alerting.py @@ -15,6 +15,7 @@ import litellm import pytest import asyncio from unittest.mock import patch, MagicMock +from litellm.utils import get_api_base from litellm.caching import DualCache from litellm.integrations.slack_alerting import SlackAlerting from litellm.proxy._types import UserAPIKeyAuth @@ -74,6 +75,19 @@ async def test_slack_alerting_llm_exceptions(exception_type, monkeypatch): await asyncio.sleep(2) +@pytest.mark.parametrize( + "model, optional_params, expected_api_base", + [ + ("openai/my-fake-model", {"api_base": "my-fake-api-base"}, "my-fake-api-base"), + ("gpt-3.5-turbo", {}, "https://api.openai.com"), + ], +) +def test_get_api_base_unit_test(model, optional_params, expected_api_base): + api_base = get_api_base(model=model, optional_params=optional_params) + + assert api_base == expected_api_base + + @pytest.mark.asyncio async def test_get_api_base(): _pl = ProxyLogging(user_api_key_cache=DualCache()) diff --git a/litellm/types/router.py b/litellm/types/router.py index 64b71b999..068a99b00 100644 --- a/litellm/types/router.py +++ b/litellm/types/router.py @@ -99,6 +99,7 @@ class ModelInfo(BaseModel): class LiteLLM_Params(BaseModel): model: str + custom_llm_provider: Optional[str] = None tpm: Optional[int] = None rpm: Optional[int] = None api_key: Optional[str] = None @@ -123,6 +124,7 @@ class LiteLLM_Params(BaseModel): def __init__( self, model: str, + custom_llm_provider: Optional[str] = None, max_retries: Optional[Union[int, str]] = None, tpm: Optional[int] = None, rpm: Optional[int] = None, diff --git a/litellm/utils.py b/litellm/utils.py index 80d26f58b..589ea4d07 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -315,6 +315,7 @@ class ChatCompletionDeltaToolCall(OpenAIObject): class HiddenParams(OpenAIObject): original_response: Optional[str] = None model_id: Optional[str] = None # used in Router for individual deployments + api_base: Optional[str] = None # 
returns api base used for making completion call class Config: extra = "allow" @@ -3157,6 +3158,10 @@ def client(original_function): result._hidden_params["model_id"] = kwargs.get("model_info", {}).get( "id", None ) + result._hidden_params["api_base"] = get_api_base( + model=model, + optional_params=getattr(logging_obj, "optional_params", {}), + ) result._response_ms = ( end_time - start_time ).total_seconds() * 1000 # return response latency in ms like openai @@ -3226,6 +3231,8 @@ def client(original_function): call_type = original_function.__name__ if "litellm_call_id" not in kwargs: kwargs["litellm_call_id"] = str(uuid.uuid4()) + + model = "" try: model = args[0] if len(args) > 0 else kwargs["model"] except: @@ -3547,6 +3554,10 @@ def client(original_function): result._hidden_params["model_id"] = kwargs.get("model_info", {}).get( "id", None ) + result._hidden_params["api_base"] = get_api_base( + model=model, + optional_params=kwargs, + ) if ( isinstance(result, ModelResponse) or isinstance(result, EmbeddingResponse) @@ -5810,19 +5821,40 @@ def get_api_base(model: str, optional_params: dict) -> Optional[str]: get_api_base(model="gemini/gemini-pro") ``` """ - _optional_params = LiteLLM_Params( - model=model, **optional_params - ) # convert to pydantic object - # get llm provider + try: - model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider( - model=model - ) - except: - custom_llm_provider = None + if "model" in optional_params: + _optional_params = LiteLLM_Params(**optional_params) + else: # prevent needing to copy and pop the dict + _optional_params = LiteLLM_Params( + model=model, **optional_params + ) # convert to pydantic object + except Exception as e: + verbose_logger.error("Error occurred in getting api base - {}".format(str(e))) + return None + # get llm provider + if _optional_params.api_base is not None: return _optional_params.api_base + try: + model, custom_llm_provider, dynamic_api_key, dynamic_api_base = ( + get_llm_provider( + model=model, + custom_llm_provider=_optional_params.custom_llm_provider, + api_base=_optional_params.api_base, + api_key=_optional_params.api_key, + ) + ) + except Exception as e: + verbose_logger.error("Error occurred in getting api base - {}".format(str(e))) + custom_llm_provider = None + dynamic_api_key = None + dynamic_api_base = None + + if dynamic_api_base is not None: + return dynamic_api_base + if ( _optional_params.vertex_location is not None and _optional_params.vertex_project is not None @@ -5835,11 +5867,17 @@ def get_api_base(model: str, optional_params: dict) -> Optional[str]: ) return _api_base - if custom_llm_provider is not None and custom_llm_provider == "gemini": + if custom_llm_provider is None: + return None + + if custom_llm_provider == "gemini": _api_base = "https://generativelanguage.googleapis.com/v1beta/models/{}:generateContent".format( model ) return _api_base + elif custom_llm_provider == "openai": + _api_base = "https://api.openai.com" + return _api_base return None @@ -6147,7 +6185,6 @@ def get_llm_provider( try: dynamic_api_key = None # check if llm provider provided - # AZURE AI-Studio Logic - Azure AI Studio supports AZURE/Cohere # If User passes azure/command-r-plus -> we should send it to cohere_chat/command-r-plus if model.split("/", 1)[0] == "azure": From 209baaca0266cf441e00ccae7a382535cfafec5e Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 15:40:05 -0700 Subject: [PATCH 036/543] docs - simplify prod docs --- docs/my-website/docs/proxy/prod.md | 167 
++++++++---------------------
 1 file changed, 45 insertions(+), 122 deletions(-)

diff --git a/docs/my-website/docs/proxy/prod.md b/docs/my-website/docs/proxy/prod.md
index 980bba542..32cd916c9 100644
--- a/docs/my-website/docs/proxy/prod.md
+++ b/docs/my-website/docs/proxy/prod.md
@@ -3,34 +3,38 @@ import TabItem from '@theme/TabItem';
 
 # ⚡ Best Practices for Production
 
-Expected Performance in Production
+## 1. Use this config.yaml
+Use this config.yaml in production (with your own LLMs)
 
-1 LiteLLM Uvicorn Worker on Kubernetes
-
-| Description | Value |
-|--------------|-------|
-| Avg latency | `50ms` |
-| Median latency | `51ms` |
-| `/chat/completions` Requests/second | `35` |
-| `/chat/completions` Requests/minute | `2100` |
-| `/chat/completions` Requests/hour | `126K` |
-
-
-## 1. Switch off Debug Logging
-
-Remove `set_verbose: True` from your config.yaml
 ```yaml
+model_list:
+  - model_name: fake-openai-endpoint
+    litellm_params:
+      model: openai/fake
+      api_key: fake-key
+      api_base: https://exampleopenaiendpoint-production.up.railway.app/
+
+general_settings:
+  master_key: sk-1234 # enter your own master key, ensure it starts with 'sk-'
+  alerting: ["slack"] # Setup slack alerting - get alerts on LLM exceptions, Budget Alerts, Slow LLM Responses
+  proxy_batch_write_at: 60 # Batch write spend updates every 60s
+
 litellm_settings:
-  set_verbose: True
+  set_verbose: False # Switch off Debug Logging, ensure your logs do not have any debugging on
 ```
 
-You should only see the following level of details in logs on the proxy server
+Set slack webhook url in your env
 ```shell
-# INFO:     192.168.2.205:11774 - "POST /chat/completions HTTP/1.1" 200 OK
-# INFO:     192.168.2.205:34717 - "POST /chat/completions HTTP/1.1" 200 OK
-# INFO:     192.168.2.205:29734 - "POST /chat/completions HTTP/1.1" 200 OK
+export SLACK_WEBHOOK_URL="https://hooks.slack.com/services/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"
 ```
 
+:::info
+
+Need help or want dedicated support? Talk to a founder [here](https://calendly.com/d/4mp-gd3-k5k/litellm-1-1-onboarding-chat)
+
+:::
+
+
 ## 2. On Kubernetes - Use 1 Uvicorn worker [Suggested CMD]
 
 Use this Docker `CMD`. This will start the proxy with 1 Uvicorn Async Worker
@@ -40,21 +44,12 @@ Use this Docker `CMD`. This will start the proxy with 1 Uvicorn Async Worker
 CMD ["--port", "4000", "--config", "./proxy_server_config.yaml"]
 ```
 
-## 3. Batch write spend updates every 60s
-The default proxy batch write is 10s. This is to make it easy to see spend when debugging locally.
+## 3. Use Redis 'port', 'host', 'password'. NOT 'redis_url'
 
-In production, we recommend using a longer interval period of 60s. This reduces the number of connections used to make DB writes.
+If you decide to use Redis, DO NOT use 'redis_url'. We recommend using the redis port, host, and password params.
 
-```yaml
-general_settings:
-  master_key: sk-1234
-  proxy_batch_write_at: 60 # 👈 Frequency of batch writing logs to server (in seconds)
-```
-
-## 4. use Redis 'port','host', 'password'. NOT 'redis_url'
+Using `redis_url` is ~80 RPS slower.
 
-When connecting to Redis use redis port, host, and password params. Not 'redis_url'. We've seen a 80 RPS difference between these 2 approaches when using the async redis client.
 
 This is still something we're investigating. Keep track of it [here](https://github.com/BerriAI/litellm/issues/3188)
@@ -69,103 +64,31 @@ router_settings:
   redis_host: os.environ/REDIS_HOST
   redis_port: os.environ/REDIS_PORT
   redis_password: os.environ/REDIS_PASSWORD
 ```
 
-## 5. 
Switch off resetting budgets +## Extras +### Expected Performance in Production -Add this to your config.yaml. (Only spend per Key, User and Team will be tracked - spend per API Call will not be written to the LiteLLM Database) -```yaml -general_settings: - disable_reset_budget: true -``` +1 LiteLLM Uvicorn Worker on Kubernetes -## 6. Move spend logs to separate server (BETA) - -Writing each spend log to the db can slow down your proxy. In testing we saw a 70% improvement in median response time, by moving writing spend logs to a separate server. - -👉 [LiteLLM Spend Logs Server](https://github.com/BerriAI/litellm/tree/main/litellm-js/spend-logs) +| Description | Value | +|--------------|-------| +| Avg latency | `50ms` | +| Median latency | `51ms` | +| `/chat/completions` Requests/second | `35` | +| `/chat/completions` Requests/minute | `2100` | +| `/chat/completions` Requests/hour | `126K` | -**Spend Logs** -This is a log of the key, tokens, model, and latency for each call on the proxy. +### Verifying Debugging logs are off -[**Full Payload**](https://github.com/BerriAI/litellm/blob/8c9623a6bc4ad9da0a2dac64249a60ed8da719e8/litellm/proxy/utils.py#L1769) - - -**1. Start the spend logs server** - -```bash -docker run -p 3000:3000 \ - -e DATABASE_URL="postgres://.." \ - ghcr.io/berriai/litellm-spend_logs:main-latest - -# RUNNING on http://0.0.0.0:3000 -``` - -**2. Connect to proxy** - - -Example litellm_config.yaml - -```yaml -model_list: -- model_name: fake-openai-endpoint - litellm_params: - model: openai/my-fake-model - api_key: my-fake-key - api_base: https://exampleopenaiendpoint-production.up.railway.app/ - -general_settings: - master_key: sk-1234 - proxy_batch_write_at: 5 # 👈 Frequency of batch writing logs to server (in seconds) -``` - -Add `SPEND_LOGS_URL` as an environment variable when starting the proxy - -```bash -docker run \ - -v $(pwd)/litellm_config.yaml:/app/config.yaml \ - -e DATABASE_URL="postgresql://.." \ - -e SPEND_LOGS_URL="http://host.docker.internal:3000" \ # 👈 KEY CHANGE - -p 4000:4000 \ - ghcr.io/berriai/litellm:main-latest \ - --config /app/config.yaml --detailed_debug - -# Running on http://0.0.0.0:4000 -``` - -**3. Test Proxy!** - - -```bash -curl --location 'http://0.0.0.0:4000/v1/chat/completions' \ ---header 'Content-Type: application/json' \ ---header 'Authorization: Bearer sk-1234' \ ---data '{ - "model": "fake-openai-endpoint", - "messages": [ - {"role": "system", "content": "Be helpful"}, - {"role": "user", "content": "What do you know?"} - ] -}' -``` - -In your LiteLLM Spend Logs Server, you should see - -**Expected Response** - -``` -Received and stored 1 logs. Total logs in memory: 1 -... -Flushed 1 log to the DB. +You should only see the following level of details in logs on the proxy server +```shell +# INFO: 192.168.2.205:11774 - "POST /chat/completions HTTP/1.1" 200 OK +# INFO: 192.168.2.205:34717 - "POST /chat/completions HTTP/1.1" 200 OK +# INFO: 192.168.2.205:29734 - "POST /chat/completions HTTP/1.1" 200 OK ``` -### Machine Specification - -A t2.micro should be sufficient to handle 1k logs / minute on this server. - -This consumes at max 120MB, and <0.1 vCPU. - -## Machine Specifications to Deploy LiteLLM +### Machine Specifications to Deploy LiteLLM | Service | Spec | CPUs | Memory | Architecture | Version| | --- | --- | --- | --- | --- | --- | @@ -173,7 +96,7 @@ This consumes at max 120MB, and <0.1 vCPU. 
| Redis Cache | - | - | - | - | 7.0+ Redis Engine| -## Reference Kubernetes Deployment YAML +### Reference Kubernetes Deployment YAML Reference Kubernetes `deployment.yaml` that was load tested by us From fbe412a3a6a61640793a24d4c919210175e2179c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 15:58:05 -0700 Subject: [PATCH 037/543] feat - add amazon.titan-embed-text-v2 --- litellm/model_prices_and_context_window_backup.json | 9 +++++++++ model_prices_and_context_window.json | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 7fcd425bb..c7d5aae2d 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -1832,6 +1832,15 @@ "litellm_provider": "bedrock", "mode": "embedding" }, + "amazon.titan-embed-text-v2:0": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "output_vector_size": 1024, + "input_cost_per_token": 0.0000002, + "output_cost_per_token": 0.0, + "litellm_provider": "bedrock", + "mode": "embedding" + }, "mistral.mistral-7b-instruct-v0:2": { "max_tokens": 8191, "max_input_tokens": 32000, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 7fcd425bb..c7d5aae2d 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -1832,6 +1832,15 @@ "litellm_provider": "bedrock", "mode": "embedding" }, + "amazon.titan-embed-text-v2:0": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "output_vector_size": 1024, + "input_cost_per_token": 0.0000002, + "output_cost_per_token": 0.0, + "litellm_provider": "bedrock", + "mode": "embedding" + }, "mistral.mistral-7b-instruct-v0:2": { "max_tokens": 8191, "max_input_tokens": 32000, From bf048ecda4976f2676a25de4aec69fe98c6c0b54 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 15:59:02 -0700 Subject: [PATCH 038/543] docs - titan embeddings v2 --- docs/my-website/docs/providers/bedrock.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/my-website/docs/providers/bedrock.md b/docs/my-website/docs/providers/bedrock.md index 590ffc423..147c12e65 100644 --- a/docs/my-website/docs/providers/bedrock.md +++ b/docs/my-website/docs/providers/bedrock.md @@ -535,7 +535,8 @@ print(response) | Model Name | Function Call | |----------------------|---------------------------------------------| -| Titan Embeddings - G1 | `embedding(model="bedrock/amazon.titan-embed-text-v1", input=input)` | +| Titan Embeddings V2 | `embedding(model="bedrock/amazon.titan-embed-text-v2:0", input=input)` | +| Titan Embeddings - V1 | `embedding(model="bedrock/amazon.titan-embed-text-v1", input=input)` | | Cohere Embeddings - English | `embedding(model="bedrock/cohere.embed-english-v3", input=input)` | | Cohere Embeddings - Multilingual | `embedding(model="bedrock/cohere.embed-multilingual-v3", input=input)` | From 4a39b95accf25380a037ff241cb1822f412e3e22 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 16:13:27 -0700 Subject: [PATCH 039/543] fix - support dimension for titan embed v2 --- litellm/utils.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/litellm/utils.py b/litellm/utils.py index 80d26f58b..e124358b6 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -4743,6 +4743,21 @@ def get_optional_params_embeddings( status_code=500, message=f"Setting user/encoding format is not supported by {custom_llm_provider}. 
To drop it from the call, set `litellm.drop_params = True`.", ) + if custom_llm_provider == "bedrock": + if "amazon.titan-embed-text-v2" in model: + # embed-text-v2 supports the dimension param + non_default_params.pop("dimensions", None) + if len(non_default_params.keys()) > 0: + if litellm.drop_params is True: # drop the unsupported non-default values + keys = list(non_default_params.keys()) + for k in keys: + non_default_params.pop(k, None) + final_params = {**non_default_params, **kwargs} + return final_params + raise UnsupportedParamsError( + status_code=500, + message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.", + ) if ( custom_llm_provider != "openai" From 401bf8d67ee416a177b0fe6afe1c3939d5a94ed1 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 16:23:37 -0700 Subject: [PATCH 040/543] test - bedrock v2 supports dimension --- .../test_get_optional_params_embeddings.py | 29 +++++++++++++++++++ litellm/utils.py | 10 +++++-- 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/litellm/tests/test_get_optional_params_embeddings.py b/litellm/tests/test_get_optional_params_embeddings.py index 41396b531..81b177030 100644 --- a/litellm/tests/test_get_optional_params_embeddings.py +++ b/litellm/tests/test_get_optional_params_embeddings.py @@ -40,3 +40,32 @@ def test_vertex_projects(): # test_vertex_projects() + + +def test_bedrock_embed_v2_regular(): + model, custom_llm_provider, _, _ = get_llm_provider( + model="bedrock/amazon.titan-embed-text-v2:0" + ) + optional_params = get_optional_params_embeddings( + model=model, + dimensions=512, + custom_llm_provider=custom_llm_provider, + ) + print(f"received optional_params: {optional_params}") + assert optional_params == {"dimensions": 512} + + +def test_bedrock_embed_v2_with_drop_params(): + litellm.drop_params = True + model, custom_llm_provider, _, _ = get_llm_provider( + model="bedrock/amazon.titan-embed-text-v2:0" + ) + optional_params = get_optional_params_embeddings( + model=model, + dimensions=512, + user="test-litellm-user-5", + encoding_format="base64", + custom_llm_provider=custom_llm_provider, + ) + print(f"received optional_params: {optional_params}") + assert optional_params == {"dimensions": 512} diff --git a/litellm/utils.py b/litellm/utils.py index e124358b6..5070e6498 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -4744,9 +4744,14 @@ def get_optional_params_embeddings( message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.", ) if custom_llm_provider == "bedrock": - if "amazon.titan-embed-text-v2" in model: - # embed-text-v2 supports the dimension param + # if dimensions is in non_default_params -> pass it for model=bedrock/amazon.titan-embed-text-v2 + if ( + "dimensions" in non_default_params.keys() + and "amazon.titan-embed-text-v2" in model + ): + kwargs["dimensions"] = non_default_params["dimensions"] non_default_params.pop("dimensions", None) + if len(non_default_params.keys()) > 0: if litellm.drop_params is True: # drop the unsupported non-default values keys = list(non_default_params.keys()) @@ -4758,6 +4763,7 @@ def get_optional_params_embeddings( status_code=500, message=f"Setting user/encoding format is not supported by {custom_llm_provider}. 
To drop it from the call, set `litellm.drop_params = True`.", ) + return {**non_default_params, **kwargs} if ( custom_llm_provider != "openai" From a732d8772af2f8d2cb9b9a04d3f17fda85086436 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 16:24:21 -0700 Subject: [PATCH 041/543] fix(bedrock.py): convert httpx.timeout to boto3 valid timeout Closes https://github.com/BerriAI/litellm/issues/3398 --- litellm/llms/azure.py | 2 +- litellm/llms/bedrock.py | 9 +++++++-- litellm/llms/openai.py | 15 +++++++++------ litellm/main.py | 35 +++++++++++++++++++++++------------ litellm/router.py | 12 +++++++++--- litellm/tests/test_timeout.py | 32 +++++++++++++++++++++++++++++++- litellm/utils.py | 14 +++++++++++++- 7 files changed, 93 insertions(+), 26 deletions(-) diff --git a/litellm/llms/azure.py b/litellm/llms/azure.py index 0fe5c4e7e..e7af9d43b 100644 --- a/litellm/llms/azure.py +++ b/litellm/llms/azure.py @@ -151,7 +151,7 @@ class AzureChatCompletion(BaseLLM): api_type: str, azure_ad_token: str, print_verbose: Callable, - timeout, + timeout: Union[float, httpx.Timeout], logging_obj, optional_params, litellm_params, diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py index 235c13c59..7ce544c96 100644 --- a/litellm/llms/bedrock.py +++ b/litellm/llms/bedrock.py @@ -533,7 +533,7 @@ def init_bedrock_client( aws_session_name: Optional[str] = None, aws_profile_name: Optional[str] = None, aws_role_name: Optional[str] = None, - timeout: Optional[int] = None, + timeout: Optional[Union[float, httpx.Timeout]] = None, ): # check for custom AWS_REGION_NAME and use it if not passed to init_bedrock_client litellm_aws_region_name = get_secret("AWS_REGION_NAME", None) @@ -592,7 +592,12 @@ def init_bedrock_client( import boto3 - config = boto3.session.Config(connect_timeout=timeout, read_timeout=timeout) + if isinstance(timeout, float): + config = boto3.session.Config(connect_timeout=timeout, read_timeout=timeout) + elif isinstance(timeout, httpx.Timeout): + config = boto3.session.Config( + connect_timeout=timeout.connect, read_timeout=timeout.read + ) ### CHECK STS ### if aws_role_name is not None and aws_session_name is not None: diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py index f68ab235e..5a76605b3 100644 --- a/litellm/llms/openai.py +++ b/litellm/llms/openai.py @@ -246,7 +246,7 @@ class OpenAIChatCompletion(BaseLLM): def completion( self, model_response: ModelResponse, - timeout: float, + timeout: Union[float, httpx.Timeout], model: Optional[str] = None, messages: Optional[list] = None, print_verbose: Optional[Callable] = None, @@ -271,9 +271,12 @@ class OpenAIChatCompletion(BaseLLM): if model is None or messages is None: raise OpenAIError(status_code=422, message=f"Missing model or messages") - if not isinstance(timeout, float): + if not isinstance(timeout, float) and not isinstance( + timeout, httpx.Timeout + ): raise OpenAIError( - status_code=422, message=f"Timeout needs to be a float" + status_code=422, + message=f"Timeout needs to be a float or httpx.Timeout", ) if custom_llm_provider != "openai": @@ -425,7 +428,7 @@ class OpenAIChatCompletion(BaseLLM): self, data: dict, model_response: ModelResponse, - timeout: float, + timeout: Union[float, httpx.Timeout], api_key: Optional[str] = None, api_base: Optional[str] = None, organization: Optional[str] = None, @@ -480,7 +483,7 @@ class OpenAIChatCompletion(BaseLLM): def streaming( self, logging_obj, - timeout: float, + timeout: Union[float, httpx.Timeout], data: dict, model: str, api_key: Optional[str] = None, @@ 
-524,7 +527,7 @@ class OpenAIChatCompletion(BaseLLM): async def async_streaming( self, logging_obj, - timeout: float, + timeout: Union[float, httpx.Timeout], data: dict, model: str, api_key: Optional[str] = None, diff --git a/litellm/main.py b/litellm/main.py index 9765669fe..bbcdef0de 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -39,6 +39,7 @@ from litellm.utils import ( Usage, get_optional_params_embeddings, get_optional_params_image_gen, + supports_httpx_timeout, ) from .llms import ( anthropic_text, @@ -450,7 +451,7 @@ def completion( model: str, # Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create messages: List = [], - timeout: Optional[Union[float, int]] = None, + timeout: Optional[Union[float, str, httpx.Timeout]] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, n: Optional[int] = None, @@ -648,11 +649,21 @@ def completion( non_default_params = { k: v for k, v in kwargs.items() if k not in default_params } # model-specific params - pass them straight to the model/provider - if timeout is None: - timeout = ( - kwargs.get("request_timeout", None) or 600 - ) # set timeout for 10 minutes by default - timeout = float(timeout) + + ### TIMEOUT LOGIC ### + timeout = timeout or kwargs.get("request_timeout", 600) or 600 + # set timeout for 10 minutes by default + + if ( + timeout is not None + and isinstance(timeout, httpx.Timeout) + and supports_httpx_timeout(custom_llm_provider) == False + ): + read_timeout = timeout.read or 600 + timeout = read_timeout # default 10 min timeout + elif timeout is not None and not isinstance(timeout, httpx.Timeout): + timeout = float(timeout) # type: ignore + try: if base_url is not None: api_base = base_url @@ -873,7 +884,7 @@ def completion( logger_fn=logger_fn, logging_obj=logging, acompletion=acompletion, - timeout=timeout, + timeout=timeout, # type: ignore client=client, # pass AsyncAzureOpenAI, AzureOpenAI client ) @@ -1014,7 +1025,7 @@ def completion( optional_params=optional_params, litellm_params=litellm_params, logger_fn=logger_fn, - timeout=timeout, + timeout=timeout, # type: ignore custom_prompt_dict=custom_prompt_dict, client=client, # pass AsyncOpenAI, OpenAI client organization=organization, @@ -1099,7 +1110,7 @@ def completion( optional_params=optional_params, litellm_params=litellm_params, logger_fn=logger_fn, - timeout=timeout, + timeout=timeout, # type: ignore ) if ( @@ -1473,7 +1484,7 @@ def completion( acompletion=acompletion, logging_obj=logging, custom_prompt_dict=custom_prompt_dict, - timeout=timeout, + timeout=timeout, # type: ignore ) if ( "stream" in optional_params @@ -1566,7 +1577,7 @@ def completion( logger_fn=logger_fn, logging_obj=logging, acompletion=acompletion, - timeout=timeout, + timeout=timeout, # type: ignore ) ## LOGGING logging.post_call( @@ -1893,7 +1904,7 @@ def completion( logger_fn=logger_fn, encoding=encoding, logging_obj=logging, - timeout=timeout, + timeout=timeout, # type: ignore ) if ( "stream" in optional_params diff --git a/litellm/router.py b/litellm/router.py index 9638db548..d64deecec 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -375,7 +375,9 @@ class Router: except Exception as e: raise e - def _completion(self, model: str, messages: List[Dict[str, str]], **kwargs): + def _completion( + self, model: str, messages: List[Dict[str, str]], **kwargs + ) -> Union[ModelResponse, CustomStreamWrapper]: model_name = None try: # pick the one that is available (lowest TPM/RPM) @@ -438,7 +440,9 @@ class Router: ) raise e - async def 
acompletion(self, model: str, messages: List[Dict[str, str]], **kwargs): + async def acompletion( + self, model: str, messages: List[Dict[str, str]], **kwargs + ) -> Union[ModelResponse, CustomStreamWrapper]: try: kwargs["model"] = model kwargs["messages"] = messages @@ -454,7 +458,9 @@ class Router: except Exception as e: raise e - async def _acompletion(self, model: str, messages: List[Dict[str, str]], **kwargs): + async def _acompletion( + self, model: str, messages: List[Dict[str, str]], **kwargs + ) -> Union[ModelResponse, CustomStreamWrapper]: """ - Get an available deployment - call it with a semaphore over the call diff --git a/litellm/tests/test_timeout.py b/litellm/tests/test_timeout.py index d38da52e5..f24b26a0c 100644 --- a/litellm/tests/test_timeout.py +++ b/litellm/tests/test_timeout.py @@ -10,7 +10,37 @@ sys.path.insert( import time import litellm import openai -import pytest, uuid +import pytest, uuid, httpx + + +@pytest.mark.parametrize( + "model, provider", + [ + ("gpt-3.5-turbo", "openai"), + ("anthropic.claude-instant-v1", "bedrock"), + ("azure/chatgpt-v-2", "azure"), + ], +) +@pytest.mark.parametrize("sync_mode", [True, False]) +@pytest.mark.asyncio +async def test_httpx_timeout(model, provider, sync_mode): + """ + Test if setting httpx.timeout works for completion calls + """ + timeout_val = httpx.Timeout(10.0, connect=60.0) + + messages = [{"role": "user", "content": "Hey, how's it going?"}] + + if sync_mode: + response = litellm.completion( + model=model, messages=messages, timeout=timeout_val + ) + else: + response = await litellm.acompletion( + model=model, messages=messages, timeout=timeout_val + ) + + print(f"response: {response}") def test_timeout(): diff --git a/litellm/utils.py b/litellm/utils.py index 80d26f58b..89d814e32 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -4442,7 +4442,19 @@ def completion_cost( raise e -def supports_function_calling(model: str): +def supports_httpx_timeout(custom_llm_provider: str) -> bool: + """ + Helper function to know if a provider implementation supports httpx timeout + """ + supported_providers = ["openai", "azure", "bedrock"] + + if custom_llm_provider in supported_providers: + return True + + return False + + +def supports_function_calling(model: str) -> bool: """ Check if the given model supports function calling and return a boolean value. 
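[Note on the timeout change above: `completion()` now accepts either a float or an `httpx.Timeout` on the providers wired up for it (openai, azure, bedrock), falling back to `timeout.read` elsewhere. A minimal usage sketch, mirroring the `test_httpx_timeout` case added in this patch — the model name and API key env var are only assumptions for illustration:]

```python
import httpx
import litellm

# 60s to establish the connection, 10s for reads/writes once connected.
timeout_val = httpx.Timeout(10.0, connect=60.0)

# Assumes OPENAI_API_KEY is set in the environment.
# Providers without native httpx.Timeout support receive timeout.read instead,
# per the completion() timeout logic in this patch.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    timeout=timeout_val,
)
print(response)
```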
From 2084cfd959da45bd1b90432e13da4d51180f9ac7 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Fri, 3 May 2024 16:41:08 -0700
Subject: [PATCH 042/543] fix - test_check_num_callbacks

---
 tests/test_callbacks_on_proxy.py | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py
index c10b18ed1..70cd3ca25 100644
--- a/tests/test_callbacks_on_proxy.py
+++ b/tests/test_callbacks_on_proxy.py
@@ -108,11 +108,9 @@ async def test_check_num_callbacks():
     import uuid
 
     async with aiohttp.ClientSession() as session:
+        await asyncio.sleep(30)
         num_callbacks_1, _ = await get_active_callbacks(session=session)
-        assert (
-            num_callbacks_1 > 0
-        )  # /health/readiness returns 0 when some calculation goes wrong
-
+        assert num_callbacks_1 > 0
         await asyncio.sleep(30)
 
         num_callbacks_2, _ = await get_active_callbacks(session=session)
@@ -141,14 +139,12 @@ async def test_check_num_callbacks_on_lowest_latency():
     import uuid
 
     async with aiohttp.ClientSession() as session:
+        await asyncio.sleep(30)
         original_routing_strategy = await get_current_routing_strategy(session=session)
         await config_update(session=session, routing_strategy="latency-based-routing")
 
         num_callbacks_1, num_alerts_1 = await get_active_callbacks(session=session)
-        assert (
-            num_callbacks_1 > 0
-        )  # /health/readiness returns 0 when some calculation goes wrong
 
         await asyncio.sleep(30)

From f7eee609431f3003dfb1885ffd0663a1b666af7d Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Fri, 3 May 2024 16:51:12 -0700
Subject: [PATCH 043/543] docs(exception_mapping.md): update exception mapping
 docs with 'should_retry'

---
 docs/my-website/docs/exception_mapping.md | 67 +++++++++++++++++------
 litellm/utils.py                          | 15 +++--
 2 files changed, 60 insertions(+), 22 deletions(-)

diff --git a/docs/my-website/docs/exception_mapping.md b/docs/my-website/docs/exception_mapping.md
index db17fb093..2345e9f83 100644
--- a/docs/my-website/docs/exception_mapping.md
+++ b/docs/my-website/docs/exception_mapping.md
@@ -13,7 +13,7 @@ LiteLLM maps exceptions across all providers to their OpenAI counterparts.
 | >=500 | InternalServerError |
 | N/A | ContextWindowExceededError|
 | 400 | ContentPolicyViolationError|
-| N/A | APIConnectionError |
+| 500 | APIConnectionError |
 
 Base case we return APIConnectionError
 
@@ -74,6 +74,28 @@ except Exception as e:
 ```
 
+## Usage - Should you retry exception?
+
+```
+import litellm
+import openai
+
+try:
+    response = litellm.completion(
+        model="gpt-4",
+        messages=[
+            {
+                "role": "user",
+                "content": "hello, write a 20 page essay"
+            }
+        ],
+        timeout=0.01, # this will raise a timeout exception
+    )
+except openai.APITimeoutError as e:
+    should_retry = litellm._should_retry(e.status_code)
+    print(f"should_retry: {should_retry}")
+```
+
 ## Details
 
 To see how it's implemented - [check out the code](https://github.com/BerriAI/litellm/blob/a42c197e5a6de56ea576c73715e6c7c6b19fa249/litellm/utils.py#L1217)
@@ -86,21 +108,34 @@ To see how it's implemented - [check out the code](https://github.com/BerriAI/li
 
 Base case - we return the original exception. 
-| | ContextWindowExceededError | AuthenticationError | InvalidRequestError | RateLimitError | ServiceUnavailableError | -|---------------|----------------------------|---------------------|---------------------|---------------|-------------------------| -| Anthropic | ✅ | ✅ | ✅ | ✅ | | -| OpenAI | ✅ | ✅ |✅ |✅ |✅| -| Azure OpenAI | ✅ | ✅ |✅ |✅ |✅| -| Replicate | ✅ | ✅ | ✅ | ✅ | ✅ | -| Cohere | ✅ | ✅ | ✅ | ✅ | ✅ | -| Huggingface | ✅ | ✅ | ✅ | ✅ | | -| Openrouter | ✅ | ✅ | ✅ | ✅ | | -| AI21 | ✅ | ✅ | ✅ | ✅ | | -| VertexAI | | |✅ | | | -| Bedrock | | |✅ | | | -| Sagemaker | | |✅ | | | -| TogetherAI | ✅ | ✅ | ✅ | ✅ | | -| AlephAlpha | ✅ | ✅ | ✅ | ✅ | ✅ | +| custom_llm_provider | Timeout | ContextWindowExceededError | BadRequestError | NotFoundError | ContentPolicyViolationError | AuthenticationError | APIError | RateLimitError | ServiceUnavailableError | PermissionDeniedError | UnprocessableEntityError | +|----------------------------|---------|----------------------------|------------------|---------------|-----------------------------|---------------------|----------|----------------|-------------------------|-----------------------|-------------------------| +| openai | ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | | +| text-completion-openai | ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | | +| custom_openai | ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | | +| openai_compatible_providers| ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | | +| anthropic | ✓ | ✓ | ✓ | ✓ | | ✓ | | | ✓ | ✓ | | +| replicate | ✓ | ✓ | ✓ | ✓ | | ✓ | | ✓ | ✓ | | | +| bedrock | ✓ | ✓ | ✓ | ✓ | | ✓ | | ✓ | ✓ | ✓ | | +| sagemaker | | ✓ | ✓ | | | | | | | | | +| vertex_ai | ✓ | | ✓ | | | | ✓ | | | | ✓ | +| palm | ✓ | ✓ | | | | | ✓ | | | | | +| gemini | ✓ | ✓ | | | | | ✓ | | | | | +| cloudflare | | | ✓ | | | ✓ | | | | | | +| cohere | | ✓ | ✓ | | | ✓ | | | ✓ | | | +| cohere_chat | | ✓ | ✓ | | | ✓ | | | ✓ | | | +| huggingface | ✓ | ✓ | ✓ | | | ✓ | | ✓ | ✓ | | | +| ai21 | ✓ | ✓ | ✓ | ✓ | | ✓ | | ✓ | | | | +| nlp_cloud | ✓ | ✓ | ✓ | | | ✓ | ✓ | ✓ | ✓ | | | +| together_ai | ✓ | ✓ | ✓ | | | ✓ | | | | | | +| aleph_alpha | | | ✓ | | | ✓ | | | | | | +| ollama | ✓ | | ✓ | | | | | | ✓ | | | +| ollama_chat | ✓ | | ✓ | | | | | | ✓ | | | +| vllm | | | | | | ✓ | ✓ | | | | | +| azure | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | ✓ | | | + +- "✓" indicates that the specified `custom_llm_provider` can raise the corresponding exception. +- Empty cells indicate the lack of association or that the provider does not raise that particular exception type as indicated by the function. > For a deeper understanding of these exceptions, you can check out [this](https://github.com/BerriAI/litellm/blob/d7e58d13bf9ba9edbab2ab2f096f3de7547f35fa/litellm/utils.py#L1544) implementation for additional insights. diff --git a/litellm/utils.py b/litellm/utils.py index acadb47e7..63684766b 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -8480,7 +8480,7 @@ def exception_type( # 503 Getting metadata from plugin failed with error: Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate. 
exception_mapping_worked = True raise BadRequestError( - message=f"PalmException - Invalid api key", + message=f"GeminiException - Invalid api key", model=model, llm_provider="palm", response=original_exception.response, @@ -8491,23 +8491,26 @@ def exception_type( ): exception_mapping_worked = True raise Timeout( - message=f"PalmException - {original_exception.message}", + message=f"GeminiException - {original_exception.message}", model=model, llm_provider="palm", ) if "400 Request payload size exceeds" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( - message=f"PalmException - {error_str}", + message=f"GeminiException - {error_str}", model=model, llm_provider="palm", response=original_exception.response, ) - if "500 An internal error has occurred." in error_str: + if ( + "500 An internal error has occurred." in error_str + or "list index out of range" in error_str + ): exception_mapping_worked = True raise APIError( status_code=getattr(original_exception, "status_code", 500), - message=f"PalmException - {original_exception.message}", + message=f"GeminiException - {original_exception.message}", llm_provider="palm", model=model, request=original_exception.request, @@ -8516,7 +8519,7 @@ def exception_type( if original_exception.status_code == 400: exception_mapping_worked = True raise BadRequestError( - message=f"PalmException - {error_str}", + message=f"GeminiException - {error_str}", model=model, llm_provider="palm", response=original_exception.response, From d3152e606ff222ecc0f165f096d7512b9955b572 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 16:53:24 -0700 Subject: [PATCH 044/543] fix - ui filter exceptions by modelGroup --- ui/litellm-dashboard/src/components/networking.tsx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ui/litellm-dashboard/src/components/networking.tsx b/ui/litellm-dashboard/src/components/networking.tsx index 35880949b..53779c64b 100644 --- a/ui/litellm-dashboard/src/components/networking.tsx +++ b/ui/litellm-dashboard/src/components/networking.tsx @@ -526,7 +526,9 @@ export const modelExceptionsCall = async ( */ try { let url = proxyBaseUrl ? 
`${proxyBaseUrl}/model/metrics/exceptions` : `/model/metrics/exceptions`; - + if (modelGroup) { + url = `${url}?_selected_model_group=${modelGroup}` + } const response = await fetch(url, { method: "GET", headers: { From e7034ea53d65cf233c05e12e676a9d0348c68bb8 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 16:54:24 -0700 Subject: [PATCH 045/543] feat - filter exceptions by model group --- litellm/proxy/proxy_server.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 55202fd16..27a05ca21 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -7706,7 +7706,7 @@ async def model_metrics_exceptions( exception_type, COUNT(*) AS num_exceptions FROM "LiteLLM_ErrorLogs" - WHERE "startTime" >= $1::timestamp AND "endTime" <= $2::timestamp + WHERE "startTime" >= $1::timestamp AND "endTime" <= $2::timestamp AND model_group = $3 GROUP BY combined_model_api_base, exception_type ) SELECT @@ -7718,7 +7718,9 @@ async def model_metrics_exceptions( ORDER BY total_exceptions DESC LIMIT 200; """ - db_response = await prisma_client.db.query_raw(sql_query, startTime, endTime) + db_response = await prisma_client.db.query_raw( + sql_query, startTime, endTime, _selected_model_group + ) response: List[dict] = [] exception_types = set() From 0b9fa53e3e6da6fbe0c965fa77917f919210fa2c Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 16:59:49 -0700 Subject: [PATCH 046/543] fix(anthropic.py): drop unsupported non-whitespace character value when calling anthropic with stop sequences Fixes https://github.com/BerriAI/litellm/issues/3286 --- litellm/llms/anthropic.py | 42 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py index 24d889b0f..377235dee 100644 --- a/litellm/llms/anthropic.py +++ b/litellm/llms/anthropic.py @@ -84,6 +84,48 @@ class AnthropicConfig: and v is not None } + def get_supported_openai_params(self): + return [ + "stream", + "stop", + "temperature", + "top_p", + "max_tokens", + "tools", + "tool_choice", + ] + + def map_openai_params(self, non_default_params: dict, optional_params: dict): + for param, value in non_default_params.items(): + if param == "max_tokens": + optional_params["max_tokens"] = value + if param == "tools": + optional_params["tools"] = value + if param == "stream": + optional_params["stream"] = value + if param == "stop": + if isinstance(value, str): + if ( + value == "\n" + ): # anthropic doesn't allow whitespace characters as stop-sequences + continue + value = [value] + elif isinstance(value, list): + new_v = [] + for v in value: + if ( + v == "\n" + ): # anthropic doesn't allow whitespace characters as stop-sequences + continue + new_v.append(v) + value = new_v + optional_params["stop_sequences"] = value + if param == "temperature": + optional_params["temperature"] = value + if param == "top_p": + optional_params["top_p"] = value + return optional_params + # makes headers for API call def validate_environment(api_key, user_headers): From 0450abfdc12d92576595e8e4d6ba88308d72f704 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 17:22:06 -0700 Subject: [PATCH 047/543] fix(bedrock.py): fix boto3 config init --- litellm/llms/bedrock.py | 2 + litellm/tests/log.txt | 114 ++++++++++++++++++---------------------- 2 files changed, 53 insertions(+), 63 deletions(-) diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py index 
index 7ce544c96..517e44146 100644
--- a/litellm/llms/bedrock.py
+++ b/litellm/llms/bedrock.py
@@ -598,6 +598,8 @@ def init_bedrock_client(
        config = boto3.session.Config(
            connect_timeout=timeout.connect, read_timeout=timeout.read
        )
+    else:
+        config = boto3.session.Config()

    ### CHECK STS ###
    if aws_role_name is not None and aws_session_name is not None:
diff --git a/litellm/tests/log.txt b/litellm/tests/log.txt
index 2d3718c7d..a7d123345 100644
--- a/litellm/tests/log.txt
+++ b/litellm/tests/log.txt
@@ -5,74 +5,59 @@ plugins: timeout-2.2.0, asyncio-0.23.2, anyio-3.7.1, xdist-3.3.1
 asyncio: mode=Mode.STRICT
 collected 1 item

-test_custom_logger.py Chunks have a created at hidden param
-Chunks sorted
-token_counter messages received: [{'role': 'user', 'content': 'write a one sentence poem about: 73348'}]
-Token Counter - using OpenAI token counter, for model=gpt-3.5-turbo
-LiteLLM: Utils - Counting tokens for OpenAI model=gpt-3.5-turbo
-Logging Details LiteLLM-Success Call: None
-success callbacks: []
-Token Counter - using OpenAI token counter, for model=gpt-3.5-turbo
-LiteLLM: Utils - Counting tokens for OpenAI model=gpt-3.5-turbo
-Logging Details LiteLLM-Success Call streaming complete
-Looking up model=gpt-3.5-turbo in model_cost_map
-Success: model=gpt-3.5-turbo in model_cost_map
-prompt_tokens=17; completion_tokens=0
-Returned custom cost for model=gpt-3.5-turbo - prompt_tokens_cost_usd_dollar: 2.55e-05, completion_tokens_cost_usd_dollar: 0.0
-final cost: 2.55e-05; prompt_tokens_cost_usd_dollar: 2.55e-05; completion_tokens_cost_usd_dollar: 0.0
-. [100%]
+test_image_generation.py . [100%]

 =============================== warnings summary ===============================
-../../../../../../opt/homebrew/lib/python3.11/site-packages/pydantic/_internal/_config.py:284: 18 warnings
+../../../../../../opt/homebrew/lib/python3.11/site-packages/pydantic/_internal/_config.py:284: 23 warnings
   /opt/homebrew/lib/python3.11/site-packages/pydantic/_internal/_config.py:284: PydanticDeprecatedSince20: Support for class-based `config` is deprecated, use ConfigDict instead. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   warnings.warn(DEPRECATION_MESSAGE, DeprecationWarning)

-../proxy/_types.py:218
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:218: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:219
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:219: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   @root_validator(pre=True)

-../proxy/_types.py:305
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:305: PydanticDeprecatedSince20: `pydantic.config.Extra` is deprecated, use literal values instead (e.g. `extra='allow'`). Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:306
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:306: PydanticDeprecatedSince20: `pydantic.config.Extra` is deprecated, use literal values instead (e.g. `extra='allow'`). Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   extra = Extra.allow # Allow extra fields

-../proxy/_types.py:308
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:308: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:309
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:309: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   @root_validator(pre=True)

-../proxy/_types.py:337
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:337: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:338
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:338: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   @root_validator(pre=True)

-../proxy/_types.py:384
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:384: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:385
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:385: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   @root_validator(pre=True)

-../proxy/_types.py:450
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:450: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:454
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:454: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   @root_validator(pre=True)

-../proxy/_types.py:462
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:462: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:466
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:466: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   @root_validator(pre=True)

-../proxy/_types.py:502
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:502: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:509
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:509: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   @root_validator(pre=True)

-../proxy/_types.py:536
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:536: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:546
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:546: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   @root_validator(pre=True)

-../proxy/_types.py:823
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:823: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:840
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:840: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   @root_validator(pre=True)

-../proxy/_types.py:850
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:850: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:867
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:867: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   @root_validator(pre=True)

-../proxy/_types.py:869
-  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:869: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
+../proxy/_types.py:886
+  /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:886: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
   @root_validator(pre=True)

 ../../../../../../opt/homebrew/lib/python3.11/site-packages/pkg_resources/__init__.py:121
@@ -126,30 +111,33 @@ final cost: 2.55e-05; prompt_tokens_cost_usd_dollar: 2.55e-05; completion_tokens
   Implementing implicit namespace packages (as specified in PEP 420) is preferred to `pkg_resources.declare_namespace`.
See https://setuptools.pypa.io/en/latest/references/keywords.html#keyword-namespace-packages declare_namespace(pkg) -test_custom_logger.py::test_redis_cache_completion_stream - /opt/homebrew/lib/python3.11/site-packages/_pytest/unraisableexception.py:78: PytestUnraisableExceptionWarning: Exception ignored in: +test_image_generation.py::test_aimage_generation_bedrock_with_optional_params + /opt/homebrew/lib/python3.11/site-packages/_pytest/threadexception.py:73: PytestUnhandledThreadExceptionWarning: Exception in thread Thread-1 (success_handler) Traceback (most recent call last): - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/streams.py", line 395, in __del__ - self.close() - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/streams.py", line 343, in close - return self._transport.close() - ^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/sslproto.py", line 112, in close - self._ssl_protocol._start_shutdown() - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/sslproto.py", line 620, in _start_shutdown - self._shutdown_timeout_handle = self._loop.call_later( - ^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/base_events.py", line 727, in call_later - timer = self.call_at(self.time() + delay, callback, *args, - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/base_events.py", line 740, in call_at - self._check_closed() - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/base_events.py", line 519, in _check_closed - raise RuntimeError('Event loop is closed') - RuntimeError: Event loop is closed + File "/Users/krrishdholakia/Documents/litellm/litellm/utils.py", line 1412, in _success_handler_helper_fn + litellm.completion_cost( + File "/Users/krrishdholakia/Documents/litellm/litellm/utils.py", line 4442, in completion_cost + raise e + File "/Users/krrishdholakia/Documents/litellm/litellm/utils.py", line 4405, in completion_cost + raise Exception( + Exception: Model=1024-x-1024/stability.stable-diffusion-xl-v1 not found in completion cost model map - warnings.warn(pytest.PytestUnraisableExceptionWarning(msg)) + During handling of the above exception, another exception occurred: + + Traceback (most recent call last): + File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/threading.py", line 1045, in _bootstrap_inner + self.run() + File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/threading.py", line 982, in run + self._target(*self._args, **self._kwargs) + File "/Users/krrishdholakia/Documents/litellm/litellm/utils.py", line 1465, in success_handler + start_time, end_time, result = self._success_handler_helper_fn( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/krrishdholakia/Documents/litellm/litellm/utils.py", line 1459, in _success_handler_helper_fn + raise Exception(f"[Non-Blocking] LiteLLM.Success_Call Error: {str(e)}") + Exception: [Non-Blocking] LiteLLM.Success_Call Error: Model=1024-x-1024/stability.stable-diffusion-xl-v1 not found in 
completion cost model map
+
+  warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg))

 -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
-======================== 1 passed, 56 warnings in 2.43s ========================
+======================== 1 passed, 61 warnings in 3.00s ========================

From 097714e02f3b835eeb61810df52004b568b3d9d6 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Fri, 3 May 2024 17:31:34 -0700
Subject: [PATCH 048/543] fix(anthropic.py): handle whitespace characters for
 anthropic calls

---
 litellm/llms/anthropic.py             | 11 +++++----
 litellm/tests/test_optional_params.py | 16 ++++++++++++-
 litellm/utils.py                      | 33 ++++-----------------------
 3 files changed, 26 insertions(+), 34 deletions(-)

diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py
index 377235dee..3c130aafc 100644
--- a/litellm/llms/anthropic.py
+++ b/litellm/llms/anthropic.py
@@ -101,13 +101,13 @@ class AnthropicConfig:
                optional_params["max_tokens"] = value
            if param == "tools":
                optional_params["tools"] = value
-            if param == "stream":
+            if param == "stream" and value == True:
                optional_params["stream"] = value
            if param == "stop":
                if isinstance(value, str):
                    if (
                        value == "\n"
-                    ):  # anthropic doesn't allow whitespace characters as stop-sequences
+                    ) and litellm.drop_params == True:  # anthropic doesn't allow whitespace characters as stop-sequences
                        continue
                    value = [value]
                elif isinstance(value, list):
@@ -115,10 +115,13 @@ class AnthropicConfig:
                    new_v = []
                    for v in value:
                        if (
                            v == "\n"
-                        ):  # anthropic doesn't allow whitespace characters as stop-sequences
+                        ) and litellm.drop_params == True:  # anthropic doesn't allow whitespace characters as stop-sequences
                            continue
                        new_v.append(v)
-                    value = new_v
+                    if len(new_v) > 0:
+                        value = new_v
+                    else:
+                        continue
                optional_params["stop_sequences"] = value
            if param == "temperature":
                optional_params["temperature"] = value
diff --git a/litellm/tests/test_optional_params.py b/litellm/tests/test_optional_params.py
index 4fa8df3b6..c6e0d7a5b 100644
--- a/litellm/tests/test_optional_params.py
+++ b/litellm/tests/test_optional_params.py
@@ -5,13 +5,27 @@ import pytest
 sys.path.insert(0, os.path.abspath("../.."))
 import litellm
-from litellm.utils import get_optional_params_embeddings
+from litellm.utils import get_optional_params_embeddings, get_optional_params

 ## get_optional_params_embeddings
 ### Models: OpenAI, Azure, Bedrock
 ### Scenarios: w/ optional params + litellm.drop_params = True


+@pytest.mark.parametrize(
+    "stop_sequence, expected_count", [("\n", 0), (["\n"], 0), (["finish_reason"], 1)]
+)
+def test_anthropic_optional_params(stop_sequence, expected_count):
+    """
+    Test if whitespace character optional param is dropped by anthropic
+    """
+    litellm.drop_params = True
+    optional_params = get_optional_params(
+        model="claude-3", custom_llm_provider="anthropic", stop=stop_sequence
+    )
+    assert len(optional_params) == expected_count
+
+
 def test_bedrock_optional_params_embeddings():
     litellm.drop_params = True
     optional_params = get_optional_params_embeddings(
diff --git a/litellm/utils.py b/litellm/utils.py
index 63684766b..dceb280c9 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -5006,26 +5006,9 @@ def get_optional_params(
            model=model, custom_llm_provider=custom_llm_provider
        )
        _check_valid_arg(supported_params=supported_params)
-        # handle anthropic params
-        if stream:
-            optional_params["stream"] = stream
-        if stop is not None:
-            if type(stop) == str:
-                stop = [stop]  # openai can accept str/list for stop
-            optional_params["stop_sequences"] = stop
-        if temperature is not None:
-            optional_params["temperature"] = temperature
-        if top_p is not None:
-            optional_params["top_p"] = top_p
-        if max_tokens is not None:
-            if (model == "claude-2") or (model == "claude-instant-1"):
-                # these models use antropic_text.py which only accepts max_tokens_to_sample
-                optional_params["max_tokens_to_sample"] = max_tokens
-            else:
-                optional_params["max_tokens"] = max_tokens
-            optional_params["max_tokens"] = max_tokens
-        if tools is not None:
-            optional_params["tools"] = tools
+        optional_params = litellm.AnthropicConfig().map_openai_params(
+            non_default_params=non_default_params, optional_params=optional_params
+        )
    elif custom_llm_provider == "cohere":
        ## check if unsupported param passed in
        supported_params = get_supported_openai_params(
            model=model, custom_llm_provider=custom_llm_provider
        )
@@ -5929,15 +5912,7 @@ def get_supported_openai_params(model: str, custom_llm_provider: str):
    elif custom_llm_provider == "ollama_chat":
        return litellm.OllamaChatConfig().get_supported_openai_params()
    elif custom_llm_provider == "anthropic":
-        return [
-            "stream",
-            "stop",
-            "temperature",
-            "top_p",
-            "max_tokens",
-            "tools",
-            "tool_choice",
-        ]
+        return litellm.AnthropicConfig().get_supported_openai_params()
    elif custom_llm_provider == "groq":
        return [
            "temperature",

From 4ce4927c0cf9af8d0c4b6319b155e244572db4e7 Mon Sep 17 00:00:00 2001
From: Marc Abramowitz
Date: Fri, 3 May 2024 17:56:39 -0700
Subject: [PATCH 049/543] Add test_engines_model_chat_completions

---
 litellm/tests/test_proxy_server.py | 35 +++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)

diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py
index 43a070556..c1965dc2a 100644
--- a/litellm/tests/test_proxy_server.py
+++ b/litellm/tests/test_proxy_server.py
@@ -160,7 +160,40 @@ def test_chat_completion(mock_acompletion, client_no_auth):
        pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}")


-# Run the test
+@mock_patch_acompletion()
+def test_engines_model_chat_completions(mock_acompletion, client_no_auth):
+    global headers
+    try:
+        # Your test data
+        test_data = {
+            "model": "gpt-3.5-turbo",
+            "messages": [
+                {"role": "user", "content": "hi"},
+            ],
+            "max_tokens": 10,
+        }
+
+        print("testing proxy server with chat completions")
+        response = client_no_auth.post("/engines/gpt-3.5-turbo/chat/completions", json=test_data)
+        mock_acompletion.assert_called_once_with(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "user", "content": "hi"},
+            ],
+            max_tokens=10,
+            litellm_call_id=mock.ANY,
+            litellm_logging_obj=mock.ANY,
+            request_timeout=mock.ANY,
+            specific_deployment=True,
+            metadata=mock.ANY,
+            proxy_server_request=mock.ANY,
+        )
+        print(f"response - {response.text}")
+        assert response.status_code == 200
+        result = response.json()
+        print(f"Received response: {result}")
+    except Exception as e:
+        pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}")


 @mock_patch_acompletion()

From eb433bde863c750affcae4ac38517ba756ed9290 Mon Sep 17 00:00:00 2001
From: Marc Abramowitz
Date: Fri, 3 May 2024 17:57:30 -0700
Subject: [PATCH 050/543] Add route: "/engines/{model:path}/chat/completions"

Without this, it results in:

```pytb
Traceback (most recent call last):
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/proxy/proxy_server.py", line 3836, in completion
    raise HTTPException(
fastapi.exceptions.HTTPException: 400: {'error': 'completion: Invalid model name passed in model=gpt-3.5-turbo/chat'}
```

---
 litellm/proxy/proxy_server.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 55202fd16..bbeacded0 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -3456,6 +3456,11 @@ def model_list(
    dependencies=[Depends(user_api_key_auth)],
    tags=["chat/completions"],
 )
+@router.post(
+    "/engines/{model:path}/chat/completions",
+    dependencies=[Depends(user_api_key_auth)],
+    tags=["chat/completions"],
+)
 @router.post(
    "/openai/deployments/{model:path}/chat/completions",
    dependencies=[Depends(user_api_key_auth)],

From fc0ced48c1a3c695186b1d433bb2d99234d6c67e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebasti=C3=A1n=20Est=C3=A9vez?=
Date: Fri, 3 May 2024 23:38:54 -0400
Subject: [PATCH 051/543] add_function_to_prompt bug fix

This blows up when there's no "functions" in the dictionary even when tools
is present, because the inner function executes regardless (does not short
circuit): Python evaluates `non_default_params.pop("functions")` eagerly as
the default argument of the outer `pop`, so it raises a KeyError whenever
"functions" is absent. Passing an explicit `None` default avoids this.

---
 litellm/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/litellm/utils.py b/litellm/utils.py
index ac8ec35d4..75d6f8b7f 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -4956,7 +4956,7 @@ def get_optional_params(
                litellm.add_function_to_prompt
            ):  # if user opts to add it to prompt instead
                optional_params["functions_unsupported_model"] = non_default_params.pop(
-                    "tools", non_default_params.pop("functions")
+                    "tools", non_default_params.pop("functions", None)
                )
            else:
                raise UnsupportedParamsError(

From 01a11ccced84498136066678f3d9412882393022 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Fri, 3 May 2024 20:40:58 -0700
Subject: [PATCH 052/543] ui - new build

---
 litellm/proxy/_experimental/out/404.html      |  6 +---
 .../static/chunks/386-d811195b597a2122.js     | 32 -------------------
 .../static/chunks/761-05f8a8451296476c.js     | 32 +++++++++++++++++++
 .../chunks/app/layout-bdfb585eb82bdab5.js     |  1 -
 .../chunks/app/layout-bf3537d6924e801d.js     |  1 +
 .../chunks/app/page-e0ee34389254cdf2.js       |  1 -
 .../chunks/app/page-f538305fa38a6c75.js       |  2 +-
 ...915716.js => main-app-9b4fb13a7db53edf.js} |  2 +-
 ...8bd8abb.js => webpack-202e312607f242a1.js} |  2 +-
 .../out/_next/static/css/00c2ddbcd01819c0.css |  5 +++
 .../out/_next/static/css/9f51f0573c6b0365.css |  3 --
 .../_buildManifest.js                         |  0
 .../_ssgManifest.js                           |  0
 litellm/proxy/_experimental/out/index.html    |  6 +---
 litellm/proxy/_experimental/out/index.txt     | 11 ++-----
 ui/litellm-dashboard/out/404.html             |  6 +---
 .../c5rha8cqAah-saaczjn02/_buildManifest.js   |  1 -
 .../c5rha8cqAah-saaczjn02/_ssgManifest.js     |  1 -
 .../static/chunks/761-05f8a8451296476c.js     | 32 +++++++++++++++++++
 .../chunks/app/layout-bf3537d6924e801d.js     |  1 +
 .../chunks/app/page-f538305fa38a6c75.js       |  2 +-
 .../chunks/main-app-9b4fb13a7db53edf.js       |  1 +
 .../static/chunks/webpack-202e312607f242a1.js |  1 +
 .../out/_next/static/css/00c2ddbcd01819c0.css |  5 +++
 .../hizRXvPxuztz_ZrAcFMhz}/_buildManifest.js  |  0
 .../hizRXvPxuztz_ZrAcFMhz}/_ssgManifest.js    |  0
 ui/litellm-dashboard/out/index.html           |  6 +---
 ui/litellm-dashboard/out/index.txt            | 11 ++-----
 28 files changed, 90 insertions(+), 81 deletions(-)
 delete mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/386-d811195b597a2122.js
 create mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/761-05f8a8451296476c.js
 delete mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/app/layout-bdfb585eb82bdab5.js
 create mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/app/layout-bf3537d6924e801d.js
 delete mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/app/page-e0ee34389254cdf2.js
 rename ui/litellm-dashboard/out/_next/static/chunks/app/page-5a4a198eefedc775.js => litellm/proxy/_experimental/out/_next/static/chunks/app/page-f538305fa38a6c75.js (73%)
 rename litellm/proxy/_experimental/out/_next/static/chunks/{main-app-096338c8e1915716.js => main-app-9b4fb13a7db53edf.js} (54%)
 rename litellm/proxy/_experimental/out/_next/static/chunks/{webpack-65a932b4e8bd8abb.js => webpack-202e312607f242a1.js} (98%)
 create mode 100644 litellm/proxy/_experimental/out/_next/static/css/00c2ddbcd01819c0.css
 delete mode 100644 litellm/proxy/_experimental/out/_next/static/css/9f51f0573c6b0365.css
 rename litellm/proxy/_experimental/out/_next/static/{c5rha8cqAah-saaczjn02 => hizRXvPxuztz_ZrAcFMhz}/_buildManifest.js (100%)
 rename litellm/proxy/_experimental/out/_next/static/{c5rha8cqAah-saaczjn02 => hizRXvPxuztz_ZrAcFMhz}/_ssgManifest.js (100%)
 delete mode 100644 ui/litellm-dashboard/out/_next/static/c5rha8cqAah-saaczjn02/_buildManifest.js
 delete mode 100644 ui/litellm-dashboard/out/_next/static/c5rha8cqAah-saaczjn02/_ssgManifest.js
 create mode 100644 ui/litellm-dashboard/out/_next/static/chunks/761-05f8a8451296476c.js
 create mode 100644 ui/litellm-dashboard/out/_next/static/chunks/app/layout-bf3537d6924e801d.js
 rename litellm/proxy/_experimental/out/_next/static/chunks/app/page-5a4a198eefedc775.js => ui/litellm-dashboard/out/_next/static/chunks/app/page-f538305fa38a6c75.js (73%)
 create mode 100644 ui/litellm-dashboard/out/_next/static/chunks/main-app-9b4fb13a7db53edf.js
 create mode 100644 ui/litellm-dashboard/out/_next/static/chunks/webpack-202e312607f242a1.js
 create mode 100644 ui/litellm-dashboard/out/_next/static/css/00c2ddbcd01819c0.css
 rename {litellm/proxy/_experimental/out/_next/static/dWGL92c5LzTMn7XX6utn2 => ui/litellm-dashboard/out/_next/static/hizRXvPxuztz_ZrAcFMhz}/_buildManifest.js (100%)
 rename {litellm/proxy/_experimental/out/_next/static/dWGL92c5LzTMn7XX6utn2 => ui/litellm-dashboard/out/_next/static/hizRXvPxuztz_ZrAcFMhz}/_ssgManifest.js (100%)

diff --git a/litellm/proxy/_experimental/out/404.html b/litellm/proxy/_experimental/out/404.html
index ae30e10a8..e1cfa170d 100644
--- a/litellm/proxy/_experimental/out/404.html
+++ b/litellm/proxy/_experimental/out/404.html
@@ -1,5 +1 @@
-<<<<<<< HEAD
-404: This page could not be found.LiteLLM Dashboard [one line of minified Next.js markup; tags stripped during extraction, only the rendered text "404 / This page could not be found." survives]
-=======
-404: This page could not be found.LiteLLM Dashboard [one line of minified Next.js markup; tags stripped during extraction]
->>>>>>> 73a7b4f4 (refactor(main.py): trigger new build)
+404: This page could not be found.LiteLLM Dashboard [one line of minified Next.js markup; tags stripped during extraction]
\ No newline at end of file
diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/386-d811195b597a2122.js b/litellm/proxy/_experimental/out/_next/static/chunks/386-d811195b597a2122.js
deleted file mode 100644
index c589a3c73..000000000
--- a/litellm/proxy/_experimental/out/_next/static/chunks/386-d811195b597a2122.js
+++ /dev/null
@@ -1,32 +0,0 @@
-[32 lines of minified Next.js webpack chunk omitted; generated build artifact deleted by this commit]
68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",rebeccapurple:"#663399",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",steelblue:"#4682b4",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",tomato:"#ff6347",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"}},76991:function(e,t,n){n.d(t,{uA:function(){return i}});var r=n(41785),o=n(6564),a=n(27974);function i(e){var t={r:0,g:0,b:0},n=1,i=null,l=null,s=null,c=!1,p=!1;return"string"==typeof e&&(e=function(e){if(0===(e=e.trim().toLowerCase()).length)return!1;var t=!1;if(o.R[e])e=o.R[e],t=!0;else if("transparent"===e)return{r:0,g:0,b:0,a:0,format:"name"};var n=u.rgb.exec(e);return n?{r:n[1],g:n[2],b:n[3]}:(n=u.rgba.exec(e))?{r:n[1],g:n[2],b:n[3],a:n[4]}:(n=u.hsl.exec(e))?{h:n[1],s:n[2],l:n[3]}:(n=u.hsla.exec(e))?{h:n[1],s:n[2],l:n[3],a:n[4]}:(n=u.hsv.exec(e))?{h:n[1],s:n[2],v:n[3]}:(n=u.hsva.exec(e))?{h:n[1],s:n[2],v:n[3],a:n[4]}:(n=u.hex8.exec(e))?{r:(0,r.VD)(n[1]),g:(0,r.VD)(n[2]),b:(0,r.VD)(n[3]),a:(0,r.T6)(n[4]),format:t?"name":"hex8"}:(n=u.hex6.exec(e))?{r:(0,r.VD)(n[1]),g:(0,r.VD)(n[2]),b:(0,r.VD)(n[3]),format:t?"name":"hex"}:(n=u.hex4.exec(e))?{r:(0,r.VD)(n[1]+n[1]),g:(0,r.VD)(n[2]+n[2]),b:(0,r.VD)(n[3]+n[3]),a:(0,r.T6)(n[4]+n[4]),format:t?"name":"hex8"}:!!(n=u.hex3.exec(e))&&{r:(0,r.VD)(n[1]+n[1]),g:(0,r.VD)(n[2]+n[2]),b:(0,r.VD)(n[3]+n[3]),format:t?"name":"hex"}}(e)),"object"==typeof e&&(d(e.r)&&d(e.g)&&d(e.b)?(t=(0,r.rW)(e.r,e.g,e.b),c=!0,p="%"===String(e.r).substr(-1)?"prgb":"rgb"):d(e.h)&&d(e.s)&&d(e.v)?(i=(0,a.JX)(e.s),l=(0,a.JX)(e.v),t=(0,r.WE)(e.h,i,l),c=!0,p="hsv"):d(e.h)&&d(e.s)&&d(e.l)&&(i=(0,a.JX)(e.s),s=(0,a.JX)(e.l),t=(0,r.ve)(e.h,i,s),c=!0,p="hsl"),Object.prototype.hasOwnProperty.call(e,"a")&&(n=e.a)),n=(0,a.Yq)(n),{ok:c,format:e.format||p,r:Math.min(255,Math.max(t.r,0)),g:Math.min(255,Math.max(t.g,0)),b:Math.min(255,Math.max(t.b,0)),a:n}}var l="(?:".concat("[-\\+]?\\d*\\.\\d+%?",")|(?:").concat("[-\\+]?\\d+%?",")"),s="[\\s|\\(]+(".concat(l,")[,|\\s]+(").concat(l,")[,|\\s]+(").concat(l,")\\s*\\)?"),c="[\\s|\\(]+(".concat(l,")[,|\\s]+(").concat(l,")[,|\\s]+(").concat(l,")[,|\\s]+(").concat(l,")\\s*\\)?"),u={CSS_UNIT:new RegExp(l),rgb:RegExp("rgb"+s),rgba:RegExp("rgba"+c),hsl:RegExp("hsl"+s),hsla:RegExp("hsla"+c),hsv:RegExp("hsv"+s),hsva:RegExp("hsva"+c),hex3:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex6:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/,hex4:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex8:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/};function d(e){return!!u.CSS_UNIT.exec(String(e))}},6336:function(e,t,n){n.d(t,{C:function(){return l}});var r=n(41785),o=n(6564),a=n(76991),i=n(27974),l=function(){function e(t,n){if(void 0===t&&(t=""),void 
0===n&&(n={}),t instanceof e)return t;"number"==typeof t&&(t=(0,r.Yt)(t)),this.originalInput=t;var o,i=(0,a.uA)(t);this.originalInput=t,this.r=i.r,this.g=i.g,this.b=i.b,this.a=i.a,this.roundA=Math.round(100*this.a)/100,this.format=null!==(o=n.format)&&void 0!==o?o:i.format,this.gradientType=n.gradientType,this.r<1&&(this.r=Math.round(this.r)),this.g<1&&(this.g=Math.round(this.g)),this.b<1&&(this.b=Math.round(this.b)),this.isValid=i.ok}return e.prototype.isDark=function(){return 128>this.getBrightness()},e.prototype.isLight=function(){return!this.isDark()},e.prototype.getBrightness=function(){var e=this.toRgb();return(299*e.r+587*e.g+114*e.b)/1e3},e.prototype.getLuminance=function(){var e=this.toRgb(),t=e.r/255,n=e.g/255,r=e.b/255;return .2126*(t<=.03928?t/12.92:Math.pow((t+.055)/1.055,2.4))+.7152*(n<=.03928?n/12.92:Math.pow((n+.055)/1.055,2.4))+.0722*(r<=.03928?r/12.92:Math.pow((r+.055)/1.055,2.4))},e.prototype.getAlpha=function(){return this.a},e.prototype.setAlpha=function(e){return this.a=(0,i.Yq)(e),this.roundA=Math.round(100*this.a)/100,this},e.prototype.isMonochrome=function(){return 0===this.toHsl().s},e.prototype.toHsv=function(){var e=(0,r.py)(this.r,this.g,this.b);return{h:360*e.h,s:e.s,v:e.v,a:this.a}},e.prototype.toHsvString=function(){var e=(0,r.py)(this.r,this.g,this.b),t=Math.round(360*e.h),n=Math.round(100*e.s),o=Math.round(100*e.v);return 1===this.a?"hsv(".concat(t,", ").concat(n,"%, ").concat(o,"%)"):"hsva(".concat(t,", ").concat(n,"%, ").concat(o,"%, ").concat(this.roundA,")")},e.prototype.toHsl=function(){var e=(0,r.lC)(this.r,this.g,this.b);return{h:360*e.h,s:e.s,l:e.l,a:this.a}},e.prototype.toHslString=function(){var e=(0,r.lC)(this.r,this.g,this.b),t=Math.round(360*e.h),n=Math.round(100*e.s),o=Math.round(100*e.l);return 1===this.a?"hsl(".concat(t,", ").concat(n,"%, ").concat(o,"%)"):"hsla(".concat(t,", ").concat(n,"%, ").concat(o,"%, ").concat(this.roundA,")")},e.prototype.toHex=function(e){return void 0===e&&(e=!1),(0,r.vq)(this.r,this.g,this.b,e)},e.prototype.toHexString=function(e){return void 0===e&&(e=!1),"#"+this.toHex(e)},e.prototype.toHex8=function(e){return void 0===e&&(e=!1),(0,r.s)(this.r,this.g,this.b,this.a,e)},e.prototype.toHex8String=function(e){return void 0===e&&(e=!1),"#"+this.toHex8(e)},e.prototype.toHexShortString=function(e){return void 0===e&&(e=!1),1===this.a?this.toHexString(e):this.toHex8String(e)},e.prototype.toRgb=function(){return{r:Math.round(this.r),g:Math.round(this.g),b:Math.round(this.b),a:this.a}},e.prototype.toRgbString=function(){var e=Math.round(this.r),t=Math.round(this.g),n=Math.round(this.b);return 1===this.a?"rgb(".concat(e,", ").concat(t,", ").concat(n,")"):"rgba(".concat(e,", ").concat(t,", ").concat(n,", ").concat(this.roundA,")")},e.prototype.toPercentageRgb=function(){var e=function(e){return"".concat(Math.round(100*(0,i.sh)(e,255)),"%")};return{r:e(this.r),g:e(this.g),b:e(this.b),a:this.a}},e.prototype.toPercentageRgbString=function(){var e=function(e){return Math.round(100*(0,i.sh)(e,255))};return 1===this.a?"rgb(".concat(e(this.r),"%, ").concat(e(this.g),"%, ").concat(e(this.b),"%)"):"rgba(".concat(e(this.r),"%, ").concat(e(this.g),"%, ").concat(e(this.b),"%, ").concat(this.roundA,")")},e.prototype.toName=function(){if(0===this.a)return"transparent";if(this.a<1)return!1;for(var 
e="#"+(0,r.vq)(this.r,this.g,this.b,!1),t=0,n=Object.entries(o.R);t=0;return!t&&r&&(e.startsWith("hex")||"name"===e)?"name"===e&&0===this.a?this.toName():this.toRgbString():("rgb"===e&&(n=this.toRgbString()),"prgb"===e&&(n=this.toPercentageRgbString()),("hex"===e||"hex6"===e)&&(n=this.toHexString()),"hex3"===e&&(n=this.toHexString(!0)),"hex4"===e&&(n=this.toHex8String(!0)),"hex8"===e&&(n=this.toHex8String()),"name"===e&&(n=this.toName()),"hsl"===e&&(n=this.toHslString()),"hsv"===e&&(n=this.toHsvString()),n||this.toHexString())},e.prototype.toNumber=function(){return(Math.round(this.r)<<16)+(Math.round(this.g)<<8)+Math.round(this.b)},e.prototype.clone=function(){return new e(this.toString())},e.prototype.lighten=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.l+=t/100,n.l=(0,i.V2)(n.l),new e(n)},e.prototype.brighten=function(t){void 0===t&&(t=10);var n=this.toRgb();return n.r=Math.max(0,Math.min(255,n.r-Math.round(-(t/100*255)))),n.g=Math.max(0,Math.min(255,n.g-Math.round(-(t/100*255)))),n.b=Math.max(0,Math.min(255,n.b-Math.round(-(t/100*255)))),new e(n)},e.prototype.darken=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.l-=t/100,n.l=(0,i.V2)(n.l),new e(n)},e.prototype.tint=function(e){return void 0===e&&(e=10),this.mix("white",e)},e.prototype.shade=function(e){return void 0===e&&(e=10),this.mix("black",e)},e.prototype.desaturate=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.s-=t/100,n.s=(0,i.V2)(n.s),new e(n)},e.prototype.saturate=function(t){void 0===t&&(t=10);var n=this.toHsl();return n.s+=t/100,n.s=(0,i.V2)(n.s),new e(n)},e.prototype.greyscale=function(){return this.desaturate(100)},e.prototype.spin=function(t){var n=this.toHsl(),r=(n.h+t)%360;return n.h=r<0?360+r:r,new e(n)},e.prototype.mix=function(t,n){void 0===n&&(n=50);var r=this.toRgb(),o=new e(t).toRgb(),a=n/100;return new e({r:(o.r-r.r)*a+r.r,g:(o.g-r.g)*a+r.g,b:(o.b-r.b)*a+r.b,a:(o.a-r.a)*a+r.a})},e.prototype.analogous=function(t,n){void 0===t&&(t=6),void 0===n&&(n=30);var r=this.toHsl(),o=360/n,a=[this];for(r.h=(r.h-(o*t>>1)+720)%360;--t;)r.h=(r.h+o)%360,a.push(new e(r));return a},e.prototype.complement=function(){var t=this.toHsl();return t.h=(t.h+180)%360,new e(t)},e.prototype.monochromatic=function(t){void 0===t&&(t=6);for(var n=this.toHsv(),r=n.h,o=n.s,a=n.v,i=[],l=1/t;t--;)i.push(new e({h:r,s:o,v:a})),a=(a+l)%1;return i},e.prototype.splitcomplement=function(){var t=this.toHsl(),n=t.h;return[this,new e({h:(n+72)%360,s:t.s,l:t.l}),new e({h:(n+216)%360,s:t.s,l:t.l})]},e.prototype.onBackground=function(t){var n=this.toRgb(),r=new e(t).toRgb(),o=n.a+r.a*(1-n.a);return new e({r:(n.r*n.a+r.r*r.a*(1-n.a))/o,g:(n.g*n.a+r.g*r.a*(1-n.a))/o,b:(n.b*n.a+r.b*r.a*(1-n.a))/o,a:o})},e.prototype.triad=function(){return this.polyad(3)},e.prototype.tetrad=function(){return this.polyad(4)},e.prototype.polyad=function(t){for(var n=this.toHsl(),r=n.h,o=[this],a=360/t,i=1;iMath.abs(e-t))?1:e=360===t?(e<0?e%t+t:e%t)/parseFloat(String(t)):e%t/parseFloat(String(t))}function o(e){return Math.min(1,Math.max(0,e))}function a(e){return(isNaN(e=parseFloat(e))||e<0||e>1)&&(e=1),e}function i(e){return e<=1?"".concat(100*Number(e),"%"):e}function l(e){return 1===e.length?"0"+e:String(e)}n.d(t,{FZ:function(){return l},JX:function(){return i},V2:function(){return o},Yq:function(){return a},sh:function(){return r}})},88804:function(e,t,n){n.d(t,{Z:function(){return y}});var r,o=n(80406),a=n(64090),i=n(89542),l=n(22127);n(53850);var s=n(74084),c=a.createContext(null),u=n(63787),d=n(24800),p=[],f=n(24050);function m(e){var 
t=e.match(/^(.*)px$/),n=Number(null==t?void 0:t[1]);return Number.isNaN(n)?function(e){if("undefined"==typeof document)return 0;if(void 0===r){var t=document.createElement("div");t.style.width="100%",t.style.height="200px";var n=document.createElement("div"),o=n.style;o.position="absolute",o.top="0",o.left="0",o.pointerEvents="none",o.visibility="hidden",o.width="200px",o.height="150px",o.overflow="hidden",n.appendChild(t),document.body.appendChild(n);var a=t.offsetWidth;n.style.overflow="scroll";var i=t.offsetWidth;a===i&&(i=n.clientWidth),document.body.removeChild(n),r=a-i}return r}():n}var g="rc-util-locker-".concat(Date.now()),h=0,b=!1,v=function(e){return!1!==e&&((0,l.Z)()&&e?"string"==typeof e?document.querySelector(e):"function"==typeof e?e():e:null)},y=a.forwardRef(function(e,t){var n,r,y,E,S=e.open,w=e.autoLock,x=e.getContainer,O=(e.debug,e.autoDestroy),k=void 0===O||O,C=e.children,A=a.useState(S),T=(0,o.Z)(A,2),I=T[0],N=T[1],R=I||S;a.useEffect(function(){(k||S)&&N(S)},[S,k]);var _=a.useState(function(){return v(x)}),P=(0,o.Z)(_,2),L=P[0],M=P[1];a.useEffect(function(){var e=v(x);M(null!=e?e:null)});var D=function(e,t){var n=a.useState(function(){return(0,l.Z)()?document.createElement("div"):null}),r=(0,o.Z)(n,1)[0],i=a.useRef(!1),s=a.useContext(c),f=a.useState(p),m=(0,o.Z)(f,2),g=m[0],h=m[1],b=s||(i.current?void 0:function(e){h(function(t){return[e].concat((0,u.Z)(t))})});function v(){r.parentElement||document.body.appendChild(r),i.current=!0}function y(){var e;null===(e=r.parentElement)||void 0===e||e.removeChild(r),i.current=!1}return(0,d.Z)(function(){return e?s?s(v):v():y(),y},[e]),(0,d.Z)(function(){g.length&&(g.forEach(function(e){return e()}),h(p))},[g]),[r,b]}(R&&!L,0),j=(0,o.Z)(D,2),F=j[0],B=j[1],Z=null!=L?L:F;n=!!(w&&S&&(0,l.Z)()&&(Z===F||Z===document.body)),r=a.useState(function(){return h+=1,"".concat(g,"_").concat(h)}),y=(0,o.Z)(r,1)[0],(0,d.Z)(function(){if(n){var e=function(e){if("undefined"==typeof document||!e||!(e instanceof Element))return{width:0,height:0};var t=getComputedStyle(e,"::-webkit-scrollbar"),n=t.width,r=t.height;return{width:m(n),height:m(r)}}(document.body).width,t=document.body.scrollHeight>(window.innerHeight||document.documentElement.clientHeight)&&window.innerWidth>document.body.offsetWidth;(0,f.hq)("\nhtml body {\n overflow-y: hidden;\n ".concat(t?"width: calc(100% - ".concat(e,"px);"):"","\n}"),y)}else(0,f.jL)(y);return function(){(0,f.jL)(y)}},[n,y]);var U=null;C&&(0,s.Yr)(C)&&t&&(U=C.ref);var z=(0,s.x1)(U,t);if(!R||!(0,l.Z)()||void 0===L)return null;var H=!1===Z||("boolean"==typeof E&&(b=E),b),G=C;return t&&(G=a.cloneElement(C,{ref:z})),a.createElement(c.Provider,{value:B},H?G:(0,i.createPortal)(G,Z))})},44101:function(e,t,n){n.d(t,{Z:function(){return z}});var r=n(5239),o=n(80406),a=n(6787),i=n(88804),l=n(16480),s=n.n(l),c=n(46505),u=n(97472),d=n(74687),p=n(54811),f=n(91010),m=n(24800),g=n(76158),h=n(64090),b=n(14749),v=n(49367),y=n(74084);function E(e){var t=e.prefixCls,n=e.align,r=e.arrow,o=e.arrowPos,a=r||{},i=a.className,l=a.content,c=o.x,u=o.y,d=h.useRef();if(!n||!n.points)return null;var p={position:"absolute"};if(!1!==n.autoArrow){var f=n.points[0],m=n.points[1],g=f[0],b=f[1],v=m[0],y=m[1];g!==v&&["t","b"].includes(g)?"t"===g?p.top=0:p.bottom=0:p.top=void 0===u?0:u,b!==y&&["l","r"].includes(b)?"l"===b?p.left=0:p.right=0:p.left=void 0===c?0:c}return h.createElement("div",{ref:d,className:s()("".concat(t,"-arrow"),i),style:p},l)}function S(e){var t=e.prefixCls,n=e.open,r=e.zIndex,o=e.mask,a=e.motion;return 
o?h.createElement(v.ZP,(0,b.Z)({},a,{motionAppear:!0,visible:n,removeOnLeave:!0}),function(e){var n=e.className;return h.createElement("div",{style:{zIndex:r},className:s()("".concat(t,"-mask"),n)})}):null}var w=h.memo(function(e){return e.children},function(e,t){return t.cache}),x=h.forwardRef(function(e,t){var n=e.popup,a=e.className,i=e.prefixCls,l=e.style,u=e.target,d=e.onVisibleChanged,p=e.open,f=e.keepDom,g=e.fresh,x=e.onClick,O=e.mask,k=e.arrow,C=e.arrowPos,A=e.align,T=e.motion,I=e.maskMotion,N=e.forceRender,R=e.getPopupContainer,_=e.autoDestroy,P=e.portal,L=e.zIndex,M=e.onMouseEnter,D=e.onMouseLeave,j=e.onPointerEnter,F=e.ready,B=e.offsetX,Z=e.offsetY,U=e.offsetR,z=e.offsetB,H=e.onAlign,G=e.onPrepare,$=e.stretch,W=e.targetWidth,V=e.targetHeight,q="function"==typeof n?n():n,Y=p||f,K=(null==R?void 0:R.length)>0,X=h.useState(!R||!K),Q=(0,o.Z)(X,2),J=Q[0],ee=Q[1];if((0,m.Z)(function(){!J&&K&&u&&ee(!0)},[J,K,u]),!J)return null;var et="auto",en={left:"-1000vw",top:"-1000vh",right:et,bottom:et};if(F||!p){var er,eo=A.points,ea=A.dynamicInset||(null===(er=A._experimental)||void 0===er?void 0:er.dynamicInset),ei=ea&&"r"===eo[0][1],el=ea&&"b"===eo[0][0];ei?(en.right=U,en.left=et):(en.left=B,en.right=et),el?(en.bottom=z,en.top=et):(en.top=Z,en.bottom=et)}var es={};return $&&($.includes("height")&&V?es.height=V:$.includes("minHeight")&&V&&(es.minHeight=V),$.includes("width")&&W?es.width=W:$.includes("minWidth")&&W&&(es.minWidth=W)),p||(es.pointerEvents="none"),h.createElement(P,{open:N||Y,getContainer:R&&function(){return R(u)},autoDestroy:_},h.createElement(S,{prefixCls:i,open:p,zIndex:L,mask:O,motion:I}),h.createElement(c.Z,{onResize:H,disabled:!p},function(e){return h.createElement(v.ZP,(0,b.Z)({motionAppear:!0,motionEnter:!0,motionLeave:!0,removeOnLeave:!1,forceRender:N,leavedClassName:"".concat(i,"-hidden")},T,{onAppearPrepare:G,onEnterPrepare:G,visible:p,onVisibleChanged:function(e){var t;null==T||null===(t=T.onVisibleChanged)||void 0===t||t.call(T,e),d(e)}}),function(n,o){var c=n.className,u=n.style,d=s()(i,c,a);return h.createElement("div",{ref:(0,y.sQ)(e,t,o),className:d,style:(0,r.Z)((0,r.Z)((0,r.Z)((0,r.Z)({"--arrow-x":"".concat(C.x||0,"px"),"--arrow-y":"".concat(C.y||0,"px")},en),es),u),{},{boxSizing:"border-box",zIndex:L},l),onMouseEnter:M,onMouseLeave:D,onPointerEnter:j,onClick:x},k&&h.createElement(E,{prefixCls:i,arrow:k,arrowPos:C,align:A}),h.createElement(w,{cache:!p&&!g},q))})}))}),O=h.forwardRef(function(e,t){var n=e.children,r=e.getTriggerDOMNode,o=(0,y.Yr)(n),a=h.useCallback(function(e){(0,y.mH)(t,r?r(e):e)},[r]),i=(0,y.x1)(a,n.ref);return o?h.cloneElement(n,{ref:i}):n}),k=h.createContext(null);function C(e){return e?Array.isArray(e)?e:[e]:[]}var A=n(73193);function T(e,t,n,r){return t||(n?{motionName:"".concat(e,"-").concat(n)}:r?{motionName:r}:null)}function I(e){return e.ownerDocument.defaultView}function N(e){for(var t=[],n=null==e?void 0:e.parentElement,r=["hidden","scroll","clip","auto"];n;){var o=I(n).getComputedStyle(n);[o.overflowX,o.overflowY,o.overflow].some(function(e){return r.includes(e)})&&t.push(n),n=n.parentElement}return t}function R(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1;return Number.isNaN(e)?t:e}function _(e){return R(parseFloat(e),0)}function P(e,t){var n=(0,r.Z)({},e);return(t||[]).forEach(function(e){if(!(e instanceof HTMLBodyElement||e instanceof HTMLHtmlElement)){var 
t=I(e).getComputedStyle(e),r=t.overflow,o=t.overflowClipMargin,a=t.borderTopWidth,i=t.borderBottomWidth,l=t.borderLeftWidth,s=t.borderRightWidth,c=e.getBoundingClientRect(),u=e.offsetHeight,d=e.clientHeight,p=e.offsetWidth,f=e.clientWidth,m=_(a),g=_(i),h=_(l),b=_(s),v=R(Math.round(c.width/p*1e3)/1e3),y=R(Math.round(c.height/u*1e3)/1e3),E=m*y,S=h*v,w=0,x=0;if("clip"===r){var O=_(o);w=O*v,x=O*y}var k=c.x+S-w,C=c.y+E-x,A=k+c.width+2*w-S-b*v-(p-f-h-b)*v,T=C+c.height+2*x-E-g*y-(u-d-m-g)*y;n.left=Math.max(n.left,k),n.top=Math.max(n.top,C),n.right=Math.min(n.right,A),n.bottom=Math.min(n.bottom,T)}}),n}function L(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0,n="".concat(t),r=n.match(/^(.*)\%$/);return r?parseFloat(r[1])/100*e:parseFloat(n)}function M(e,t){var n=(0,o.Z)(t||[],2),r=n[0],a=n[1];return[L(e.width,r),L(e.height,a)]}function D(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return[e[0],e[1]]}function j(e,t){var n,r=t[0],o=t[1];return n="t"===r?e.y:"b"===r?e.y+e.height:e.y+e.height/2,{x:"l"===o?e.x:"r"===o?e.x+e.width:e.x+e.width/2,y:n}}function F(e,t){var n={t:"b",b:"t",l:"r",r:"l"};return e.map(function(e,r){return r===t?n[e]||"c":e}).join("")}var B=n(63787);n(53850);var Z=n(19223),U=["prefixCls","children","action","showAction","hideAction","popupVisible","defaultPopupVisible","onPopupVisibleChange","afterPopupVisibleChange","mouseEnterDelay","mouseLeaveDelay","focusDelay","blurDelay","mask","maskClosable","getPopupContainer","forceRender","autoDestroy","destroyPopupOnHide","popup","popupClassName","popupStyle","popupPlacement","builtinPlacements","popupAlign","zIndex","stretch","getPopupClassNameFromAlign","fresh","alignPoint","onPopupClick","onPopupAlign","arrow","popupMotion","maskMotion","popupTransitionName","popupAnimation","maskTransitionName","maskAnimation","className","getTriggerDOMNode"],z=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:i.Z;return h.forwardRef(function(t,n){var i,l,b,v,y,E,S,w,_,L,z,H,G,$,W,V,q,Y=t.prefixCls,K=void 0===Y?"rc-trigger-popup":Y,X=t.children,Q=t.action,J=t.showAction,ee=t.hideAction,et=t.popupVisible,en=t.defaultPopupVisible,er=t.onPopupVisibleChange,eo=t.afterPopupVisibleChange,ea=t.mouseEnterDelay,ei=t.mouseLeaveDelay,el=void 0===ei?.1:ei,es=t.focusDelay,ec=t.blurDelay,eu=t.mask,ed=t.maskClosable,ep=t.getPopupContainer,ef=t.forceRender,em=t.autoDestroy,eg=t.destroyPopupOnHide,eh=t.popup,eb=t.popupClassName,ev=t.popupStyle,ey=t.popupPlacement,eE=t.builtinPlacements,eS=void 0===eE?{}:eE,ew=t.popupAlign,ex=t.zIndex,eO=t.stretch,ek=t.getPopupClassNameFromAlign,eC=t.fresh,eA=t.alignPoint,eT=t.onPopupClick,eI=t.onPopupAlign,eN=t.arrow,eR=t.popupMotion,e_=t.maskMotion,eP=t.popupTransitionName,eL=t.popupAnimation,eM=t.maskTransitionName,eD=t.maskAnimation,ej=t.className,eF=t.getTriggerDOMNode,eB=(0,a.Z)(t,U),eZ=h.useState(!1),eU=(0,o.Z)(eZ,2),ez=eU[0],eH=eU[1];(0,m.Z)(function(){eH((0,g.Z)())},[]);var eG=h.useRef({}),e$=h.useContext(k),eW=h.useMemo(function(){return{registerSubPopup:function(e,t){eG.current[e]=t,null==e$||e$.registerSubPopup(e,t)}}},[e$]),eV=(0,f.Z)(),eq=h.useState(null),eY=(0,o.Z)(eq,2),eK=eY[0],eX=eY[1],eQ=(0,p.Z)(function(e){(0,u.S)(e)&&eK!==e&&eX(e),null==e$||e$.registerSubPopup(eV,e)}),eJ=h.useState(null),e0=(0,o.Z)(eJ,2),e1=e0[0],e2=e0[1],e4=h.useRef(null),e6=(0,p.Z)(function(e){(0,u.S)(e)&&e1!==e&&(e2(e),e4.current=e)}),e3=h.Children.only(X),e5=(null==e3?void 0:e3.props)||{},e8={},e9=(0,p.Z)(function(e){var t,n;return(null==e1?void 
0:e1.contains(e))||(null===(t=(0,d.A)(e1))||void 0===t?void 0:t.host)===e||e===e1||(null==eK?void 0:eK.contains(e))||(null===(n=(0,d.A)(eK))||void 0===n?void 0:n.host)===e||e===eK||Object.values(eG.current).some(function(t){return(null==t?void 0:t.contains(e))||e===t})}),e7=T(K,eR,eL,eP),te=T(K,e_,eD,eM),tt=h.useState(en||!1),tn=(0,o.Z)(tt,2),tr=tn[0],to=tn[1],ta=null!=et?et:tr,ti=(0,p.Z)(function(e){void 0===et&&to(e)});(0,m.Z)(function(){to(et||!1)},[et]);var tl=h.useRef(ta);tl.current=ta;var ts=h.useRef([]);ts.current=[];var tc=(0,p.Z)(function(e){var t;ti(e),(null!==(t=ts.current[ts.current.length-1])&&void 0!==t?t:ta)!==e&&(ts.current.push(e),null==er||er(e))}),tu=h.useRef(),td=function(){clearTimeout(tu.current)},tp=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0;td(),0===t?tc(e):tu.current=setTimeout(function(){tc(e)},1e3*t)};h.useEffect(function(){return td},[]);var tf=h.useState(!1),tm=(0,o.Z)(tf,2),tg=tm[0],th=tm[1];(0,m.Z)(function(e){(!e||ta)&&th(!0)},[ta]);var tb=h.useState(null),tv=(0,o.Z)(tb,2),ty=tv[0],tE=tv[1],tS=h.useState([0,0]),tw=(0,o.Z)(tS,2),tx=tw[0],tO=tw[1],tk=function(e){tO([e.clientX,e.clientY])},tC=(i=eA?tx:e1,l=h.useState({ready:!1,offsetX:0,offsetY:0,offsetR:0,offsetB:0,arrowX:0,arrowY:0,scaleX:1,scaleY:1,align:eS[ey]||{}}),v=(b=(0,o.Z)(l,2))[0],y=b[1],E=h.useRef(0),S=h.useMemo(function(){return eK?N(eK):[]},[eK]),w=h.useRef({}),ta||(w.current={}),_=(0,p.Z)(function(){if(eK&&i&&ta){var e,t,n,a,l,s,c,d=eK.ownerDocument,p=I(eK).getComputedStyle(eK),f=p.width,m=p.height,g=p.position,h=eK.style.left,b=eK.style.top,v=eK.style.right,E=eK.style.bottom,x=eK.style.overflow,O=(0,r.Z)((0,r.Z)({},eS[ey]),ew),k=d.createElement("div");if(null===(e=eK.parentElement)||void 0===e||e.appendChild(k),k.style.left="".concat(eK.offsetLeft,"px"),k.style.top="".concat(eK.offsetTop,"px"),k.style.position=g,k.style.height="".concat(eK.offsetHeight,"px"),k.style.width="".concat(eK.offsetWidth,"px"),eK.style.left="0",eK.style.top="0",eK.style.right="auto",eK.style.bottom="auto",eK.style.overflow="hidden",Array.isArray(i))n={x:i[0],y:i[1],width:0,height:0};else{var C=i.getBoundingClientRect();n={x:C.x,y:C.y,width:C.width,height:C.height}}var T=eK.getBoundingClientRect(),N=d.documentElement,_=N.clientWidth,L=N.clientHeight,B=N.scrollWidth,Z=N.scrollHeight,U=N.scrollTop,z=N.scrollLeft,H=T.height,G=T.width,$=n.height,W=n.width,V=O.htmlRegion,q="visible",Y="visibleFirst";"scroll"!==V&&V!==Y&&(V=q);var K=V===Y,X=P({left:-z,top:-U,right:B-z,bottom:Z-U},S),Q=P({left:0,top:0,right:_,bottom:L},S),J=V===q?Q:X,ee=K?Q:J;eK.style.left="auto",eK.style.top="auto",eK.style.right="0",eK.style.bottom="0";var et=eK.getBoundingClientRect();eK.style.left=h,eK.style.top=b,eK.style.right=v,eK.style.bottom=E,eK.style.overflow=x,null===(t=eK.parentElement)||void 0===t||t.removeChild(k);var en=R(Math.round(G/parseFloat(f)*1e3)/1e3),er=R(Math.round(H/parseFloat(m)*1e3)/1e3);if(!(0===en||0===er||(0,u.S)(i)&&!(0,A.Z)(i))){var eo=O.offset,ea=O.targetOffset,ei=M(T,eo),el=(0,o.Z)(ei,2),es=el[0],ec=el[1],eu=M(n,ea),ed=(0,o.Z)(eu,2),ep=ed[0],ef=ed[1];n.x-=ep,n.y-=ef;var em=O.points||[],eg=(0,o.Z)(em,2),eh=eg[0],eb=D(eg[1]),ev=D(eh),eE=j(n,eb),ex=j(T,ev),eO=(0,r.Z)({},O),ek=eE.x-ex.x+es,eC=eE.y-ex.y+ec,eA=tt(ek,eC),eT=tt(ek,eC,Q),eN=j(n,["t","l"]),eR=j(T,["t","l"]),e_=j(n,["b","r"]),eP=j(T,["b","r"]),eL=O.overflow||{},eM=eL.adjustX,eD=eL.adjustY,ej=eL.shiftX,eF=eL.shiftY,eB=function(e){return"boolean"==typeof e?e:e>=0};tn();var 
eZ=eB(eD),eU=ev[0]===eb[0];if(eZ&&"t"===ev[0]&&(l>ee.bottom||w.current.bt)){var ez=eC;eU?ez-=H-$:ez=eN.y-eP.y-ec;var eH=tt(ek,ez),eG=tt(ek,ez,Q);eH>eA||eH===eA&&(!K||eG>=eT)?(w.current.bt=!0,eC=ez,ec=-ec,eO.points=[F(ev,0),F(eb,0)]):w.current.bt=!1}if(eZ&&"b"===ev[0]&&(aeA||eW===eA&&(!K||eV>=eT)?(w.current.tb=!0,eC=e$,ec=-ec,eO.points=[F(ev,0),F(eb,0)]):w.current.tb=!1}var eq=eB(eM),eY=ev[1]===eb[1];if(eq&&"l"===ev[1]&&(c>ee.right||w.current.rl)){var eX=ek;eY?eX-=G-W:eX=eN.x-eP.x-es;var eQ=tt(eX,eC),eJ=tt(eX,eC,Q);eQ>eA||eQ===eA&&(!K||eJ>=eT)?(w.current.rl=!0,ek=eX,es=-es,eO.points=[F(ev,1),F(eb,1)]):w.current.rl=!1}if(eq&&"r"===ev[1]&&(seA||e1===eA&&(!K||e2>=eT)?(w.current.lr=!0,ek=e0,es=-es,eO.points=[F(ev,1),F(eb,1)]):w.current.lr=!1}tn();var e4=!0===ej?0:ej;"number"==typeof e4&&(sQ.right&&(ek-=c-Q.right-es,n.x>Q.right-e4&&(ek+=n.x-Q.right+e4)));var e6=!0===eF?0:eF;"number"==typeof e6&&(aQ.bottom&&(eC-=l-Q.bottom-ec,n.y>Q.bottom-e6&&(eC+=n.y-Q.bottom+e6)));var e3=T.x+ek,e5=T.y+eC,e8=n.x,e9=n.y;null==eI||eI(eK,eO);var e7=et.right-T.x-(ek+T.width),te=et.bottom-T.y-(eC+T.height);y({ready:!0,offsetX:ek/en,offsetY:eC/er,offsetR:e7/en,offsetB:te/er,arrowX:((Math.max(e3,e8)+Math.min(e3+G,e8+W))/2-e3)/en,arrowY:((Math.max(e5,e9)+Math.min(e5+H,e9+$))/2-e5)/er,scaleX:en,scaleY:er,align:eO})}function tt(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:J,r=T.x+e,o=T.y+t,a=Math.max(r,n.left),i=Math.max(o,n.top);return Math.max(0,(Math.min(r+G,n.right)-a)*(Math.min(o+H,n.bottom)-i))}function tn(){l=(a=T.y+eC)+H,c=(s=T.x+ek)+G}}}),L=function(){y(function(e){return(0,r.Z)((0,r.Z)({},e),{},{ready:!1})})},(0,m.Z)(L,[ey]),(0,m.Z)(function(){ta||L()},[ta]),[v.ready,v.offsetX,v.offsetY,v.offsetR,v.offsetB,v.arrowX,v.arrowY,v.scaleX,v.scaleY,v.align,function(){E.current+=1;var e=E.current;Promise.resolve().then(function(){E.current===e&&_()})}]),tA=(0,o.Z)(tC,11),tT=tA[0],tI=tA[1],tN=tA[2],tR=tA[3],t_=tA[4],tP=tA[5],tL=tA[6],tM=tA[7],tD=tA[8],tj=tA[9],tF=tA[10],tB=(z=void 0===Q?"hover":Q,h.useMemo(function(){var e=C(null!=J?J:z),t=C(null!=ee?ee:z),n=new Set(e),r=new Set(t);return ez&&(n.has("hover")&&(n.delete("hover"),n.add("click")),r.has("hover")&&(r.delete("hover"),r.add("click"))),[n,r]},[ez,z,J,ee])),tZ=(0,o.Z)(tB,2),tU=tZ[0],tz=tZ[1],tH=tU.has("click"),tG=tz.has("click")||tz.has("contextMenu"),t$=(0,p.Z)(function(){tg||tF()});H=function(){tl.current&&eA&&tG&&tp(!1)},(0,m.Z)(function(){if(ta&&e1&&eK){var e=N(e1),t=N(eK),n=I(eK),r=new Set([n].concat((0,B.Z)(e),(0,B.Z)(t)));function o(){t$(),H()}return r.forEach(function(e){e.addEventListener("scroll",o,{passive:!0})}),n.addEventListener("resize",o,{passive:!0}),t$(),function(){r.forEach(function(e){e.removeEventListener("scroll",o),n.removeEventListener("resize",o)})}}},[ta,e1,eK]),(0,m.Z)(function(){t$()},[tx,ey]),(0,m.Z)(function(){ta&&!(null!=eS&&eS[ey])&&t$()},[JSON.stringify(ew)]);var tW=h.useMemo(function(){var e=function(e,t,n,r){for(var o=n.points,a=Object.keys(e),i=0;i0&&void 0!==arguments[0]?arguments[0]:[],t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[],n=arguments.length>2?arguments[2]:void 0;return n?e[0]===t[0]:e[0]===t[0]&&e[1]===t[1]}(null===(l=e[s])||void 0===l?void 0:l.points,o,r))return"".concat(t,"-placement-").concat(s)}return""}(eS,K,tj,eA);return s()(e,null==ek?void 0:ek(tj))},[tj,ek,eS,K,eA]);h.useImperativeHandle(n,function(){return{nativeElement:e4.current,forceAlign:t$}});var 
tV=h.useState(0),tq=(0,o.Z)(tV,2),tY=tq[0],tK=tq[1],tX=h.useState(0),tQ=(0,o.Z)(tX,2),tJ=tQ[0],t0=tQ[1],t1=function(){if(eO&&e1){var e=e1.getBoundingClientRect();tK(e.width),t0(e.height)}};function t2(e,t,n,r){e8[e]=function(o){var a;null==r||r(o),tp(t,n);for(var i=arguments.length,l=Array(i>1?i-1:0),s=1;s1?n-1:0),o=1;o1?n-1:0),o=1;o{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M11.9999 13.1714L16.9497 8.22168L18.3639 9.63589L11.9999 15.9999L5.63599 9.63589L7.0502 8.22168L11.9999 13.1714Z"}))}},8903:function(e,t,n){n.d(t,{Z:function(){return a}});var r=n(69703),o=n(64090);let a=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M12 22C6.47715 22 2 17.5228 2 12C2 6.47715 6.47715 2 12 2C17.5228 2 22 6.47715 22 12C22 17.5228 17.5228 22 12 22ZM12 10.5858L9.17157 7.75736L7.75736 9.17157L10.5858 12L7.75736 14.8284L9.17157 16.2426L12 13.4142L14.8284 16.2426L16.2426 14.8284L13.4142 12L16.2426 9.17157L14.8284 7.75736L12 10.5858Z"}))}},57750:function(e,t,n){n.d(t,{Z:function(){return eg}});var r=n(69703),o=n(64090),a=n(26587),i=n(65558),l=n(75504),s=n(30638),c=n(80509),u=n.n(c),d=n(5037),p=n.n(d),f=n(71292),m=n.n(f),g=n(96240),h=n.n(g),b=n(93574),v=n.n(b),y=n(72996),E=n(84487),S=n(7986),w=n(71594),x=n(68139),O=n(20757),k=n(9586),C=n(765),A=["layout","type","stroke","connectNulls","isRange","ref"];function T(e){return(T="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function I(){return(I=Object.assign?Object.assign.bind():function(e){for(var t=1;t=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(a,A));return o.createElement(S.m,{clipPath:n?"url(#clipPath-".concat(r,")"):null},o.createElement(y.H,I({},(0,C.L6)(d,!0),{points:e,connectNulls:c,type:l,baseLine:t,layout:i,stroke:"none",className:"recharts-area-area"})),"none"!==s&&o.createElement(y.H,I({},(0,C.L6)(this.props,!1),{className:"recharts-area-curve",layout:i,type:l,connectNulls:c,fill:"none",points:e})),"none"!==s&&u&&o.createElement(y.H,I({},(0,C.L6)(this.props,!1),{className:"recharts-area-curve",layout:i,type:l,connectNulls:c,fill:"none",points:t})))}},{key:"renderAreaWithAnimation",value:function(e,t){var n=this,r=this.props,a=r.points,i=r.baseLine,l=r.isAnimationActive,c=r.animationBegin,u=r.animationDuration,d=r.animationEasing,p=r.animationId,f=this.state,g=f.prevPoints,b=f.prevBaseLine;return o.createElement(s.ZP,{begin:c,duration:u,isActive:l,easing:d,from:{t:0},to:{t:1},key:"area-".concat(p),onAnimationEnd:this.handleAnimationEnd,onAnimationStart:this.handleAnimationStart},function(r){var l=r.t;if(g){var s,c=g.length/a.length,u=a.map(function(e,t){var n=Math.floor(t*c);if(g[n]){var r=g[n],o=(0,O.k4)(r.x,e.x),a=(0,O.k4)(r.y,e.y);return R(R({},e),{},{x:o(l),y:a(l)})}return e});return s=(0,O.hj)(i)&&"number"==typeof i?(0,O.k4)(b,i)(l):m()(i)||h()(i)?(0,O.k4)(b,0)(l):i.map(function(e,t){var n=Math.floor(t*c);if(b[n]){var r=b[n],o=(0,O.k4)(r.x,e.x),a=(0,O.k4)(r.y,e.y);return R(R({},e),{},{x:o(l),y:a(l)})}return e}),n.renderAreaStatically(u,s,e,t)}return 
o.createElement(S.m,null,o.createElement("defs",null,o.createElement("clipPath",{id:"animationClipPath-".concat(t)},n.renderClipRect(l))),o.createElement(S.m,{clipPath:"url(#animationClipPath-".concat(t,")")},n.renderAreaStatically(a,i,e,t)))})}},{key:"renderArea",value:function(e,t){var n=this.props,r=n.points,o=n.baseLine,a=n.isAnimationActive,i=this.state,l=i.prevPoints,s=i.prevBaseLine,c=i.totalLength;return a&&r&&r.length&&(!l&&c>0||!v()(l,r)||!v()(s,o))?this.renderAreaWithAnimation(e,t):this.renderAreaStatically(r,o,e,t)}},{key:"render",value:function(){var e,t=this.props,n=t.hide,r=t.dot,a=t.points,i=t.className,s=t.top,c=t.left,u=t.xAxis,d=t.yAxis,p=t.width,f=t.height,g=t.isAnimationActive,h=t.id;if(n||!a||!a.length)return null;var b=this.state.isAnimationFinished,v=1===a.length,y=(0,l.Z)("recharts-area",i),E=u&&u.allowDataOverflow,x=d&&d.allowDataOverflow,O=E||x,k=m()(h)?this.id:h,A=null!==(e=(0,C.L6)(r,!1))&&void 0!==e?e:{r:3,strokeWidth:2},T=A.r,I=A.strokeWidth,N=((0,C.$k)(r)?r:{}).clipDot,R=void 0===N||N,_=2*(void 0===T?3:T)+(void 0===I?2:I);return o.createElement(S.m,{className:y},E||x?o.createElement("defs",null,o.createElement("clipPath",{id:"clipPath-".concat(k)},o.createElement("rect",{x:E?c:c-p/2,y:x?s:s-f/2,width:E?p:2*p,height:x?f:2*f})),!R&&o.createElement("clipPath",{id:"clipPath-dots-".concat(k)},o.createElement("rect",{x:c-_/2,y:s-_/2,width:p+_,height:f+_}))):null,v?null:this.renderArea(O,k),(r||v)&&this.renderDots(O,R,k),(!g||b)&&w.e.renderCallByParent(this.props,a))}}],r=[{key:"getDerivedStateFromProps",value:function(e,t){return e.animationId!==t.prevAnimationId?{prevAnimationId:e.animationId,curPoints:e.points,curBaseLine:e.baseLine,prevPoints:t.curPoints,prevBaseLine:t.curBaseLine}:e.points!==t.curPoints||e.baseLine!==t.curBaseLine?{curPoints:e.points,curBaseLine:e.baseLine}:null}}],n&&_(i.prototype,n),r&&_(i,r),Object.defineProperty(i,"prototype",{writable:!1}),i}(o.PureComponent);D(F,"displayName","Area"),D(F,"defaultProps",{stroke:"#3182bd",fill:"#3182bd",fillOpacity:.6,xAxisId:0,yAxisId:0,legendType:"line",connectNulls:!1,points:[],dot:!1,activeDot:!0,hide:!1,isAnimationActive:!x.x.isSsr,animationBegin:0,animationDuration:1500,animationEasing:"ease"}),D(F,"getBaseValue",function(e,t,n,r){var o=e.layout,a=e.baseValue,i=t.props.baseValue,l=null!=i?i:a;if((0,O.hj)(l)&&"number"==typeof l)return l;var s="horizontal"===o?r:n,c=s.scale.domain();if("number"===s.type){var u=Math.max(c[0],c[1]),d=Math.min(c[0],c[1]);return"dataMin"===l?d:"dataMax"===l?u:u<0?u:Math.max(Math.min(c[0],c[1]),0)}return"dataMin"===l?c[0]:"dataMax"===l?c[1]:c[0]}),D(F,"getComposedData",function(e){var t,n=e.props,r=e.item,o=e.xAxis,a=e.yAxis,i=e.xAxisTicks,l=e.yAxisTicks,s=e.bandSize,c=e.dataKey,u=e.stackedData,d=e.dataStartIndex,p=e.displayedData,f=e.offset,m=n.layout,g=u&&u.length,h=F.getBaseValue(n,r,o,a),b="horizontal"===m,v=!1,y=p.map(function(e,t){g?n=u[d+t]:Array.isArray(n=(0,k.F$)(e,c))?v=!0:n=[h,n];var n,r=null==n[1]||g&&null==(0,k.F$)(e,c);return b?{x:(0,k.Hv)({axis:o,ticks:i,bandSize:s,entry:e,index:t}),y:r?null:a.scale(n[1]),value:n,payload:e}:{x:r?null:o.scale(n[1]),y:(0,k.Hv)({axis:a,ticks:l,bandSize:s,entry:e,index:t}),value:n,payload:e}});return t=g||v?y.map(function(e){var t=Array.isArray(e.value)?e.value[0]:null;return b?{x:e.x,y:null!=t&&null!=e.y?a.scale(t):null}:{x:null!=t?o.scale(t):null,y:e.y}}):b?a.scale(h):o.scale(h),R({points:y,baseLine:t,layout:m,isRange:v},f)}),D(F,"renderDotItem",function(e,t){return 
o.isValidElement(e)?o.cloneElement(e,t):u()(e)?e(t):o.createElement(E.o,I({},t,{className:"recharts-area-dot"}))});var B=n(23356),Z=n(22983),U=n(12627),z=(0,i.z)({chartName:"AreaChart",GraphicalChild:F,axisComponents:[{axisType:"xAxis",AxisComp:B.K},{axisType:"yAxis",AxisComp:Z.B}],formatAxisMap:U.t9}),H=n(38333),G=n(10166),$=n(94866),W=n(99355),V=["type","layout","connectNulls","ref"];function q(e){return(q="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function Y(){return(Y=Object.assign?Object.assign.bind():function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=Array(t);na){s=[].concat(Q(r.slice(0,c)),[a-u]);break}var d=s.length%2==0?[0,l]:[l];return[].concat(Q(i.repeat(r,Math.floor(t/o))),Q(s),d).map(function(e){return"".concat(e,"px")}).join(", ")}),eo(en(e),"id",(0,O.EL)("recharts-line-")),eo(en(e),"pathRef",function(t){e.mainCurve=t}),eo(en(e),"handleAnimationEnd",function(){e.setState({isAnimationFinished:!0}),e.props.onAnimationEnd&&e.props.onAnimationEnd()}),eo(en(e),"handleAnimationStart",function(){e.setState({isAnimationFinished:!1}),e.props.onAnimationStart&&e.props.onAnimationStart()}),e}return n=[{key:"componentDidMount",value:function(){if(this.props.isAnimationActive){var e=this.getTotalLength();this.setState({totalLength:e})}}},{key:"componentDidUpdate",value:function(){if(this.props.isAnimationActive){var e=this.getTotalLength();e!==this.state.totalLength&&this.setState({totalLength:e})}}},{key:"getTotalLength",value:function(){var e=this.mainCurve;try{return e&&e.getTotalLength&&e.getTotalLength()||0}catch(e){return 0}}},{key:"renderErrorBar",value:function(e,t){if(this.props.isAnimationActive&&!this.state.isAnimationFinished)return null;var n=this.props,r=n.points,a=n.xAxis,i=n.yAxis,l=n.layout,s=n.children,c=(0,C.NN)(s,W.W);if(!c)return null;var u=function(e,t){return{x:e.x,y:e.y,value:e.value,errorVal:(0,k.F$)(e.payload,t)}};return o.createElement(S.m,{clipPath:e?"url(#clipPath-".concat(t,")"):null},c.map(function(e){return o.cloneElement(e,{key:"bar-".concat(e.props.dataKey),data:r,xAxis:a,yAxis:i,layout:l,dataPointFormatter:u})}))}},{key:"renderDots",value:function(e,t,n){if(this.props.isAnimationActive&&!this.state.isAnimationFinished)return null;var r=this.props,a=r.dot,l=r.points,s=r.dataKey,c=(0,C.L6)(this.props,!1),u=(0,C.L6)(a,!0),d=l.map(function(e,t){var n=X(X(X({key:"dot-".concat(t),r:3},c),u),{},{value:e.value,dataKey:s,cx:e.x,cy:e.y,index:t,payload:e.payload});return i.renderDotItem(a,n)}),p={clipPath:e?"url(#clipPath-".concat(t?"":"dots-").concat(n,")"):null};return o.createElement(S.m,Y({className:"recharts-line-dots",key:"dots"},p),d)}},{key:"renderCurveStatically",value:function(e,t,n,r){var a=this.props,i=a.type,l=a.layout,s=a.connectNulls,c=(a.ref,function(e,t){if(null==e)return{};var n,r,o=function(e,t){if(null==e)return{};var n,r,o={},a=Object.keys(e);for(r=0;r=0||(o[n]=e[n]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(o[n]=e[n])}return o}(a,V)),u=X(X(X({},(0,C.L6)(c,!0)),{},{fill:"none",className:"recharts-line-curve",clipPath:t?"url(#clipPath-".concat(n,")"):null,points:e},r),{},{type:i,layout:l,connectNulls:s});return o.createElement(y.H,Y({},u,{pathRef:this.pathRef}))}},{key:"renderCurveWithAnimation",value:function(e,t){var 
n=this,r=this.props,a=r.points,i=r.strokeDasharray,l=r.isAnimationActive,c=r.animationBegin,u=r.animationDuration,d=r.animationEasing,p=r.animationId,f=r.animateNewValues,m=r.width,g=r.height,h=this.state,b=h.prevPoints,v=h.totalLength;return o.createElement(s.ZP,{begin:c,duration:u,isActive:l,easing:d,from:{t:0},to:{t:1},key:"line-".concat(p),onAnimationEnd:this.handleAnimationEnd,onAnimationStart:this.handleAnimationStart},function(r){var o,l=r.t;if(b){var s=b.length/a.length,c=a.map(function(e,t){var n=Math.floor(t*s);if(b[n]){var r=b[n],o=(0,O.k4)(r.x,e.x),a=(0,O.k4)(r.y,e.y);return X(X({},e),{},{x:o(l),y:a(l)})}if(f){var i=(0,O.k4)(2*m,e.x),c=(0,O.k4)(g/2,e.y);return X(X({},e),{},{x:i(l),y:c(l)})}return X(X({},e),{},{x:e.x,y:e.y})});return n.renderCurveStatically(c,e,t)}var u=(0,O.k4)(0,v)(l);if(i){var d="".concat(i).split(/[,\s]+/gim).map(function(e){return parseFloat(e)});o=n.getStrokeDasharray(u,v,d)}else o=n.generateSimpleStrokeDasharray(v,u);return n.renderCurveStatically(a,e,t,{strokeDasharray:o})})}},{key:"renderCurve",value:function(e,t){var n=this.props,r=n.points,o=n.isAnimationActive,a=this.state,i=a.prevPoints,l=a.totalLength;return o&&r&&r.length&&(!i&&l>0||!v()(i,r))?this.renderCurveWithAnimation(e,t):this.renderCurveStatically(r,e,t)}},{key:"render",value:function(){var e,t=this.props,n=t.hide,r=t.dot,a=t.points,i=t.className,s=t.xAxis,c=t.yAxis,u=t.top,d=t.left,p=t.width,f=t.height,g=t.isAnimationActive,h=t.id;if(n||!a||!a.length)return null;var b=this.state.isAnimationFinished,v=1===a.length,y=(0,l.Z)("recharts-line",i),E=s&&s.allowDataOverflow,x=c&&c.allowDataOverflow,O=E||x,k=m()(h)?this.id:h,A=null!==(e=(0,C.L6)(r,!1))&&void 0!==e?e:{r:3,strokeWidth:2},T=A.r,I=A.strokeWidth,N=((0,C.$k)(r)?r:{}).clipDot,R=void 0===N||N,_=2*(void 0===T?3:T)+(void 0===I?2:I);return o.createElement(S.m,{className:y},E||x?o.createElement("defs",null,o.createElement("clipPath",{id:"clipPath-".concat(k)},o.createElement("rect",{x:E?d:d-p/2,y:x?u:u-f/2,width:E?p:2*p,height:x?f:2*f})),!R&&o.createElement("clipPath",{id:"clipPath-dots-".concat(k)},o.createElement("rect",{x:d-_/2,y:u-_/2,width:p+_,height:f+_}))):null,!v&&this.renderCurve(O,k),this.renderErrorBar(O,k),(v||r)&&this.renderDots(O,R,k),(!g||b)&&w.e.renderCallByParent(this.props,a))}}],r=[{key:"getDerivedStateFromProps",value:function(e,t){return e.animationId!==t.prevAnimationId?{prevAnimationId:e.animationId,curPoints:e.points,prevPoints:t.curPoints}:e.points!==t.curPoints?{curPoints:e.points}:null}},{key:"repeat",value:function(e,t){for(var 
n=e.length%2!=0?[].concat(Q(e),[0]):e,r=[],o=0;o{let{data:n=[],categories:i=[],index:l,stack:s=!1,colors:c=ep.s,valueFormatter:u=em.Cj,startEndOnly:d=!1,showXAxis:p=!0,showYAxis:f=!0,yAxisWidth:m=56,intervalType:g="equidistantPreserveStart",showAnimation:h=!1,animationDuration:b=900,showTooltip:v=!0,showLegend:y=!0,showGridLines:S=!0,showGradient:w=!0,autoMinValue:x=!1,curveType:O="linear",minValue:k,maxValue:C,connectNulls:A=!1,allowDecimals:T=!0,noDataText:I,className:N,onValueChange:R,enableLegendSlider:_=!1,customTooltip:P,rotateLabelX:L,tickGap:M=5}=e,D=(0,r._T)(e,["data","categories","index","stack","colors","valueFormatter","startEndOnly","showXAxis","showYAxis","yAxisWidth","intervalType","showAnimation","animationDuration","showTooltip","showLegend","showGridLines","showGradient","autoMinValue","curveType","minValue","maxValue","connectNulls","allowDecimals","noDataText","className","onValueChange","enableLegendSlider","customTooltip","rotateLabelX","tickGap"]),j=(p||f)&&(!d||f)?20:0,[U,W]=(0,o.useState)(60),[V,q]=(0,o.useState)(void 0),[Y,K]=(0,o.useState)(void 0),X=(0,eu.me)(i,c),Q=(0,eu.i4)(x,k,C),J=!!R;function ee(e){J&&(e===Y&&!V||(0,eu.FB)(n,e)&&V&&V.dataKey===e?(K(void 0),null==R||R(null)):(K(e),null==R||R({eventType:"category",categoryClicked:e})),q(void 0))}return o.createElement("div",Object.assign({ref:t,className:(0,ef.q)("w-full h-80",N)},D),o.createElement(a.h,{className:"h-full w-full"},(null==n?void 0:n.length)?o.createElement(z,{data:n,onClick:J&&(Y||V)?()=>{q(void 0),K(void 0),null==R||R(null)}:void 0},S?o.createElement(H.q,{className:(0,ef.q)("stroke-1","stroke-tremor-border","dark:stroke-dark-tremor-border"),horizontal:!0,vertical:!1}):null,o.createElement(B.K,{padding:{left:j,right:j},hide:!p,dataKey:l,tick:{transform:"translate(0, 6)"},ticks:d?[n[0][l],n[n.length-1][l]]:void 0,fill:"",stroke:"",className:(0,ef.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),interval:d?"preserveStartEnd":g,tickLine:!1,axisLine:!1,minTickGap:M,angle:null==L?void 0:L.angle,dy:null==L?void 0:L.verticalShift,height:null==L?void 0:L.xAxisHeight}),o.createElement(Z.B,{width:m,hide:!f,axisLine:!1,tickLine:!1,type:"number",domain:Q,tick:{transform:"translate(-3, 0)"},fill:"",stroke:"",className:(0,ef.q)("text-tremor-label","fill-tremor-content","dark:fill-dark-tremor-content"),tickFormatter:u,allowDecimals:T}),o.createElement(G.u,{wrapperStyle:{outline:"none"},isAnimationActive:!1,cursor:{stroke:"#d1d5db",strokeWidth:1},content:v?e=>{let{active:t,payload:n,label:r}=e;return P?o.createElement(P,{payload:null==n?void 0:n.map(e=>{var t;return Object.assign(Object.assign({},e),{color:null!==(t=X.get(e.dataKey))&&void 0!==t?t:ed.fr.Gray})}),active:t,label:r}):o.createElement(es.ZP,{active:t,payload:n,label:r,valueFormatter:u,categoryColors:X})}:o.createElement(o.Fragment,null),position:{y:0}}),y?o.createElement($.D,{verticalAlign:"top",height:U,content:e=>{let{payload:t}=e;return(0,el.Z)({payload:t},X,W,Y,J?e=>ee(e):void 0,_)}}):null,i.map(e=>{var t,n;return o.createElement("defs",{key:e},w?o.createElement("linearGradient",{className:(0,em.bM)(null!==(t=X.get(e))&&void 0!==t?t:ed.fr.Gray,ep.K.text).textColor,id:X.get(e),x1:"0",y1:"0",x2:"0",y2:"1"},o.createElement("stop",{offset:"5%",stopColor:"currentColor",stopOpacity:V||Y&&Y!==e?.15:.4}),o.createElement("stop",{offset:"95%",stopColor:"currentColor",stopOpacity:0})):o.createElement("linearGradient",{className:(0,em.bM)(null!==(n=X.get(e))&&void 
[... minified Next.js static chunk (webpack build output) committed for the LiteLLM proxy dashboard UI; the machine-generated code is elided here for readability. The chunk bundles the Tremor React components the dashboard renders: AreaChart and BarChart (with click-to-highlight dots/bars and custom tooltip support), the scrollable Legend with slider buttons, the ChartTooltip, a NoData/Flex fallback, Badge, Icon, Button (with a loading-spinner transition), MultiSelect/MultiSelectItem with inline search, Select/SelectItem, and Headless UI label internals. ...]