diff --git a/.circleci/config.yml b/.circleci/config.yml
index 18bfeedb52..5f4628d26d 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -344,4 +344,4 @@ workflows:
filters:
branches:
only:
- - main
+ - main
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 8d99ae8af8..69061d62d3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -59,3 +59,4 @@ myenv/*
litellm/proxy/_experimental/out/404/index.html
litellm/proxy/_experimental/out/model_hub/index.html
litellm/proxy/_experimental/out/onboarding/index.html
+litellm/tests/log.txt
diff --git a/docs/my-website/docs/observability/custom_callback.md b/docs/my-website/docs/observability/custom_callback.md
index 3168222273..373b4a96c0 100644
--- a/docs/my-website/docs/observability/custom_callback.md
+++ b/docs/my-website/docs/observability/custom_callback.md
@@ -38,7 +38,7 @@ class MyCustomHandler(CustomLogger):
print(f"On Async Success")
async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
- print(f"On Async Success")
+ print(f"On Async Failure")
customHandler = MyCustomHandler()
diff --git a/docs/my-website/docs/projects/llmcord.py (Discord LLM Chatbot).md b/docs/my-website/docs/projects/llmcord.py (Discord LLM Chatbot).md
new file mode 100644
index 0000000000..f8acb9383c
--- /dev/null
+++ b/docs/my-website/docs/projects/llmcord.py (Discord LLM Chatbot).md
@@ -0,0 +1,3 @@
+llmcord.py lets you and your friends chat with LLMs directly in your Discord server. It works with practically any LLM, remote or locally hosted.
+
+GitHub: https://github.com/jakobdylanc/discord-llm-chatbot
diff --git a/docs/my-website/docs/proxy/alerting.md b/docs/my-website/docs/proxy/alerting.md
index 3ef676bbd6..402de410c9 100644
--- a/docs/my-website/docs/proxy/alerting.md
+++ b/docs/my-website/docs/proxy/alerting.md
@@ -62,6 +62,23 @@ curl -X GET 'http://localhost:4000/health/services?service=slack' \
-H 'Authorization: Bearer sk-1234'
```
+## Advanced - Redacting Messages from Alerts
+
+By default, alerts show the `messages/input` passed to the LLM. If you want to redact this from Slack alerting, set the following in your config:
+
+
+```yaml
+general_settings:
+ alerting: ["slack"]
+ alert_types: ["spend_reports"]
+
+litellm_settings:
+ redact_messages_in_exceptions: True
+```
+
+
+
+
## Advanced - Opting into specific alert types
Set `alert_types` if you want to Opt into only specific alert types
diff --git a/docs/my-website/docs/proxy/enterprise.md b/docs/my-website/docs/proxy/enterprise.md
index 2b984b3e78..48cab60932 100644
--- a/docs/my-website/docs/proxy/enterprise.md
+++ b/docs/my-website/docs/proxy/enterprise.md
@@ -14,6 +14,7 @@ Features:
- ✅ [SSO for Admin UI](./ui.md#✨-enterprise-features)
- ✅ [Audit Logs](#audit-logs)
- ✅ [Tracking Spend for Custom Tags](#tracking-spend-for-custom-tags)
+- ✅ [Enforce Required Params for LLM Requests (ex. reject requests missing `metadata.generation_name`)](#enforce-required-params-for-llm-requests)
- ✅ [Content Moderation with LLM Guard, LlamaGuard, Google Text Moderations](#content-moderation)
- ✅ [Prompt Injection Detection (with LakeraAI API)](#prompt-injection-detection---lakeraai)
- ✅ [Custom Branding + Routes on Swagger Docs](#swagger-docs---custom-routes--branding)
@@ -204,6 +205,109 @@ curl -X GET "http://0.0.0.0:4000/spend/tags" \
```
+## Enforce Required Params for LLM Requests
+Use this when you want to enforce that all requests include certain params. For example, you may need all requests to include the `user` and `["metadata"]["generation_name"]` params.
+
+**Step 1** Define all params you want to enforce in config.yaml
+
+This means `["user"]` and `["metadata"]["generation_name"]` are required in all LLM requests to LiteLLM
+
+```yaml
+general_settings:
+ master_key: sk-1234
+ enforced_params:
+ - user
+ - metadata.generation_name
+```
+
+Start the LiteLLM Proxy
+
+**Step 2** Verify it works
+
+
+
+
+
+```shell
+curl --location 'http://localhost:4000/chat/completions' \
+ --header 'Authorization: Bearer sk-5fmYeaUEbAMpwBNT-QpxyA' \
+ --header 'Content-Type: application/json' \
+ --data '{
+ "model": "gpt-3.5-turbo",
+ "messages": [
+ {
+ "role": "user",
+ "content": "hi"
+ }
+ ]
+}'
+```
+
+Expected Response
+
+```json
+{"error":{"message":"Authentication Error, BadRequest please pass param=user in request body. This is a required param","type":"auth_error","param":"None","code":401}}
+```
+
+
+
+
+
+```shell
+curl --location 'http://localhost:4000/chat/completions' \
+ --header 'Authorization: Bearer sk-5fmYeaUEbAMpwBNT-QpxyA' \
+ --header 'Content-Type: application/json' \
+ --data '{
+ "model": "gpt-3.5-turbo",
+ "user": "gm",
+ "messages": [
+ {
+ "role": "user",
+ "content": "hi"
+ }
+ ],
+ "metadata": {}
+}'
+```
+
+Expected Response
+
+```json
+{"error":{"message":"Authentication Error, BadRequest please pass param=[metadata][generation_name] in request body. This is a required param","type":"auth_error","param":"None","code":401}}
+```
+
+
+
+
+
+```shell
+curl --location 'http://localhost:4000/chat/completions' \
+ --header 'Authorization: Bearer sk-5fmYeaUEbAMpwBNT-QpxyA' \
+ --header 'Content-Type: application/json' \
+ --data '{
+ "model": "gpt-3.5-turbo",
+ "user": "gm",
+ "messages": [
+ {
+ "role": "user",
+ "content": "hi"
+ }
+ ],
+ "metadata": {"generation_name": "prod-app"}
+}'
+```
+
+Expected Response
+
+```json
+{"id":"chatcmpl-9XALnHqkCBMBKrOx7Abg0hURHqYtY","choices":[{"finish_reason":"stop","index":0,"message":{"content":"Hello! How can I assist you today?","role":"assistant"}}],"created":1717691639,"model":"gpt-3.5-turbo-0125","object":"chat.completion","system_fingerprint":null,"usage":{"completion_tokens":9,"prompt_tokens":8,"total_tokens":17}}
+```
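+
+If you call the proxy through the OpenAI Python SDK instead of curl, the enforced params can be passed the same way. A minimal sketch (not from the docs above), assuming the proxy from Step 1 is running on `localhost:4000` with master key `sk-1234`:
+
+```python
+from openai import OpenAI
+
+# point the OpenAI client at the LiteLLM proxy
+client = OpenAI(api_key="sk-1234", base_url="http://localhost:4000")
+
+response = client.chat.completions.create(
+    model="gpt-3.5-turbo",
+    user="gm",  # 👈 enforced param
+    messages=[{"role": "user", "content": "hi"}],
+    # non-OpenAI params such as `metadata` are forwarded via extra_body
+    extra_body={"metadata": {"generation_name": "prod-app"}},  # 👈 enforced param
+)
+print(response.choices[0].message.content)
+```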
+
+
+
+
+
+
diff --git a/docs/my-website/docs/proxy/logging.md b/docs/my-website/docs/proxy/logging.md
index fd57545ca9..a55e42d558 100644
--- a/docs/my-website/docs/proxy/logging.md
+++ b/docs/my-website/docs/proxy/logging.md
@@ -313,6 +313,18 @@ You will see `raw_request` in your Langfuse Metadata. This is the RAW CURL comma
## Logging Proxy Input/Output in OpenTelemetry format
+
+:::info
+
+[Optional] Customize the OTEL service name and tracer name by setting the following variables in your environment:
+
+```shell
+OTEL_TRACER_NAME= # default="litellm"
+OTEL_SERVICE_NAME= # default="litellm"
+```
+
+:::
+
diff --git a/docs/my-website/docs/scheduler.md b/docs/my-website/docs/scheduler.md
index 486549a08c..e7943c4591 100644
--- a/docs/my-website/docs/scheduler.md
+++ b/docs/my-website/docs/scheduler.md
@@ -100,4 +100,76 @@ print(response)
```
-
\ No newline at end of file
+
+
+## Advanced - Redis Caching
+
+Use Redis caching to do request prioritization across multiple instances of LiteLLM.
+
+### SDK
+```python
+import os
+
+from litellm import Router
+
+router = Router(
+ model_list=[
+ {
+ "model_name": "gpt-3.5-turbo",
+ "litellm_params": {
+ "model": "gpt-3.5-turbo",
+ "mock_response": "Hello world this is Macintosh!", # fakes the LLM API call
+ "rpm": 1,
+ },
+ },
+ ],
+ ### REDIS PARAMS ###
+ redis_host=os.environ["REDIS_HOST"],
+ redis_password=os.environ["REDIS_PASSWORD"],
+ redis_port=os.environ["REDIS_PORT"],
+)
+
+# note: `await` must be used inside an async function / running event loop
+try:
+ _response = await router.schedule_acompletion( # 👈 ADDS TO QUEUE + POLLS + MAKES CALL
+ model="gpt-3.5-turbo",
+ messages=[{"role": "user", "content": "Hey!"}],
+ priority=0, # 👈 LOWER IS BETTER
+ )
+except Exception as e:
+ print("didn't make request")
+```
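+
+Since `schedule_acompletion` is async, multiple requests can be queued concurrently. A rough sketch, assuming the `router` defined above and that lower `priority` values are dequeued first:
+
+```python
+import asyncio
+
+async def main():
+    # queue two requests with different priorities against the same router
+    results = await asyncio.gather(
+        router.schedule_acompletion(
+            model="gpt-3.5-turbo",
+            messages=[{"role": "user", "content": "urgent request"}],
+            priority=0,  # 👈 served first when the deployment is saturated
+        ),
+        router.schedule_acompletion(
+            model="gpt-3.5-turbo",
+            messages=[{"role": "user", "content": "background request"}],
+            priority=10,  # 👈 served later
+        ),
+        return_exceptions=True,  # a request that times out in the queue surfaces as an exception
+    )
+    print(results)
+
+asyncio.run(main())
+```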
+
+### PROXY
+
+```yaml
+model_list:
+ - model_name: gpt-3.5-turbo-fake-model
+ litellm_params:
+ model: gpt-3.5-turbo
+ mock_response: "hello world!"
+ api_key: my-good-key
+
+router_settings:
+  redis_host: os.environ/REDIS_HOST
+ redis_password: os.environ/REDIS_PASSWORD
+ redis_port: os.environ/REDIS_PORT
+```
+
+```bash
+$ litellm --config /path/to/config.yaml
+
+# RUNNING on http://0.0.0.0:4000
+```
+
+```bash
+curl -X POST 'http://localhost:4000/queue/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "gpt-3.5-turbo-fake-model",
+ "messages": [
+ {
+ "role": "user",
+ "content": "what is the meaning of the universe? 1234"
+ }],
+ "priority": 0 👈 SET VALUE HERE
+}'
+```
\ No newline at end of file
diff --git a/docs/my-website/docs/secret.md b/docs/my-website/docs/secret.md
index 2b945837ad..08c2e89d1f 100644
--- a/docs/my-website/docs/secret.md
+++ b/docs/my-website/docs/secret.md
@@ -1,11 +1,31 @@
# Secret Manager
LiteLLM supports reading secrets from Azure Key Vault and Infisical
+- AWS Key Management Service
+- AWS Secret Manager
- [Azure Key Vault](#azure-key-vault)
- Google Key Management Service
- [Infisical Secret Manager](#infisical-secret-manager)
- [.env Files](#env-files)
+## AWS Key Management Service
+
+Use AWS KMS to store an encrypted copy of your Proxy Master Key in the environment.
+
+```bash
+export LITELLM_MASTER_KEY="djZ9xjVaZ..." # 👈 ENCRYPTED KEY
+export AWS_REGION_NAME="us-west-2"
+```
+
+```yaml
+general_settings:
+ key_management_system: "aws_kms"
+ key_management_settings:
+ hosted_keys: ["LITELLM_MASTER_KEY"] # 👈 WHICH KEYS ARE STORED ON KMS
+```
+
+[**See Decryption Code**](https://github.com/BerriAI/litellm/blob/a2da2a8f168d45648b61279d4795d647d94f90c9/litellm/utils.py#L10182)
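+
+To produce the encrypted value for `LITELLM_MASTER_KEY`, one option is to encrypt the plaintext master key with KMS and base64-encode the ciphertext. A rough sketch with `boto3` is below; the key alias and plaintext are placeholders, and whether LiteLLM expects the ciphertext base64-encoded is an assumption worth verifying against the decryption code linked above.
+
+```python
+import base64
+
+import boto3
+
+kms = boto3.client("kms", region_name="us-west-2")
+
+# encrypt the plaintext proxy master key with your KMS key (placeholder alias)
+resp = kms.encrypt(
+    KeyId="alias/litellm-master-key",
+    Plaintext=b"sk-1234",
+)
+
+# base64-encode the ciphertext, then export it as LITELLM_MASTER_KEY
+print(base64.b64encode(resp["CiphertextBlob"]).decode())
+```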
+
## AWS Secret Manager
Store your proxy keys in AWS Secret Manager.
diff --git a/docs/my-website/package-lock.json b/docs/my-website/package-lock.json
index 0738c4d09f..48b5419862 100644
--- a/docs/my-website/package-lock.json
+++ b/docs/my-website/package-lock.json
@@ -5975,9 +5975,9 @@
}
},
"node_modules/caniuse-lite": {
- "version": "1.0.30001519",
- "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001519.tgz",
- "integrity": "sha512-0QHgqR+Jv4bxHMp8kZ1Kn8CH55OikjKJ6JmKkZYP1F3D7w+lnFXF70nG5eNfsZS89jadi5Ywy5UCSKLAglIRkg==",
+ "version": "1.0.30001629",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001629.tgz",
+ "integrity": "sha512-c3dl911slnQhmxUIT4HhYzT7wnBK/XYpGnYLOj4nJBaRiw52Ibe7YxlDaAeRECvA786zCuExhxIUJ2K7nHMrBw==",
"funding": [
{
"type": "opencollective",
diff --git a/docs/my-website/yarn.lock b/docs/my-website/yarn.lock
index 3acc481539..e96d90e26f 100644
--- a/docs/my-website/yarn.lock
+++ b/docs/my-website/yarn.lock
@@ -84,7 +84,7 @@
"@algolia/requester-common" "4.19.1"
"@algolia/transporter" "4.19.1"
-"@algolia/client-search@4.19.1":
+"@algolia/client-search@>= 4.9.1 < 6", "@algolia/client-search@4.19.1":
version "4.19.1"
resolved "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.19.1.tgz"
integrity sha512-mBecfMFS4N+yK/p0ZbK53vrZbL6OtWMk8YmnOv1i0LXx4pelY8TFhqKoTit3NPVPwoSNN0vdSN9dTu1xr1XOVw==
@@ -146,13 +146,6 @@
"@jridgewell/gen-mapping" "^0.3.0"
"@jridgewell/trace-mapping" "^0.3.9"
-"@babel/code-frame@7.10.4", "@babel/code-frame@^7.5.5":
- version "7.10.4"
- resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz"
- integrity sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==
- dependencies:
- "@babel/highlight" "^7.10.4"
-
"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.22.10", "@babel/code-frame@^7.22.13", "@babel/code-frame@^7.8.3":
version "7.22.13"
resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz"
@@ -161,11 +154,39 @@
"@babel/highlight" "^7.22.13"
chalk "^2.4.2"
+"@babel/code-frame@^7.5.5", "@babel/code-frame@7.10.4":
+ version "7.10.4"
+ resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz"
+ integrity sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==
+ dependencies:
+ "@babel/highlight" "^7.10.4"
+
"@babel/compat-data@^7.22.5", "@babel/compat-data@^7.22.6", "@babel/compat-data@^7.22.9":
version "7.22.9"
resolved "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.9.tgz"
integrity sha512-5UamI7xkUcJ3i9qVDS+KFDEK8/7oJ55/sJMB1Ge7IEapr7KfdfV/HErR+koZwOfd+SgtFKOKRhRakdg++DcJpQ==
+"@babel/core@^7.0.0", "@babel/core@^7.0.0-0", "@babel/core@^7.0.0-0 || ^8.0.0-0 <8.0.0", "@babel/core@^7.11.6", "@babel/core@^7.12.0", "@babel/core@^7.12.3", "@babel/core@^7.13.0", "@babel/core@^7.18.6", "@babel/core@^7.19.6", "@babel/core@^7.4.0 || ^8.0.0-0 <8.0.0":
+ version "7.22.10"
+ resolved "https://registry.npmjs.org/@babel/core/-/core-7.22.10.tgz"
+ integrity sha512-fTmqbbUBAwCcre6zPzNngvsI0aNrPZe77AeqvDxWM9Nm+04RrJ3CAmGHA9f7lJQY6ZMhRztNemy4uslDxTX4Qw==
+ dependencies:
+ "@ampproject/remapping" "^2.2.0"
+ "@babel/code-frame" "^7.22.10"
+ "@babel/generator" "^7.22.10"
+ "@babel/helper-compilation-targets" "^7.22.10"
+ "@babel/helper-module-transforms" "^7.22.9"
+ "@babel/helpers" "^7.22.10"
+ "@babel/parser" "^7.22.10"
+ "@babel/template" "^7.22.5"
+ "@babel/traverse" "^7.22.10"
+ "@babel/types" "^7.22.10"
+ convert-source-map "^1.7.0"
+ debug "^4.1.0"
+ gensync "^1.0.0-beta.2"
+ json5 "^2.2.2"
+ semver "^6.3.1"
+
"@babel/core@7.12.9":
version "7.12.9"
resolved "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz"
@@ -188,27 +209,6 @@
semver "^5.4.1"
source-map "^0.5.0"
-"@babel/core@^7.12.3", "@babel/core@^7.18.6", "@babel/core@^7.19.6":
- version "7.22.10"
- resolved "https://registry.npmjs.org/@babel/core/-/core-7.22.10.tgz"
- integrity sha512-fTmqbbUBAwCcre6zPzNngvsI0aNrPZe77AeqvDxWM9Nm+04RrJ3CAmGHA9f7lJQY6ZMhRztNemy4uslDxTX4Qw==
- dependencies:
- "@ampproject/remapping" "^2.2.0"
- "@babel/code-frame" "^7.22.10"
- "@babel/generator" "^7.22.10"
- "@babel/helper-compilation-targets" "^7.22.10"
- "@babel/helper-module-transforms" "^7.22.9"
- "@babel/helpers" "^7.22.10"
- "@babel/parser" "^7.22.10"
- "@babel/template" "^7.22.5"
- "@babel/traverse" "^7.22.10"
- "@babel/types" "^7.22.10"
- convert-source-map "^1.7.0"
- debug "^4.1.0"
- gensync "^1.0.0-beta.2"
- json5 "^2.2.2"
- semver "^6.3.1"
-
"@babel/generator@^7.12.5", "@babel/generator@^7.18.7", "@babel/generator@^7.22.10", "@babel/generator@^7.23.3":
version "7.23.3"
resolved "https://registry.npmjs.org/@babel/generator/-/generator-7.23.3.tgz"
@@ -331,16 +331,16 @@
dependencies:
"@babel/types" "^7.22.5"
-"@babel/helper-plugin-utils@7.10.4":
- version "7.10.4"
- resolved "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz"
- integrity sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==
-
"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3":
version "7.22.5"
resolved "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz"
integrity sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==
+"@babel/helper-plugin-utils@7.10.4":
+ version "7.10.4"
+ resolved "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz"
+ integrity sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==
+
"@babel/helper-remap-async-to-generator@^7.22.5", "@babel/helper-remap-async-to-generator@^7.22.9":
version "7.22.9"
resolved "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.9.tgz"
@@ -451,7 +451,7 @@
"@babel/helper-create-class-features-plugin" "^7.18.6"
"@babel/helper-plugin-utils" "^7.18.6"
-"@babel/plugin-proposal-object-rest-spread@7.12.1", "@babel/plugin-proposal-object-rest-spread@^7.12.1":
+"@babel/plugin-proposal-object-rest-spread@^7.12.1", "@babel/plugin-proposal-object-rest-spread@7.12.1":
version "7.12.1"
resolved "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz"
integrity sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==
@@ -528,13 +528,6 @@
dependencies:
"@babel/helper-plugin-utils" "^7.8.0"
-"@babel/plugin-syntax-jsx@7.12.1":
- version "7.12.1"
- resolved "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz"
- integrity sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==
- dependencies:
- "@babel/helper-plugin-utils" "^7.10.4"
-
"@babel/plugin-syntax-jsx@^7.22.5":
version "7.22.5"
resolved "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz"
@@ -542,6 +535,13 @@
dependencies:
"@babel/helper-plugin-utils" "^7.22.5"
+"@babel/plugin-syntax-jsx@7.12.1":
+ version "7.12.1"
+ resolved "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz"
+ integrity sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.10.4"
+
"@babel/plugin-syntax-logical-assignment-operators@^7.10.4":
version "7.10.4"
resolved "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz"
@@ -563,7 +563,7 @@
dependencies:
"@babel/helper-plugin-utils" "^7.10.4"
-"@babel/plugin-syntax-object-rest-spread@7.8.3", "@babel/plugin-syntax-object-rest-spread@^7.8.0", "@babel/plugin-syntax-object-rest-spread@^7.8.3":
+"@babel/plugin-syntax-object-rest-spread@^7.8.0", "@babel/plugin-syntax-object-rest-spread@^7.8.3", "@babel/plugin-syntax-object-rest-spread@7.8.3":
version "7.8.3"
resolved "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz"
integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==
@@ -1279,7 +1279,7 @@
"@docsearch/css" "3.5.1"
algoliasearch "^4.0.0"
-"@docusaurus/core@2.4.1":
+"@docusaurus/core@^2.0.0-alpha.60 || ^2.0.0", "@docusaurus/core@2.4.1":
version "2.4.1"
resolved "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.1.tgz"
integrity sha512-SNsY7PshK3Ri7vtsLXVeAJGS50nJN3RgF836zkyUfAD01Fq+sAk5EwWgLw+nnm5KVNGDu7PRR2kRGDsWvqpo0g==
@@ -1502,7 +1502,7 @@
"@docusaurus/utils-validation" "2.4.1"
tslib "^2.4.0"
-"@docusaurus/plugin-google-gtag@2.4.1", "@docusaurus/plugin-google-gtag@^2.4.1":
+"@docusaurus/plugin-google-gtag@^2.4.1", "@docusaurus/plugin-google-gtag@2.4.1":
version "2.4.1"
resolved "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.4.1.tgz"
integrity sha512-mKIefK+2kGTQBYvloNEKtDmnRD7bxHLsBcxgnbt4oZwzi2nxCGjPX6+9SQO2KCN5HZbNrYmGo5GJfMgoRvy6uA==
@@ -1573,7 +1573,7 @@
"@docusaurus/theme-search-algolia" "2.4.1"
"@docusaurus/types" "2.4.1"
-"@docusaurus/react-loadable@5.5.2", "react-loadable@npm:@docusaurus/react-loadable@5.5.2":
+"@docusaurus/react-loadable@5.5.2":
version "5.5.2"
resolved "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz"
integrity sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==
@@ -1671,7 +1671,7 @@
fs-extra "^10.1.0"
tslib "^2.4.0"
-"@docusaurus/types@2.4.1":
+"@docusaurus/types@*", "@docusaurus/types@2.4.1":
version "2.4.1"
resolved "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.1.tgz"
integrity sha512-0R+cbhpMkhbRXX138UOc/2XZFF8hiZa6ooZAEEJFp5scytzCw4tC1gChMFXrpa3d2tYE6AX8IrOEpSonLmfQuQ==
@@ -1857,16 +1857,16 @@
"@nodelib/fs.stat" "2.0.5"
run-parallel "^1.1.9"
-"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2":
- version "2.0.5"
- resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz"
- integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==
-
"@nodelib/fs.stat@^1.1.2":
version "1.1.3"
resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz"
integrity sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw==
+"@nodelib/fs.stat@^2.0.2", "@nodelib/fs.stat@2.0.5":
+ version "2.0.5"
+ resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz"
+ integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==
+
"@nodelib/fs.walk@^1.2.3":
version "1.2.8"
resolved "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz"
@@ -1975,7 +1975,7 @@
"@svgr/babel-plugin-transform-react-native-svg" "^6.5.1"
"@svgr/babel-plugin-transform-svg-component" "^6.5.1"
-"@svgr/core@^6.5.1":
+"@svgr/core@*", "@svgr/core@^6.0.0", "@svgr/core@^6.5.1":
version "6.5.1"
resolved "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz"
integrity sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==
@@ -2248,7 +2248,7 @@
"@types/history" "^4.7.11"
"@types/react" "*"
-"@types/react@*":
+"@types/react@*", "@types/react@>= 16.8.0 < 19.0.0":
version "18.2.20"
resolved "https://registry.npmjs.org/@types/react/-/react-18.2.20.tgz"
integrity sha512-WKNtmsLWJM/3D5mG4U84cysVY31ivmyw85dE84fOCk5Hx78wezB/XEjVPWl2JTZ5FkEeaTJf+VgUAUn3PE7Isw==
@@ -2329,7 +2329,7 @@
dependencies:
"@types/yargs-parser" "*"
-"@webassemblyjs/ast@1.11.6", "@webassemblyjs/ast@^1.11.5":
+"@webassemblyjs/ast@^1.11.5", "@webassemblyjs/ast@1.11.6":
version "1.11.6"
resolved "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz"
integrity sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==
@@ -2430,7 +2430,7 @@
"@webassemblyjs/wasm-gen" "1.11.6"
"@webassemblyjs/wasm-parser" "1.11.6"
-"@webassemblyjs/wasm-parser@1.11.6", "@webassemblyjs/wasm-parser@^1.11.5":
+"@webassemblyjs/wasm-parser@^1.11.5", "@webassemblyjs/wasm-parser@1.11.6":
version "1.11.6"
resolved "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz"
integrity sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==
@@ -2483,21 +2483,21 @@ acorn-walk@^8.0.0:
resolved "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz"
integrity sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==
-acorn@^8.0.4, acorn@^8.7.1, acorn@^8.8.2:
+acorn@^8, acorn@^8.0.4, acorn@^8.7.1, acorn@^8.8.2:
version "8.10.0"
resolved "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz"
integrity sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==
-address@1.1.2:
- version "1.1.2"
- resolved "https://registry.npmjs.org/address/-/address-1.1.2.tgz"
- integrity sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==
-
address@^1.0.1, address@^1.1.2:
version "1.2.2"
resolved "https://registry.npmjs.org/address/-/address-1.2.2.tgz"
integrity sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==
+address@1.1.2:
+ version "1.1.2"
+ resolved "https://registry.npmjs.org/address/-/address-1.1.2.tgz"
+ integrity sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==
+
aggregate-error@^3.0.0:
version "3.1.0"
resolved "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz"
@@ -2540,7 +2540,7 @@ ajv-keywords@^5.1.0:
dependencies:
fast-deep-equal "^3.1.3"
-ajv@^6.12.2, ajv@^6.12.3, ajv@^6.12.4, ajv@^6.12.5:
+ajv@^6.12.2, ajv@^6.12.3, ajv@^6.12.4, ajv@^6.12.5, ajv@^6.9.1:
version "6.12.6"
resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz"
integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==
@@ -2550,7 +2550,17 @@ ajv@^6.12.2, ajv@^6.12.3, ajv@^6.12.4, ajv@^6.12.5:
json-schema-traverse "^0.4.1"
uri-js "^4.2.2"
-ajv@^8.0.0, ajv@^8.9.0:
+ajv@^8.0.0:
+ version "8.12.0"
+ resolved "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz"
+ integrity sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==
+ dependencies:
+ fast-deep-equal "^3.1.1"
+ json-schema-traverse "^1.0.0"
+ require-from-string "^2.0.2"
+ uri-js "^4.2.2"
+
+ajv@^8.8.2, ajv@^8.9.0:
version "8.12.0"
resolved "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz"
integrity sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==
@@ -2567,7 +2577,7 @@ algoliasearch-helper@^3.10.0:
dependencies:
"@algolia/events" "^4.0.1"
-algoliasearch@^4.0.0, algoliasearch@^4.13.1:
+algoliasearch@^4.0.0, algoliasearch@^4.13.1, "algoliasearch@>= 3.1 < 6", "algoliasearch@>= 4.9.1 < 6":
version "4.19.1"
resolved "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.19.1.tgz"
integrity sha512-IJF5b93b2MgAzcE/tuzW0yOPnuUyRgGAtaPv5UUywXM8kzqfdwZTO4sPJBzoGz1eOy6H9uEchsJsBFTELZSu+g==
@@ -2725,16 +2735,16 @@ array-find-index@^1.0.1:
resolved "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz"
integrity sha512-M1HQyIXcBGtVywBt8WVdim+lrNaK7VHp99Qt5pSNziXznKHViIBbXWtfRTpEFpF/c4FdfxNAsCCwPp5phBYJtw==
-array-flatten@1.1.1:
- version "1.1.1"
- resolved "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz"
- integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==
-
array-flatten@^2.1.2:
version "2.1.2"
resolved "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz"
integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==
+array-flatten@1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz"
+ integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==
+
array-union@^1.0.1:
version "1.0.2"
resolved "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz"
@@ -2829,7 +2839,7 @@ asn1@~0.2.3:
dependencies:
safer-buffer "~2.1.0"
-assert-plus@1.0.0, assert-plus@^1.0.0:
+assert-plus@^1.0.0, assert-plus@1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz"
integrity sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==
@@ -3005,16 +3015,6 @@ balanced-match@^1.0.0:
resolved "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz"
integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
-base16@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/base16/-/base16-1.0.0.tgz"
- integrity sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==
-
-base64-js@^1.3.1:
- version "1.5.1"
- resolved "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz"
- integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==
-
base@^0.11.1:
version "0.11.2"
resolved "https://registry.npmjs.org/base/-/base-0.11.2.tgz"
@@ -3028,6 +3028,16 @@ base@^0.11.1:
mixin-deep "^1.2.0"
pascalcase "^0.1.1"
+base16@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/base16/-/base16-1.0.0.tgz"
+ integrity sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==
+
+base64-js@^1.3.1:
+ version "1.5.1"
+ resolved "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz"
+ integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==
+
batch@0.6.1:
version "0.6.1"
resolved "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz"
@@ -3140,7 +3150,7 @@ bluebird@~3.4.1:
body-parser@1.20.2:
version "1.20.2"
- resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.2.tgz#6feb0e21c4724d06de7ff38da36dad4f57a747fd"
+ resolved "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz"
integrity sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==
dependencies:
bytes "3.1.2"
@@ -3240,17 +3250,7 @@ braces@^3.0.2, braces@~3.0.2:
dependencies:
fill-range "^7.0.1"
-browserslist@4.14.2, browserslist@^4.12.0:
- version "4.14.2"
- resolved "https://registry.npmjs.org/browserslist/-/browserslist-4.14.2.tgz"
- integrity sha512-HI4lPveGKUR0x2StIz+2FXfDk9SfVMrxn6PLh1JeGUwcuoDkdKZebWiyLRJ68iIPDpMI4JLVDf7S7XzslgWOhw==
- dependencies:
- caniuse-lite "^1.0.30001125"
- electron-to-chromium "^1.3.564"
- escalade "^3.0.2"
- node-releases "^1.1.61"
-
-browserslist@^4.0.0, browserslist@^4.14.5, browserslist@^4.18.1, browserslist@^4.21.4, browserslist@^4.21.5, browserslist@^4.21.9:
+browserslist@^4.0.0, browserslist@^4.14.5, browserslist@^4.18.1, browserslist@^4.21.4, browserslist@^4.21.5, browserslist@^4.21.9, "browserslist@>= 4.21.0":
version "4.21.10"
resolved "https://registry.npmjs.org/browserslist/-/browserslist-4.21.10.tgz"
integrity sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ==
@@ -3260,6 +3260,16 @@ browserslist@^4.0.0, browserslist@^4.14.5, browserslist@^4.18.1, browserslist@^4
node-releases "^2.0.13"
update-browserslist-db "^1.0.11"
+browserslist@^4.12.0, browserslist@4.14.2:
+ version "4.14.2"
+ resolved "https://registry.npmjs.org/browserslist/-/browserslist-4.14.2.tgz"
+ integrity sha512-HI4lPveGKUR0x2StIz+2FXfDk9SfVMrxn6PLh1JeGUwcuoDkdKZebWiyLRJ68iIPDpMI4JLVDf7S7XzslgWOhw==
+ dependencies:
+ caniuse-lite "^1.0.30001125"
+ electron-to-chromium "^1.3.564"
+ escalade "^3.0.2"
+ node-releases "^1.1.61"
+
buffer-alloc-unsafe@^1.1.0:
version "1.1.0"
resolved "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz"
@@ -3442,9 +3452,9 @@ caniuse-api@^3.0.0:
lodash.uniq "^4.5.0"
caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001109, caniuse-lite@^1.0.30001125, caniuse-lite@^1.0.30001464, caniuse-lite@^1.0.30001517:
- version "1.0.30001519"
- resolved "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001519.tgz"
- integrity sha512-0QHgqR+Jv4bxHMp8kZ1Kn8CH55OikjKJ6JmKkZYP1F3D7w+lnFXF70nG5eNfsZS89jadi5Ywy5UCSKLAglIRkg==
+ version "1.0.30001629"
+ resolved "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001629.tgz"
+ integrity sha512-c3dl911slnQhmxUIT4HhYzT7wnBK/XYpGnYLOj4nJBaRiw52Ibe7YxlDaAeRECvA786zCuExhxIUJ2K7nHMrBw==
caseless@~0.12.0:
version "0.12.0"
@@ -3473,15 +3483,6 @@ chainsaw@~0.1.0:
dependencies:
traverse ">=0.3.0 <0.4"
-chalk@2.4.2, chalk@^2.4.1, chalk@^2.4.2:
- version "2.4.2"
- resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz"
- integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
- dependencies:
- ansi-styles "^3.2.1"
- escape-string-regexp "^1.0.5"
- supports-color "^5.3.0"
-
chalk@^1.0.0:
version "1.1.3"
resolved "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz"
@@ -3493,6 +3494,24 @@ chalk@^1.0.0:
strip-ansi "^3.0.0"
supports-color "^2.0.0"
+chalk@^2.4.1:
+ version "2.4.2"
+ resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz"
+ integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
+ dependencies:
+ ansi-styles "^3.2.1"
+ escape-string-regexp "^1.0.5"
+ supports-color "^5.3.0"
+
+chalk@^2.4.2:
+ version "2.4.2"
+ resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz"
+ integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
+ dependencies:
+ ansi-styles "^3.2.1"
+ escape-string-regexp "^1.0.5"
+ supports-color "^5.3.0"
+
chalk@^3.0.0:
version "3.0.0"
resolved "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz"
@@ -3509,6 +3528,15 @@ chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.2:
ansi-styles "^4.1.0"
supports-color "^7.1.0"
+chalk@2.4.2:
+ version "2.4.2"
+ resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz"
+ integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
+ dependencies:
+ ansi-styles "^3.2.1"
+ escape-string-regexp "^1.0.5"
+ supports-color "^5.3.0"
+
character-entities-legacy@^1.0.0:
version "1.1.4"
resolved "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz"
@@ -3536,6 +3564,19 @@ cheerio-select@^2.1.0:
domhandler "^5.0.3"
domutils "^3.0.1"
+cheerio@^1.0.0-rc.12, cheerio@^1.0.0-rc.3:
+ version "1.0.0-rc.12"
+ resolved "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz"
+ integrity sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==
+ dependencies:
+ cheerio-select "^2.1.0"
+ dom-serializer "^2.0.0"
+ domhandler "^5.0.3"
+ domutils "^3.0.1"
+ htmlparser2 "^8.0.1"
+ parse5 "^7.0.0"
+ parse5-htmlparser2-tree-adapter "^7.0.0"
+
cheerio@0.22.0:
version "0.22.0"
resolved "https://registry.npmjs.org/cheerio/-/cheerio-0.22.0.tgz"
@@ -3558,19 +3599,6 @@ cheerio@0.22.0:
lodash.reject "^4.4.0"
lodash.some "^4.4.0"
-cheerio@^1.0.0-rc.12, cheerio@^1.0.0-rc.3:
- version "1.0.0-rc.12"
- resolved "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz"
- integrity sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==
- dependencies:
- cheerio-select "^2.1.0"
- dom-serializer "^2.0.0"
- domhandler "^5.0.3"
- domutils "^3.0.1"
- htmlparser2 "^8.0.1"
- parse5 "^7.0.0"
- parse5-htmlparser2-tree-adapter "^7.0.0"
-
chokidar@^3.4.2, chokidar@^3.5.3:
version "3.5.3"
resolved "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz"
@@ -3661,13 +3689,6 @@ clone-deep@^4.0.1:
kind-of "^6.0.2"
shallow-clone "^3.0.0"
-clone-response@1.0.2:
- version "1.0.2"
- resolved "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz"
- integrity sha512-yjLXh88P599UOyPTFX0POsd7WxnbsVsGohcwzHOLspIhhpalPw1BcqED8NblyZLKcGrL8dTgMlcaZxV2jAD41Q==
- dependencies:
- mimic-response "^1.0.0"
-
clone-response@^1.0.2:
version "1.0.3"
resolved "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz"
@@ -3675,6 +3696,13 @@ clone-response@^1.0.2:
dependencies:
mimic-response "^1.0.0"
+clone-response@1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz"
+ integrity sha512-yjLXh88P599UOyPTFX0POsd7WxnbsVsGohcwzHOLspIhhpalPw1BcqED8NblyZLKcGrL8dTgMlcaZxV2jAD41Q==
+ dependencies:
+ mimic-response "^1.0.0"
+
clsx@^1.2.1:
version "1.2.1"
resolved "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz"
@@ -3721,16 +3749,16 @@ color-convert@^2.0.1:
dependencies:
color-name "~1.1.4"
-color-name@1.1.3:
- version "1.1.3"
- resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz"
- integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==
-
color-name@^1.0.0, color-name@~1.1.4:
version "1.1.4"
resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz"
integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
+color-name@1.1.3:
+ version "1.1.3"
+ resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz"
+ integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==
+
color-string@^1.6.0, color-string@^1.9.0:
version "1.9.1"
resolved "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz"
@@ -3787,7 +3815,17 @@ comma-separated-tokens@^1.0.0:
resolved "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz"
integrity sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==
-commander@^2.19.0, commander@^2.20.0, commander@^2.8.1:
+commander@^2.19.0:
+ version "2.20.3"
+ resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz"
+ integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
+
+commander@^2.20.0:
+ version "2.20.3"
+ resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz"
+ integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
+
+commander@^2.8.1:
version "2.20.3"
resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz"
integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
@@ -3909,7 +3947,7 @@ console-stream@^0.1.1:
resolved "https://registry.npmjs.org/consolidated-events/-/consolidated-events-2.0.2.tgz"
integrity sha512-2/uRVMdRypf5z/TW/ncD/66l75P5hH2vM/GR8Jf8HLc2xnfJtmina6F6du8+v4Z2vTrMo7jC+W1tmEEuuELgkQ==
-content-disposition@0.5.2, content-disposition@^0.5.2:
+content-disposition@^0.5.2, content-disposition@0.5.2:
version "0.5.2"
resolved "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz"
integrity sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==
@@ -3943,7 +3981,7 @@ cookie-signature@1.0.6:
cookie@0.6.0:
version "0.6.0"
- resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.6.0.tgz#2798b04b071b0ecbff0dbb62a505a8efa4e19051"
+ resolved "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz"
integrity sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==
copy-descriptor@^0.1.0:
@@ -3990,16 +4028,16 @@ core-js@^3.23.3:
resolved "https://registry.npmjs.org/core-js/-/core-js-3.32.0.tgz"
integrity sha512-rd4rYZNlF3WuoYuRIDEmbR/ga9CeuWX9U05umAvgrrZoHY4Z++cp/xwPQMvUpBB4Ag6J8KfD80G0zwCyaSxDww==
-core-util-is@1.0.2:
- version "1.0.2"
- resolved "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz"
- integrity sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==
-
core-util-is@~1.0.0:
version "1.0.3"
resolved "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz"
integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==
+core-util-is@1.0.2:
+ version "1.0.2"
+ resolved "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz"
+ integrity sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==
+
cosmiconfig@^5.0.0:
version "5.2.1"
resolved "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz"
@@ -4049,15 +4087,6 @@ cross-fetch@^3.1.5:
dependencies:
node-fetch "^2.6.12"
-cross-spawn@7.0.3, cross-spawn@^7.0.3:
- version "7.0.3"
- resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz"
- integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
- dependencies:
- path-key "^3.1.0"
- shebang-command "^2.0.0"
- which "^2.0.1"
-
cross-spawn@^5.0.1:
version "5.1.0"
resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz"
@@ -4078,6 +4107,15 @@ cross-spawn@^6.0.0:
shebang-command "^1.2.0"
which "^1.2.9"
+cross-spawn@^7.0.3, cross-spawn@7.0.3:
+ version "7.0.3"
+ resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz"
+ integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
+ dependencies:
+ path-key "^3.1.0"
+ shebang-command "^2.0.0"
+ which "^2.0.1"
+
crowdin-cli@^0.3.0:
version "0.3.0"
resolved "https://registry.npmjs.org/crowdin-cli/-/crowdin-cli-0.3.0.tgz"
@@ -4092,7 +4130,7 @@ crypto-random-string@^2.0.0:
resolved "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz"
integrity sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==
-css-color-names@0.0.4, css-color-names@^0.0.4:
+css-color-names@^0.0.4, css-color-names@0.0.4:
version "0.0.4"
resolved "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz"
integrity sha512-zj5D7X1U2h2zsXOAM8EyUREBnnts6H+Jm+d1M2DbiQQcUtnqgQsMrdo8JW9R80YFUmIdBZeMu5wvYM7hcgWP/Q==
@@ -4188,14 +4226,6 @@ css-selector-parser@^1.0.0:
resolved "https://registry.npmjs.org/css-selector-parser/-/css-selector-parser-1.4.1.tgz"
integrity sha512-HYPSb7y/Z7BNDCOrakL4raGO2zltZkbeXyAd6Tg9obzix6QhzxCotdBl6VT0Dv4vZfJGVz3WL/xaEI9Ly3ul0g==
-css-tree@1.0.0-alpha.37:
- version "1.0.0-alpha.37"
- resolved "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz"
- integrity sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==
- dependencies:
- mdn-data "2.0.4"
- source-map "^0.6.1"
-
css-tree@^1.1.2, css-tree@^1.1.3:
version "1.1.3"
resolved "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz"
@@ -4204,10 +4234,13 @@ css-tree@^1.1.2, css-tree@^1.1.3:
mdn-data "2.0.14"
source-map "^0.6.1"
-css-what@2.1:
- version "2.1.3"
- resolved "https://registry.npmjs.org/css-what/-/css-what-2.1.3.tgz"
- integrity sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg==
+css-tree@1.0.0-alpha.37:
+ version "1.0.0-alpha.37"
+ resolved "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz"
+ integrity sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==
+ dependencies:
+ mdn-data "2.0.4"
+ source-map "^0.6.1"
css-what@^3.2.1:
version "3.4.2"
@@ -4219,6 +4252,11 @@ css-what@^6.0.1, css-what@^6.1.0:
resolved "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz"
integrity sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==
+css-what@2.1:
+ version "2.1.3"
+ resolved "https://registry.npmjs.org/css-what/-/css-what-2.1.3.tgz"
+ integrity sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg==
+
cssesc@^3.0.0:
version "3.0.0"
resolved "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz"
@@ -4379,20 +4417,55 @@ dashdash@^1.12.0:
dependencies:
assert-plus "^1.0.0"
-debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.0:
+debug@^2.2.0:
version "2.6.9"
resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz"
integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
dependencies:
ms "2.0.0"
-debug@4, debug@^4.1.0, debug@^4.1.1:
+debug@^2.3.3:
+ version "2.6.9"
+ resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz"
+ integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
+ dependencies:
+ ms "2.0.0"
+
+debug@^2.6.0:
+ version "2.6.9"
+ resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz"
+ integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
+ dependencies:
+ ms "2.0.0"
+
+debug@^3.1.0:
+ version "3.2.7"
+ resolved "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz"
+ integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==
+ dependencies:
+ ms "^2.1.1"
+
+debug@^3.2.7:
+ version "3.2.7"
+ resolved "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz"
+ integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==
+ dependencies:
+ ms "^2.1.1"
+
+debug@^4.1.0, debug@^4.1.1, debug@4:
version "4.3.4"
resolved "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz"
integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==
dependencies:
ms "2.1.2"
+debug@2.6.9:
+ version "2.6.9"
+ resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz"
+ integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
+ dependencies:
+ ms "2.0.0"
+
debug@4.3.1:
version "4.3.1"
resolved "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz"
@@ -4400,13 +4473,6 @@ debug@4.3.1:
dependencies:
ms "2.1.2"
-debug@^3.1.0, debug@^3.2.7:
- version "3.2.7"
- resolved "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz"
- integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==
- dependencies:
- ms "^2.1.1"
-
decamelize@^1.1.2:
version "1.2.0"
resolved "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz"
@@ -4574,16 +4640,16 @@ delayed-stream@~1.0.0:
resolved "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz"
integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==
-depd@2.0.0:
- version "2.0.0"
- resolved "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz"
- integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==
-
depd@~1.1.2:
version "1.1.2"
resolved "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz"
integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==
+depd@2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz"
+ integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==
+
destroy@1.2.0:
version "1.2.0"
resolved "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz"
@@ -4606,7 +4672,7 @@ detect-node@^2.0.4:
resolved "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz"
integrity sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==
-detect-port-alt@1.1.6, detect-port-alt@^1.1.6:
+detect-port-alt@^1.1.6, detect-port-alt@1.1.6:
version "1.1.6"
resolved "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz"
integrity sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==
@@ -4627,6 +4693,13 @@ diacritics-map@^0.1.0:
resolved "https://registry.npmjs.org/diacritics-map/-/diacritics-map-0.1.0.tgz"
integrity sha512-3omnDTYrGigU0i4cJjvaKwD52B8aoqyX/NEIkukFFkogBemsIbhSa1O414fpTp5nuszJG6lvQ5vBvDVNCbSsaQ==
+dir-glob@^3.0.1:
+ version "3.0.1"
+ resolved "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz"
+ integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==
+ dependencies:
+ path-type "^4.0.0"
+
dir-glob@2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/dir-glob/-/dir-glob-2.0.0.tgz"
@@ -4635,13 +4708,6 @@ dir-glob@2.0.0:
arrify "^1.0.1"
path-type "^3.0.0"
-dir-glob@^3.0.1:
- version "3.0.1"
- resolved "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz"
- integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==
- dependencies:
- path-type "^4.0.0"
-
direction@^1.0.0:
version "1.0.4"
resolved "https://registry.npmjs.org/direction/-/direction-1.0.4.tgz"
@@ -4745,14 +4811,6 @@ dom-converter@^0.2.0:
dependencies:
utila "~0.4"
-dom-serializer@0:
- version "0.2.2"
- resolved "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz"
- integrity sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==
- dependencies:
- domelementtype "^2.0.1"
- entities "^2.0.0"
-
dom-serializer@^1.0.1:
version "1.4.1"
resolved "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz"
@@ -4779,7 +4837,15 @@ dom-serializer@~0.1.0:
domelementtype "^1.3.0"
entities "^1.1.1"
-domelementtype@1, domelementtype@^1.3.0, domelementtype@^1.3.1:
+dom-serializer@0:
+ version "0.2.2"
+ resolved "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz"
+ integrity sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==
+ dependencies:
+ domelementtype "^2.0.1"
+ entities "^2.0.0"
+
+domelementtype@^1.3.0, domelementtype@^1.3.1, domelementtype@1:
version "1.3.1"
resolved "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz"
integrity sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==
@@ -4810,7 +4876,7 @@ domhandler@^5.0.2, domhandler@^5.0.3:
dependencies:
domelementtype "^2.3.0"
-domutils@1.5.1, domutils@^1.5.1:
+domutils@^1.5.1, domutils@1.5.1:
version "1.5.1"
resolved "https://registry.npmjs.org/domutils/-/domutils-1.5.1.tgz"
integrity sha512-gSu5Oi/I+3wDENBsOWBiRK1eoGxcywYSqg3rR960/+EfY0CF4EX1VPkgHOZ3WiS/Jg2DtliF6BhWcHlfpYUcGw==
@@ -4894,6 +4960,11 @@ download@^7.1.0:
p-event "^2.1.0"
pify "^3.0.0"
+duplexer@^0.1.1, duplexer@^0.1.2:
+ version "0.1.2"
+ resolved "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz"
+ integrity sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==
+
duplexer2@~0.1.4:
version "0.1.4"
resolved "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz"
@@ -4906,11 +4977,6 @@ duplexer3@^0.1.4:
resolved "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.5.tgz"
integrity sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA==
-duplexer@^0.1.1, duplexer@^0.1.2:
- version "0.1.2"
- resolved "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz"
- integrity sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==
-
eastasianwidth@^0.2.0:
version "0.2.0"
resolved "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz"
@@ -5025,7 +5091,7 @@ enzyme-shallow-equal@^1.0.1, enzyme-shallow-equal@^1.0.5:
has "^1.0.3"
object-is "^1.1.5"
-enzyme@^3.10.0:
+enzyme@^3.0.0, enzyme@^3.10.0:
version "3.11.0"
resolved "https://registry.npmjs.org/enzyme/-/enzyme-3.11.0.tgz"
integrity sha512-Dw8/Gs4vRjxY6/6i9wU0V+utmQO9kvh9XLnz3LIudviOnVYDEe2ec+0k+NQoMamn1VrjKgCUOWj5jG/5M5M0Qw==
@@ -5162,16 +5228,21 @@ escape-html@^1.0.3, escape-html@~1.0.3:
resolved "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz"
integrity sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==
-escape-string-regexp@2.0.0, escape-string-regexp@^2.0.0:
- version "2.0.0"
- resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz"
- integrity sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==
-
-escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5:
+escape-string-regexp@^1.0.2:
version "1.0.5"
resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz"
integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==
+escape-string-regexp@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz"
+ integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==
+
+escape-string-regexp@^2.0.0, escape-string-regexp@2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz"
+ integrity sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==
+
escape-string-regexp@^4.0.0:
version "4.0.0"
resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz"
@@ -5326,7 +5397,7 @@ expand-template@^2.0.3:
express@^4.17.1, express@^4.17.3:
version "4.19.2"
- resolved "https://registry.yarnpkg.com/express/-/express-4.19.2.tgz#e25437827a3aa7f2a827bc8171bbbb664a356465"
+ resolved "https://registry.npmjs.org/express/-/express-4.19.2.tgz"
integrity sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==
dependencies:
accepts "~1.3.8"
@@ -5383,7 +5454,15 @@ extend-shallow@^2.0.1:
dependencies:
is-extendable "^0.1.0"
-extend-shallow@^3.0.0, extend-shallow@^3.0.2:
+extend-shallow@^3.0.0:
+ version "3.0.2"
+ resolved "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz"
+ integrity sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==
+ dependencies:
+ assign-symbols "^1.0.0"
+ is-extendable "^1.0.1"
+
+extend-shallow@^3.0.2:
version "3.0.2"
resolved "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz"
integrity sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==
@@ -5410,7 +5489,7 @@ extglob@^2.0.4:
snapdragon "^0.8.1"
to-regex "^3.0.1"
-extsprintf@1.3.0, extsprintf@^1.2.0:
+extsprintf@^1.2.0, extsprintf@1.3.0:
version "1.3.0"
resolved "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz"
integrity sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==
@@ -5542,7 +5621,7 @@ figures@^1.3.5:
escape-string-regexp "^1.0.5"
object-assign "^4.1.0"
-file-loader@^6.2.0:
+file-loader@*, file-loader@^6.2.0:
version "6.2.0"
resolved "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz"
integrity sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==
@@ -5550,11 +5629,6 @@ file-loader@^6.2.0:
loader-utils "^2.0.0"
schema-utils "^3.0.0"
-file-type@5.2.0, file-type@^5.2.0:
- version "5.2.0"
- resolved "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz"
- integrity sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==
-
file-type@^10.4.0, file-type@^10.7.0:
version "10.11.0"
resolved "https://registry.npmjs.org/file-type/-/file-type-10.11.0.tgz"
@@ -5570,6 +5644,11 @@ file-type@^4.2.0:
resolved "https://registry.npmjs.org/file-type/-/file-type-4.4.0.tgz"
integrity sha512-f2UbFQEk7LXgWpi5ntcO86OeA/cC80fuDDDaX/fZ2ZGel+AF7leRQqBBW1eJNiiQkrZlAoM6P+VYP5P6bOlDEQ==
+file-type@^5.2.0:
+ version "5.2.0"
+ resolved "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz"
+ integrity sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==
+
file-type@^6.1.0:
version "6.2.0"
resolved "https://registry.npmjs.org/file-type/-/file-type-6.2.0.tgz"
@@ -5580,6 +5659,11 @@ file-type@^8.1.0:
resolved "https://registry.npmjs.org/file-type/-/file-type-8.1.0.tgz"
integrity sha512-qyQ0pzAy78gVoJsmYeNgl8uH8yKhr1lVhW7JbzJmnlRi0I4R2eEDEJZVKG8agpDnLpacwNbDhLNG/LMdxHD2YQ==
+file-type@5.2.0:
+ version "5.2.0"
+ resolved "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz"
+ integrity sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==
+
filename-reserved-regex@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/filename-reserved-regex/-/filename-reserved-regex-2.0.0.tgz"
@@ -5594,16 +5678,16 @@ filenamify@^2.0.0:
strip-outer "^1.0.0"
trim-repeated "^1.0.0"
-filesize@6.1.0:
- version "6.1.0"
- resolved "https://registry.npmjs.org/filesize/-/filesize-6.1.0.tgz"
- integrity sha512-LpCHtPQ3sFx67z+uh2HnSyWSLLu5Jxo21795uRDuar/EOuYWXib5EmPaGIBuSnRqH2IODiKA2k5re/K9OnN/Yg==
-
filesize@^8.0.6:
version "8.0.7"
resolved "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz"
integrity sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==
+filesize@6.1.0:
+ version "6.1.0"
+ resolved "https://registry.npmjs.org/filesize/-/filesize-6.1.0.tgz"
+ integrity sha512-LpCHtPQ3sFx67z+uh2HnSyWSLLu5Jxo21795uRDuar/EOuYWXib5EmPaGIBuSnRqH2IODiKA2k5re/K9OnN/Yg==
+
fill-range@^2.1.0:
version "2.2.4"
resolved "https://registry.npmjs.org/fill-range/-/fill-range-2.2.4.tgz"
@@ -5663,14 +5747,6 @@ find-cache-dir@^3.3.1:
make-dir "^3.0.2"
pkg-dir "^4.1.0"
-find-up@4.1.0, find-up@^4.0.0:
- version "4.1.0"
- resolved "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz"
- integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==
- dependencies:
- locate-path "^5.0.0"
- path-exists "^4.0.0"
-
find-up@^1.0.0:
version "1.1.2"
resolved "https://registry.npmjs.org/find-up/-/find-up-1.1.2.tgz"
@@ -5686,6 +5762,14 @@ find-up@^3.0.0:
dependencies:
locate-path "^3.0.0"
+find-up@^4.0.0, find-up@4.1.0:
+ version "4.1.0"
+ resolved "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz"
+ integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==
+ dependencies:
+ locate-path "^5.0.0"
+ path-exists "^4.0.0"
+
find-up@^5.0.0:
version "5.0.0"
resolved "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz"
@@ -5711,7 +5795,7 @@ flux@^4.0.1:
follow-redirects@^1.0.0, follow-redirects@^1.14.7:
version "1.15.6"
- resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.6.tgz#7f815c0cda4249c74ff09e95ef97c23b5fd0399b"
+ resolved "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz"
integrity sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==
for-each@^0.3.3:
@@ -5731,19 +5815,6 @@ forever-agent@~0.6.1:
resolved "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz"
integrity sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==
-fork-ts-checker-webpack-plugin@4.1.6:
- version "4.1.6"
- resolved "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-4.1.6.tgz"
- integrity sha512-DUxuQaKoqfNne8iikd14SAkh5uw4+8vNifp6gmA73yYNS6ywLIWSLD/n/mBzHQRpW3J7rbATEakmiA8JvkTyZw==
- dependencies:
- "@babel/code-frame" "^7.5.5"
- chalk "^2.4.1"
- micromatch "^3.1.10"
- minimatch "^3.0.4"
- semver "^5.6.0"
- tapable "^1.0.0"
- worker-rpc "^0.1.0"
-
fork-ts-checker-webpack-plugin@^6.5.0:
version "6.5.3"
resolved "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz"
@@ -5763,6 +5834,19 @@ fork-ts-checker-webpack-plugin@^6.5.0:
semver "^7.3.2"
tapable "^1.0.0"
+fork-ts-checker-webpack-plugin@4.1.6:
+ version "4.1.6"
+ resolved "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-4.1.6.tgz"
+ integrity sha512-DUxuQaKoqfNne8iikd14SAkh5uw4+8vNifp6gmA73yYNS6ywLIWSLD/n/mBzHQRpW3J7rbATEakmiA8JvkTyZw==
+ dependencies:
+ "@babel/code-frame" "^7.5.5"
+ chalk "^2.4.1"
+ micromatch "^3.1.10"
+ minimatch "^3.0.4"
+ semver "^5.6.0"
+ tapable "^1.0.0"
+ worker-rpc "^0.1.0"
+
form-data@~2.3.2:
version "2.3.3"
resolved "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz"
@@ -5816,7 +5900,17 @@ fs-extra@^10.1.0:
jsonfile "^6.0.1"
universalify "^2.0.0"
-fs-extra@^9.0.0, fs-extra@^9.0.1:
+fs-extra@^9.0.0:
+ version "9.1.0"
+ resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz"
+ integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==
+ dependencies:
+ at-least-node "^1.0.0"
+ graceful-fs "^4.2.0"
+ jsonfile "^6.0.1"
+ universalify "^2.0.0"
+
+fs-extra@^9.0.1:
version "9.1.0"
resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz"
integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==
@@ -5925,11 +6019,6 @@ get-stdin@^4.0.1:
resolved "https://registry.npmjs.org/get-stdin/-/get-stdin-4.0.1.tgz"
integrity sha512-F5aQMywwJ2n85s4hJPTT9RPxGmubonuB10MNYo17/xph174n2MIR33HRguhzVag10O/npM7SPk73LMZNP+FaWw==
-get-stream@3.0.0, get-stream@^3.0.0:
- version "3.0.0"
- resolved "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz"
- integrity sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==
-
get-stream@^2.2.0:
version "2.3.1"
resolved "https://registry.npmjs.org/get-stream/-/get-stream-2.3.1.tgz"
@@ -5938,6 +6027,11 @@ get-stream@^2.2.0:
object-assign "^4.0.1"
pinkie-promise "^2.0.0"
+get-stream@^3.0.0, get-stream@3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz"
+ integrity sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==
+
get-stream@^4.0.0, get-stream@^4.1.0:
version "4.1.0"
resolved "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz"
@@ -6060,7 +6154,7 @@ global-dirs@^3.0.0:
dependencies:
ini "2.0.0"
-global-modules@2.0.0, global-modules@^2.0.0:
+global-modules@^2.0.0, global-modules@2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz"
integrity sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==
@@ -6088,18 +6182,6 @@ globalthis@^1.0.3:
dependencies:
define-properties "^1.1.3"
-globby@11.0.1:
- version "11.0.1"
- resolved "https://registry.npmjs.org/globby/-/globby-11.0.1.tgz"
- integrity sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ==
- dependencies:
- array-union "^2.1.0"
- dir-glob "^3.0.1"
- fast-glob "^3.1.1"
- ignore "^5.1.4"
- merge2 "^1.3.0"
- slash "^3.0.0"
-
globby@^11.0.1, globby@^11.0.4, globby@^11.1.0:
version "11.1.0"
resolved "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz"
@@ -6136,6 +6218,18 @@ globby@^8.0.1:
pify "^3.0.0"
slash "^1.0.0"
+globby@11.0.1:
+ version "11.0.1"
+ resolved "https://registry.npmjs.org/globby/-/globby-11.0.1.tgz"
+ integrity sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ==
+ dependencies:
+ array-union "^2.1.0"
+ dir-glob "^3.0.1"
+ fast-glob "^3.1.1"
+ ignore "^5.1.4"
+ merge2 "^1.3.0"
+ slash "^3.0.0"
+
globule@^1.0.0:
version "1.3.4"
resolved "https://registry.npmjs.org/globule/-/globule-1.3.4.tgz"
@@ -6247,6 +6341,13 @@ gulp-header@^1.7.1:
lodash.template "^4.4.0"
through2 "^2.0.0"
+gzip-size@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz"
+ integrity sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==
+ dependencies:
+ duplexer "^0.1.2"
+
gzip-size@5.1.1:
version "5.1.1"
resolved "https://registry.npmjs.org/gzip-size/-/gzip-size-5.1.1.tgz"
@@ -6255,13 +6356,6 @@ gzip-size@5.1.1:
duplexer "^0.1.1"
pify "^4.0.1"
-gzip-size@^6.0.0:
- version "6.0.0"
- resolved "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz"
- integrity sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==
- dependencies:
- duplexer "^0.1.2"
-
handle-thing@^2.0.0:
version "2.0.1"
resolved "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz"
@@ -6656,21 +6750,31 @@ htmlparser2@^8.0.1:
domutils "^3.0.1"
entities "^4.4.0"
-http-cache-semantics@3.8.1:
- version "3.8.1"
- resolved "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz"
- integrity sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w==
-
http-cache-semantics@^4.0.0:
version "4.1.1"
resolved "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz"
integrity sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==
+http-cache-semantics@3.8.1:
+ version "3.8.1"
+ resolved "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz"
+ integrity sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w==
+
http-deceiver@^1.2.7:
version "1.2.7"
resolved "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz"
integrity sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==
+http-errors@~1.6.2:
+ version "1.6.3"
+ resolved "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz"
+ integrity sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==
+ dependencies:
+ depd "~1.1.2"
+ inherits "2.0.3"
+ setprototypeof "1.1.0"
+ statuses ">= 1.4.0 < 2"
+
http-errors@2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz"
@@ -6682,16 +6786,6 @@ http-errors@2.0.0:
statuses "2.0.1"
toidentifier "1.0.1"
-http-errors@~1.6.2:
- version "1.6.3"
- resolved "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz"
- integrity sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==
- dependencies:
- depd "~1.1.2"
- inherits "2.0.3"
- setprototypeof "1.1.0"
- statuses ">= 1.4.0 < 2"
-
http-parser-js@>=0.5.1:
version "0.5.8"
resolved "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz"
@@ -6817,16 +6911,16 @@ immediate@^3.2.3:
resolved "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz"
integrity sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==
-immer@8.0.1:
- version "8.0.1"
- resolved "https://registry.npmjs.org/immer/-/immer-8.0.1.tgz"
- integrity sha512-aqXhGP7//Gui2+UrEtvxZxSquQVXTpZ7KDxfCcKAF3Vysvw0CViVaW9RZ1j1xlIYqaaaipBoqdqeibkc18PNvA==
-
immer@^9.0.7:
version "9.0.21"
resolved "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz"
integrity sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==
+immer@8.0.1:
+ version "8.0.1"
+ resolved "https://registry.npmjs.org/immer/-/immer-8.0.1.tgz"
+ integrity sha512-aqXhGP7//Gui2+UrEtvxZxSquQVXTpZ7KDxfCcKAF3Vysvw0CViVaW9RZ1j1xlIYqaaaipBoqdqeibkc18PNvA==
+
import-fresh@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz"
@@ -6888,7 +6982,7 @@ inflight@^1.0.4:
once "^1.3.0"
wrappy "1"
-inherits@2, inherits@2.0.4, inherits@^2.0.0, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.0, inherits@~2.0.3:
+inherits@^2.0.0, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.0, inherits@~2.0.3, inherits@2, inherits@2.0.4:
version "2.0.4"
resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz"
integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
@@ -6898,16 +6992,16 @@ inherits@2.0.3:
resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz"
integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==
-ini@2.0.0:
- version "2.0.0"
- resolved "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz"
- integrity sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==
-
ini@^1.3.4, ini@^1.3.5, ini@~1.3.0:
version "1.3.8"
resolved "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz"
integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==
+ini@2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz"
+ integrity sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==
+
inline-style-parser@0.1.1:
version "0.1.1"
resolved "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz"
@@ -6947,16 +7041,16 @@ ip-regex@^4.1.0:
resolved "https://registry.npmjs.org/ip-regex/-/ip-regex-4.3.0.tgz"
integrity sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q==
-ipaddr.js@1.9.1:
- version "1.9.1"
- resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz"
- integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==
-
ipaddr.js@^2.0.1:
version "2.1.0"
resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz"
integrity sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==
+ipaddr.js@1.9.1:
+ version "1.9.1"
+ resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz"
+ integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==
+
is-absolute-url@^2.0.0:
version "2.1.0"
resolved "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz"
@@ -6969,7 +7063,7 @@ is-accessor-descriptor@^1.0.1:
dependencies:
hasown "^2.0.0"
-is-alphabetical@1.0.4, is-alphabetical@^1.0.0:
+is-alphabetical@^1.0.0, is-alphabetical@1.0.4:
version "1.0.4"
resolved "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz"
integrity sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==
@@ -7246,7 +7340,12 @@ is-path-inside@^3.0.2:
resolved "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz"
integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==
-is-plain-obj@^1.0.0, is-plain-obj@^1.1.0:
+is-plain-obj@^1.0.0:
+ version "1.1.0"
+ resolved "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz"
+ integrity sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==
+
+is-plain-obj@^1.1.0:
version "1.1.0"
resolved "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz"
integrity sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==
@@ -7296,7 +7395,7 @@ is-retry-allowed@^1.0.0, is-retry-allowed@^1.1.0:
resolved "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz"
integrity sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg==
-is-root@2.1.0, is-root@^2.1.0:
+is-root@^2.1.0, is-root@2.1.0:
version "2.1.0"
resolved "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz"
integrity sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==
@@ -7308,7 +7407,12 @@ is-shared-array-buffer@^1.0.2:
dependencies:
call-bind "^1.0.2"
-is-stream@^1.0.0, is-stream@^1.1.0:
+is-stream@^1.0.0:
+ version "1.1.0"
+ resolved "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz"
+ integrity sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==
+
+is-stream@^1.1.0:
version "1.1.0"
resolved "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz"
integrity sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==
@@ -7409,21 +7513,26 @@ is2@^2.0.6:
ip-regex "^4.1.0"
is-url "^1.2.4"
+isarray@^2.0.5:
+ version "2.0.5"
+ resolved "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz"
+ integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==
+
+isarray@~1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz"
+ integrity sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==
+
isarray@0.0.1:
version "0.0.1"
resolved "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz"
integrity sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==
-isarray@1.0.0, isarray@~1.0.0:
+isarray@1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz"
integrity sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==
-isarray@^2.0.5:
- version "2.0.5"
- resolved "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz"
- integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==
-
isexe@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz"
@@ -7515,7 +7624,15 @@ jpegtran-bin@^4.0.0:
resolved "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz"
integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==
-js-yaml@^3.13.1, js-yaml@^3.8.1:
+js-yaml@^3.13.1:
+ version "3.14.1"
+ resolved "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz"
+ integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==
+ dependencies:
+ argparse "^1.0.7"
+ esprima "^4.0.0"
+
+js-yaml@^3.8.1:
version "3.14.1"
resolved "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz"
integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==
@@ -7604,13 +7721,6 @@ jsprim@^1.2.2:
json-schema "0.4.0"
verror "1.10.0"
-keyv@3.0.0:
- version "3.0.0"
- resolved "https://registry.npmjs.org/keyv/-/keyv-3.0.0.tgz"
- integrity sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA==
- dependencies:
- json-buffer "3.0.0"
-
keyv@^3.0.0:
version "3.1.0"
resolved "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz"
@@ -7618,7 +7728,28 @@ keyv@^3.0.0:
dependencies:
json-buffer "3.0.0"
-kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0:
+keyv@3.0.0:
+ version "3.0.0"
+ resolved "https://registry.npmjs.org/keyv/-/keyv-3.0.0.tgz"
+ integrity sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA==
+ dependencies:
+ json-buffer "3.0.0"
+
+kind-of@^3.0.2:
+ version "3.2.2"
+ resolved "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz"
+ integrity sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==
+ dependencies:
+ is-buffer "^1.1.5"
+
+kind-of@^3.0.3:
+ version "3.2.2"
+ resolved "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz"
+ integrity sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==
+ dependencies:
+ is-buffer "^1.1.5"
+
+kind-of@^3.2.0:
version "3.2.2"
resolved "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz"
integrity sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==
@@ -7715,15 +7846,6 @@ loader-runner@^4.2.0:
resolved "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz"
integrity sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==
-loader-utils@2.0.0:
- version "2.0.0"
- resolved "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz"
- integrity sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ==
- dependencies:
- big.js "^5.2.2"
- emojis-list "^3.0.0"
- json5 "^2.1.2"
-
loader-utils@^2.0.0:
version "2.0.4"
resolved "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz"
@@ -7738,6 +7860,15 @@ loader-utils@^3.2.0:
resolved "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.1.tgz"
integrity sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw==
+loader-utils@2.0.0:
+ version "2.0.0"
+ resolved "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz"
+ integrity sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ==
+ dependencies:
+ big.js "^5.2.2"
+ emojis-list "^3.0.0"
+ json5 "^2.1.2"
+
locate-path@^3.0.0:
version "3.0.0"
resolved "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz"
@@ -7890,7 +8021,7 @@ lodash.templatesettings@^4.0.0:
dependencies:
lodash._reinterpolate "^3.0.0"
-lodash.uniq@4.5.0, lodash.uniq@^4.5.0:
+lodash.uniq@^4.5.0, lodash.uniq@4.5.0:
version "4.5.0"
resolved "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz"
integrity sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==
@@ -7935,11 +8066,6 @@ lower-case@^2.0.2:
dependencies:
tslib "^2.0.3"
-lowercase-keys@1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.0.tgz"
- integrity sha512-RPlX0+PHuvxVDZ7xX+EBVAp4RsVxP/TdDSN2mJYdiq1Lc4Hz7EUSjUI7RZrKKlmrIzVhf6Jo2stj7++gVarS0A==
-
lowercase-keys@^1.0.0, lowercase-keys@^1.0.1:
version "1.0.1"
resolved "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz"
@@ -7950,6 +8076,11 @@ lowercase-keys@^2.0.0:
resolved "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz"
integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==
+lowercase-keys@1.0.0:
+ version "1.0.0"
+ resolved "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.0.tgz"
+ integrity sha512-RPlX0+PHuvxVDZ7xX+EBVAp4RsVxP/TdDSN2mJYdiq1Lc4Hz7EUSjUI7RZrKKlmrIzVhf6Jo2stj7++gVarS0A==
+
lpad-align@^1.0.1:
version "1.1.2"
resolved "https://registry.npmjs.org/lpad-align/-/lpad-align-1.1.2.tgz"
@@ -7992,7 +8123,14 @@ lunr@^2.3.8:
resolved "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz"
integrity sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==
-make-dir@^1.0.0, make-dir@^1.2.0:
+make-dir@^1.0.0:
+ version "1.3.0"
+ resolved "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz"
+ integrity sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==
+ dependencies:
+ pify "^3.0.0"
+
+make-dir@^1.2.0:
version "1.3.0"
resolved "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz"
integrity sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==
@@ -8192,24 +8330,57 @@ micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5:
braces "^3.0.2"
picomatch "^2.3.1"
-mime-db@1.52.0, "mime-db@>= 1.43.0 < 2":
- version "1.52.0"
- resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz"
- integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
-
mime-db@^1.28.0, mime-db@~1.33.0:
version "1.33.0"
resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz"
integrity sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==
-mime-types@2.1.18, mime-types@^2.1.12, mime-types@~2.1.17:
+"mime-db@>= 1.43.0 < 2":
+ version "1.52.0"
+ resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz"
+ integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
+
+mime-db@1.52.0:
+ version "1.52.0"
+ resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz"
+ integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
+
+mime-types@^2.1.12, mime-types@~2.1.17, mime-types@2.1.18:
version "2.1.18"
resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz"
integrity sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==
dependencies:
mime-db "~1.33.0"
-mime-types@^2.1.27, mime-types@^2.1.31, mime-types@~2.1.19, mime-types@~2.1.24, mime-types@~2.1.34:
+mime-types@^2.1.27:
+ version "2.1.35"
+ resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
+ integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
+ dependencies:
+ mime-db "1.52.0"
+
+mime-types@^2.1.31:
+ version "2.1.35"
+ resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
+ integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
+ dependencies:
+ mime-db "1.52.0"
+
+mime-types@~2.1.19:
+ version "2.1.35"
+ resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
+ integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
+ dependencies:
+ mime-db "1.52.0"
+
+mime-types@~2.1.24:
+ version "2.1.35"
+ resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
+ integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
+ dependencies:
+ mime-db "1.52.0"
+
+mime-types@~2.1.34:
version "2.1.35"
resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
@@ -8248,14 +8419,7 @@ minimalistic-assert@^1.0.0:
resolved "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz"
integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==
-minimatch@3.0.4:
- version "3.0.4"
- resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz"
- integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==
- dependencies:
- brace-expansion "^1.1.7"
-
-minimatch@3.1.2, minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1:
+minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@3.1.2:
version "3.1.2"
resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz"
integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==
@@ -8269,6 +8433,13 @@ minimatch@~3.0.2:
dependencies:
brace-expansion "^1.1.7"
+minimatch@3.0.4:
+ version "3.0.4"
+ resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz"
+ integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==
+ dependencies:
+ brace-expansion "^1.1.7"
+
minimist@^1.1.3, minimist@^1.2.0, minimist@^1.2.3, minimist@^1.2.5, minimist@^1.2.6:
version "1.2.8"
resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz"
@@ -8287,18 +8458,32 @@ mkdirp-classic@^0.5.2, mkdirp-classic@^0.5.3:
resolved "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz"
integrity sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==
-mkdirp@0.3.0:
- version "0.3.0"
- resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz"
- integrity sha512-OHsdUcVAQ6pOtg5JYWpCBo9W/GySVuwvP9hueRMW7UqshC0tbfzLv8wjySTPm3tfUZ/21CE9E1pJagOA91Pxew==
-
-"mkdirp@>=0.5 0", mkdirp@^0.5.1, mkdirp@^0.5.6, mkdirp@~0.5.1:
+mkdirp@^0.5.1, mkdirp@~0.5.1:
version "0.5.6"
resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz"
integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==
dependencies:
minimist "^1.2.6"
+mkdirp@^0.5.6:
+ version "0.5.6"
+ resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz"
+ integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==
+ dependencies:
+ minimist "^1.2.6"
+
+"mkdirp@>=0.5 0":
+ version "0.5.6"
+ resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz"
+ integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==
+ dependencies:
+ minimist "^1.2.6"
+
+mkdirp@0.3.0:
+ version "0.3.0"
+ resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz"
+ integrity sha512-OHsdUcVAQ6pOtg5JYWpCBo9W/GySVuwvP9hueRMW7UqshC0tbfzLv8wjySTPm3tfUZ/21CE9E1pJagOA91Pxew==
+
moo@^0.5.0:
version "0.5.2"
resolved "https://registry.npmjs.org/moo/-/moo-0.5.2.tgz"
@@ -8309,16 +8494,16 @@ mrmime@^1.0.0:
resolved "https://registry.npmjs.org/mrmime/-/mrmime-1.0.1.tgz"
integrity sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==
+ms@^2.1.1, ms@2.1.2:
+ version "2.1.2"
+ resolved "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz"
+ integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
+
ms@2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz"
integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==
-ms@2.1.2, ms@^2.1.1:
- version "2.1.2"
- resolved "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz"
- integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
-
ms@2.1.3:
version "2.1.3"
resolved "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz"
@@ -8465,15 +8650,6 @@ normalize-range@^0.1.2:
resolved "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz"
integrity sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==
-normalize-url@2.0.1:
- version "2.0.1"
- resolved "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz"
- integrity sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==
- dependencies:
- prepend-http "^2.0.0"
- query-string "^5.0.1"
- sort-keys "^2.0.0"
-
normalize-url@^3.0.0:
version "3.3.0"
resolved "https://registry.npmjs.org/normalize-url/-/normalize-url-3.3.0.tgz"
@@ -8489,6 +8665,15 @@ normalize-url@^6.0.1:
resolved "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz"
integrity sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==
+normalize-url@2.0.1:
+ version "2.0.1"
+ resolved "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz"
+ integrity sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==
+ dependencies:
+ prepend-http "^2.0.0"
+ query-string "^5.0.1"
+ sort-keys "^2.0.0"
+
not@^0.1.0:
version "0.1.0"
resolved "https://registry.npmjs.org/not/-/not-0.1.0.tgz"
@@ -8521,7 +8706,7 @@ nprogress@^0.2.0:
resolved "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz"
integrity sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==
-nth-check@^1.0.2, nth-check@~1.0.1:
+nth-check@^1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz"
integrity sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==
@@ -8535,6 +8720,13 @@ nth-check@^2.0.0, nth-check@^2.0.1:
dependencies:
boolbase "^1.0.0"
+nth-check@~1.0.1:
+ version "1.0.2"
+ resolved "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz"
+ integrity sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==
+ dependencies:
+ boolbase "~1.0.0"
+
num2fraction@^1.2.2:
version "1.2.2"
resolved "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz"
@@ -8985,6 +9177,13 @@ path-parse@^1.0.7:
resolved "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz"
integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
+path-to-regexp@^1.7.0:
+ version "1.8.0"
+ resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz"
+ integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==
+ dependencies:
+ isarray "0.0.1"
+
path-to-regexp@0.1.7:
version "0.1.7"
resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz"
@@ -8995,13 +9194,6 @@ path-to-regexp@2.2.1:
resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz"
integrity sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==
-path-to-regexp@^1.7.0:
- version "1.8.0"
- resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz"
- integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==
- dependencies:
- isarray "0.0.1"
-
path-type@^1.0.0:
version "1.1.0"
resolved "https://registry.npmjs.org/path-type/-/path-type-1.1.0.tgz"
@@ -9048,7 +9240,17 @@ picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3, picomatch@^2.3.1:
resolved "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz"
integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==
-pify@^2.0.0, pify@^2.2.0, pify@^2.3.0:
+pify@^2.0.0:
+ version "2.3.0"
+ resolved "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz"
+ integrity sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==
+
+pify@^2.2.0:
+ version "2.3.0"
+ resolved "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz"
+ integrity sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==
+
+pify@^2.3.0:
version "2.3.0"
resolved "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz"
integrity sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==
@@ -9094,7 +9296,7 @@ pkg-dir@^4.1.0:
dependencies:
find-up "^4.0.0"
-pkg-up@3.1.0, pkg-up@^3.1.0:
+pkg-up@^3.1.0, pkg-up@3.1.0:
version "3.1.0"
resolved "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz"
integrity sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==
@@ -9656,15 +9858,7 @@ postcss-zindex@^5.1.0:
resolved "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz"
integrity sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==
-postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.23, postcss@^7.0.27, postcss@^7.0.32:
- version "7.0.39"
- resolved "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz"
- integrity sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==
- dependencies:
- picocolors "^0.2.1"
- source-map "^0.6.1"
-
-postcss@^8.3.11, postcss@^8.4.14, postcss@^8.4.17, postcss@^8.4.21:
+"postcss@^7.0.0 || ^8.0.1", postcss@^8.0.9, postcss@^8.1.0, postcss@^8.2.15, postcss@^8.2.2, postcss@^8.3.11, postcss@^8.4.14, postcss@^8.4.16, postcss@^8.4.17, postcss@^8.4.21:
version "8.4.31"
resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz"
integrity sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==
@@ -9673,6 +9867,14 @@ postcss@^8.3.11, postcss@^8.4.14, postcss@^8.4.17, postcss@^8.4.21:
picocolors "^1.0.0"
source-map-js "^1.0.2"
+postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.23, postcss@^7.0.27, postcss@^7.0.32:
+ version "7.0.39"
+ resolved "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz"
+ integrity sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==
+ dependencies:
+ picocolors "^0.2.1"
+ source-map "^0.6.1"
+
prebuild-install@^7.1.1:
version "7.1.1"
resolved "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz"
@@ -9741,14 +9943,6 @@ promise@^7.1.1:
dependencies:
asap "~2.0.3"
-prompts@2.4.0:
- version "2.4.0"
- resolved "https://registry.npmjs.org/prompts/-/prompts-2.4.0.tgz"
- integrity sha512-awZAKrk3vN6CroQukBL+R9051a4R3zCZBlJm/HBfrSZ8iTpYix3VX1vU4mveiLpiwmOJT4wokTF9m6HUk4KqWQ==
- dependencies:
- kleur "^3.0.3"
- sisteransi "^1.0.5"
-
prompts@^2.4.2:
version "2.4.2"
resolved "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz"
@@ -9757,6 +9951,14 @@ prompts@^2.4.2:
kleur "^3.0.3"
sisteransi "^1.0.5"
+prompts@2.4.0:
+ version "2.4.0"
+ resolved "https://registry.npmjs.org/prompts/-/prompts-2.4.0.tgz"
+ integrity sha512-awZAKrk3vN6CroQukBL+R9051a4R3zCZBlJm/HBfrSZ8iTpYix3VX1vU4mveiLpiwmOJT4wokTF9m6HUk4KqWQ==
+ dependencies:
+ kleur "^3.0.3"
+ sisteransi "^1.0.5"
+
prop-types-exact@^1.2.0:
version "1.2.0"
resolved "https://registry.npmjs.org/prop-types-exact/-/prop-types-exact-1.2.0.tgz"
@@ -9766,7 +9968,7 @@ prop-types-exact@^1.2.0:
object.assign "^4.1.0"
reflect.ownkeys "^0.2.0"
-prop-types@^15.0.0, prop-types@^15.6.2, prop-types@^15.7.2, prop-types@^15.8.1:
+prop-types@^15.0.0, prop-types@^15.6.2, prop-types@^15.7.2, prop-types@^15.8.1, prop-types@>=15:
version "15.8.1"
resolved "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz"
integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==
@@ -9818,7 +10020,12 @@ punycode@^1.3.2:
resolved "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz"
integrity sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==
-punycode@^2.1.0, punycode@^2.1.1:
+punycode@^2.1.0:
+ version "2.3.1"
+ resolved "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz"
+ integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==
+
+punycode@^2.1.1:
version "2.3.1"
resolved "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz"
integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==
@@ -9840,7 +10047,7 @@ q@^1.1.2:
resolved "https://registry.npmjs.org/q/-/q-1.5.1.tgz"
integrity sha512-kV/CThkXo6xyFEZUugw/+pIOywXcDbFYgSct5cT3gqlbkBE1SJdwy6UQoZvodiWF/ckQLZyDE/Bu1M6gVu5lVw==
-qs@6.11.0, qs@^6.4.0:
+qs@^6.4.0, qs@6.11.0:
version "6.11.0"
resolved "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz"
integrity sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==
@@ -9914,25 +10121,20 @@ randombytes@^2.1.0:
dependencies:
safe-buffer "^5.1.0"
-range-parser@1.2.0:
- version "1.2.0"
- resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz"
- integrity sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==
-
-range-parser@^1.2.1, range-parser@~1.2.1:
+range-parser@^1.2.1:
version "1.2.1"
resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz"
integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==
-raw-body@2.5.2:
- version "2.5.2"
- resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.2.tgz#99febd83b90e08975087e8f1f9419a149366b68a"
- integrity sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==
- dependencies:
- bytes "3.1.2"
- http-errors "2.0.0"
- iconv-lite "0.4.24"
- unpipe "1.0.0"
+range-parser@~1.2.1:
+ version "1.2.1"
+ resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz"
+ integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==
+
+range-parser@1.2.0:
+ version "1.2.0"
+ resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz"
+ integrity sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==
raw-body@~1.1.0:
version "1.1.7"
@@ -9942,7 +10144,17 @@ raw-body@~1.1.0:
bytes "1"
string_decoder "0.10"
-rc@1.2.8, rc@^1.2.7, rc@^1.2.8:
+raw-body@2.5.2:
+ version "2.5.2"
+ resolved "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz"
+ integrity sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==
+ dependencies:
+ bytes "3.1.2"
+ http-errors "2.0.0"
+ iconv-lite "0.4.24"
+ unpipe "1.0.0"
+
+rc@^1.2.7, rc@^1.2.8, rc@1.2.8:
version "1.2.8"
resolved "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz"
integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==
@@ -10022,7 +10234,16 @@ react-dev-utils@^12.0.1:
strip-ansi "^6.0.1"
text-table "^0.2.0"
-react-dom@^16.8.4:
+react-dom@*, "react-dom@^16.6.0 || ^17.0.0 || ^18.0.0", "react-dom@^16.8.4 || ^17", "react-dom@^16.8.4 || ^17.0.0", "react-dom@^17.0.0 || ^16.3.0 || ^15.5.4", react-dom@^17.0.2, "react-dom@>= 16.8.0 < 19.0.0":
+ version "17.0.2"
+ resolved "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz"
+ integrity sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==
+ dependencies:
+ loose-envify "^1.1.0"
+ object-assign "^4.1.1"
+ scheduler "^0.20.2"
+
+react-dom@^16.0.0-0, react-dom@^16.8.4:
version "16.14.0"
resolved "https://registry.npmjs.org/react-dom/-/react-dom-16.14.0.tgz"
integrity sha512-1gCeQXDLoIqMgqD3IO2Ah9bnf0w9kzhwN5q4FGnHZ67hBm9yePzB5JJAIQCc8x3pFnNlwFq4RidZggNAAkzWWw==
@@ -10032,15 +10253,6 @@ react-dom@^16.8.4:
prop-types "^15.6.2"
scheduler "^0.19.1"
-react-dom@^17.0.2:
- version "17.0.2"
- resolved "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz"
- integrity sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==
- dependencies:
- loose-envify "^1.1.0"
- object-assign "^4.1.1"
- scheduler "^0.20.2"
-
react-error-overlay@^6.0.11, react-error-overlay@^6.0.9:
version "6.0.11"
resolved "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz"
@@ -10094,6 +10306,14 @@ react-loadable-ssr-addon-v5-slorber@^1.0.1:
dependencies:
"@babel/runtime" "^7.10.3"
+react-loadable@*, "react-loadable@npm:@docusaurus/react-loadable@5.5.2":
+ version "5.5.2"
+ resolved "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz"
+ integrity sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==
+ dependencies:
+ "@types/react" "*"
+ prop-types "^15.6.2"
+
react-router-config@^5.1.1:
version "5.1.1"
resolved "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz"
@@ -10114,7 +10334,7 @@ react-router-dom@^5.3.3:
tiny-invariant "^1.0.2"
tiny-warning "^1.0.0"
-react-router@5.3.4, react-router@^5.3.3:
+react-router@^5.3.3, react-router@>=5, react-router@5.3.4:
version "5.3.4"
resolved "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz"
integrity sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==
@@ -10148,7 +10368,7 @@ react-textarea-autosize@^8.3.2:
use-composed-ref "^1.3.0"
use-latest "^1.2.1"
-react-waypoint@^10.3.0:
+react-waypoint@^10.3.0, react-waypoint@>=9.0.2:
version "10.3.0"
resolved "https://registry.npmjs.org/react-waypoint/-/react-waypoint-10.3.0.tgz"
integrity sha512-iF1y2c1BsoXuEGz08NoahaLFIGI9gTUAAOKip96HUmylRT6DUtpgoBPjk/Y8dfcFVmfVDvUzWjNXpZyKTOV0SQ==
@@ -10158,7 +10378,15 @@ react-waypoint@^10.3.0:
prop-types "^15.0.0"
react-is "^17.0.1 || ^18.0.0"
-react@^16.8.4:
+react@*, "react@^15.0.2 || ^16.0.0 || ^17.0.0", "react@^15.3.0 || ^16.0.0 || ^17.0.0 || ^18.0.0", "react@^16.13.1 || ^17.0.0", "react@^16.6.0 || ^17.0.0 || ^18.0.0", "react@^16.8.0 || ^17.0.0 || ^18.0.0", "react@^16.8.4 || ^17", "react@^16.8.4 || ^17.0.0", "react@^17.0.0 || ^16.3.0 || ^15.5.4", react@^17.0.2, "react@>= 16.8.0 < 19.0.0", react@>=0.14.9, react@>=0.14.x, react@>=15, react@17.0.2:
+ version "17.0.2"
+ resolved "https://registry.npmjs.org/react/-/react-17.0.2.tgz"
+ integrity sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==
+ dependencies:
+ loose-envify "^1.1.0"
+ object-assign "^4.1.1"
+
+"react@^0.14 || ^15.0.0 || ^16.0.0-alpha", react@^16.0.0-0, react@^16.14.0, react@^16.8.4, "react@0.13.x || 0.14.x || ^15.0.0-0 || ^16.0.0-0":
version "16.14.0"
resolved "https://registry.npmjs.org/react/-/react-16.14.0.tgz"
integrity sha512-0X2CImDkJGApiAlcf0ODKIneSwBPhqJawOa5wCtKbu7ZECrmS26NvtSILynQ66cgkT/RJ4LidJOc3bUESwmU8g==
@@ -10167,14 +10395,6 @@ react@^16.8.4:
object-assign "^4.1.1"
prop-types "^15.6.2"
-react@^17.0.2:
- version "17.0.2"
- resolved "https://registry.npmjs.org/react/-/react-17.0.2.tgz"
- integrity sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==
- dependencies:
- loose-envify "^1.1.0"
- object-assign "^4.1.1"
-
read-pkg-up@^1.0.1:
version "1.0.1"
resolved "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-1.0.1.tgz"
@@ -10192,7 +10412,59 @@ read-pkg@^1.0.0:
normalize-package-data "^2.3.2"
path-type "^1.0.0"
-readable-stream@^2.0.0, readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.2.2, readable-stream@^2.3.0, readable-stream@^2.3.5, readable-stream@~2.3.6:
+readable-stream@^2.0.0:
+ version "2.3.8"
+ resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz"
+ integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==
+ dependencies:
+ core-util-is "~1.0.0"
+ inherits "~2.0.3"
+ isarray "~1.0.0"
+ process-nextick-args "~2.0.0"
+ safe-buffer "~5.1.1"
+ string_decoder "~1.1.1"
+ util-deprecate "~1.0.1"
+
+readable-stream@^2.0.1:
+ version "2.3.8"
+ resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz"
+ integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==
+ dependencies:
+ core-util-is "~1.0.0"
+ inherits "~2.0.3"
+ isarray "~1.0.0"
+ process-nextick-args "~2.0.0"
+ safe-buffer "~5.1.1"
+ string_decoder "~1.1.1"
+ util-deprecate "~1.0.1"
+
+readable-stream@^2.0.2:
+ version "2.3.8"
+ resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz"
+ integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==
+ dependencies:
+ core-util-is "~1.0.0"
+ inherits "~2.0.3"
+ isarray "~1.0.0"
+ process-nextick-args "~2.0.0"
+ safe-buffer "~5.1.1"
+ string_decoder "~1.1.1"
+ util-deprecate "~1.0.1"
+
+readable-stream@^2.2.2:
+ version "2.3.8"
+ resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz"
+ integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==
+ dependencies:
+ core-util-is "~1.0.0"
+ inherits "~2.0.3"
+ isarray "~1.0.0"
+ process-nextick-args "~2.0.0"
+ safe-buffer "~5.1.1"
+ string_decoder "~1.1.1"
+ util-deprecate "~1.0.1"
+
+readable-stream@^2.3.0, readable-stream@^2.3.5:
version "2.3.8"
resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz"
integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==
@@ -10214,6 +10486,19 @@ readable-stream@^3.0.6, readable-stream@^3.1.1, readable-stream@^3.4.0:
string_decoder "^1.1.1"
util-deprecate "^1.0.1"
+readable-stream@~2.3.6:
+ version "2.3.8"
+ resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz"
+ integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==
+ dependencies:
+ core-util-is "~1.0.0"
+ inherits "~2.0.3"
+ isarray "~1.0.0"
+ process-nextick-args "~2.0.0"
+ safe-buffer "~5.1.1"
+ string_decoder "~1.1.1"
+ util-deprecate "~1.0.1"
+
readdirp@~3.6.0:
version "3.6.0"
resolved "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz"
@@ -10233,13 +10518,6 @@ rechoir@^0.6.2:
dependencies:
resolve "^1.1.6"
-recursive-readdir@2.2.2:
- version "2.2.2"
- resolved "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.2.tgz"
- integrity sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg==
- dependencies:
- minimatch "3.0.4"
-
recursive-readdir@^2.2.2:
version "2.2.3"
resolved "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz"
@@ -10247,6 +10525,13 @@ recursive-readdir@^2.2.2:
dependencies:
minimatch "^3.0.5"
+recursive-readdir@2.2.2:
+ version "2.2.2"
+ resolved "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.2.tgz"
+ integrity sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg==
+ dependencies:
+ minimatch "3.0.4"
+
redent@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/redent/-/redent-1.0.0.tgz"
@@ -10528,7 +10813,7 @@ resolve@^1.1.6, resolve@^1.10.0, resolve@^1.14.2, resolve@^1.3.2:
path-parse "^1.0.7"
supports-preserve-symlinks-flag "^1.0.0"
-responselike@1.0.2, responselike@^1.0.2:
+responselike@^1.0.2, responselike@1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz"
integrity sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ==
@@ -10560,7 +10845,7 @@ rgba-regex@^1.0.0:
resolved "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz"
integrity sha512-zgn5OjNQXLUTdq8m17KdaicF6w89TZs8ZU8y0AYENIU6wG8GG6LLm0yLSiPY8DmaYmHdgRW8rnApjoT0fQRfMg==
-rimraf@2, rimraf@^2.5.4:
+rimraf@^2.5.4:
version "2.7.1"
resolved "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz"
integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==
@@ -10574,6 +10859,13 @@ rimraf@^3.0.2:
dependencies:
glob "^7.1.3"
+rimraf@2:
+ version "2.7.1"
+ resolved "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz"
+ integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==
+ dependencies:
+ glob "^7.1.3"
+
rst-selector-parser@^2.2.3:
version "2.2.3"
resolved "https://registry.npmjs.org/rst-selector-parser/-/rst-selector-parser-2.2.3.tgz"
@@ -10621,15 +10913,20 @@ safe-array-concat@^1.0.0, safe-array-concat@^1.0.1:
has-symbols "^1.0.3"
isarray "^2.0.5"
-safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1:
+safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@>=5.1.0, safe-buffer@~5.2.0, safe-buffer@5.2.1:
+ version "5.2.1"
+ resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz"
+ integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
+
+safe-buffer@~5.1.0, safe-buffer@~5.1.1:
version "5.1.2"
resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz"
integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
-safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.2.0:
- version "5.2.1"
- resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz"
- integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
+safe-buffer@5.1.2:
+ version "5.1.2"
+ resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz"
+ integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
safe-json-parse@~1.0.1:
version "1.0.1"
@@ -10652,7 +10949,7 @@ safe-regex@^1.1.0:
dependencies:
ret "~0.1.10"
-"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0:
+safer-buffer@^2.0.2, safer-buffer@^2.1.0, "safer-buffer@>= 2.1.2 < 3", safer-buffer@~2.1.0:
version "2.1.2"
resolved "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz"
integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==
@@ -10678,15 +10975,6 @@ scheduler@^0.20.2:
loose-envify "^1.1.0"
object-assign "^4.1.1"
-schema-utils@2.7.0:
- version "2.7.0"
- resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz"
- integrity sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==
- dependencies:
- "@types/json-schema" "^7.0.4"
- ajv "^6.12.2"
- ajv-keywords "^3.4.1"
-
schema-utils@^2.6.5:
version "2.7.1"
resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz"
@@ -10696,7 +10984,25 @@ schema-utils@^2.6.5:
ajv "^6.12.4"
ajv-keywords "^3.5.2"
-schema-utils@^3.0.0, schema-utils@^3.1.1, schema-utils@^3.2.0:
+schema-utils@^3.0.0:
+ version "3.3.0"
+ resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz"
+ integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==
+ dependencies:
+ "@types/json-schema" "^7.0.8"
+ ajv "^6.12.5"
+ ajv-keywords "^3.5.2"
+
+schema-utils@^3.1.1:
+ version "3.3.0"
+ resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz"
+ integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==
+ dependencies:
+ "@types/json-schema" "^7.0.8"
+ ajv "^6.12.5"
+ ajv-keywords "^3.5.2"
+
+schema-utils@^3.2.0:
version "3.3.0"
resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz"
integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==
@@ -10715,6 +11021,20 @@ schema-utils@^4.0.0:
ajv-formats "^2.1.1"
ajv-keywords "^5.1.0"
+schema-utils@2.7.0:
+ version "2.7.0"
+ resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz"
+ integrity sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==
+ dependencies:
+ "@types/json-schema" "^7.0.4"
+ ajv "^6.12.2"
+ ajv-keywords "^3.4.1"
+
+"search-insights@>= 1 < 3":
+ version "2.7.0"
+ resolved "https://registry.npmjs.org/search-insights/-/search-insights-2.7.0.tgz"
+ integrity sha512-GLbVaGgzYEKMvuJbHRhLi1qoBFnjXZGZ6l4LxOYPCp4lI2jDRB3jPU9/XNhMwv6kvnA9slTreq6pvK+b3o3aqg==
+
section-matter@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz"
@@ -10761,12 +11081,42 @@ semver-truncate@^1.1.2:
dependencies:
semver "^5.3.0"
-"semver@2 || 3 || 4 || 5", semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.6.0, semver@^5.7.0, semver@^5.7.1:
+semver@^5.3.0:
version "5.7.2"
resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz"
integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==
-semver@^6.0.0, semver@^6.2.0, semver@^6.3.0, semver@^6.3.1:
+semver@^5.4.1:
+ version "5.7.2"
+ resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz"
+ integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==
+
+semver@^5.5.0:
+ version "5.7.2"
+ resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz"
+ integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==
+
+semver@^5.6.0, semver@^5.7.0, semver@^5.7.1:
+ version "5.7.2"
+ resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz"
+ integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==
+
+semver@^6.0.0:
+ version "6.3.1"
+ resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz"
+ integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==
+
+semver@^6.2.0:
+ version "6.3.1"
+ resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz"
+ integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==
+
+semver@^6.3.0:
+ version "6.3.1"
+ resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz"
+ integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==
+
+semver@^6.3.1:
version "6.3.1"
resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz"
integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==
@@ -10778,6 +11128,11 @@ semver@^7.3.2, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.3.8, semve
dependencies:
lru-cache "^6.0.0"
+"semver@2 || 3 || 4 || 5":
+ version "5.7.2"
+ resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz"
+ integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==
+
send@0.18.0:
version "0.18.0"
resolved "https://registry.npmjs.org/send/-/send-0.18.0.tgz"
@@ -10904,6 +11259,20 @@ shallowequal@^1.1.0:
resolved "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz"
integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==
+sharp@*, sharp@^0.32.6:
+ version "0.32.6"
+ resolved "https://registry.npmjs.org/sharp/-/sharp-0.32.6.tgz"
+ integrity sha512-KyLTWwgcR9Oe4d9HwCwNM2l7+J0dUQwn/yf7S0EnTtb0eVS4RxO0eUSvxPtzT4F3SY+C4K6fqdv/DO27sJ/v/w==
+ dependencies:
+ color "^4.2.3"
+ detect-libc "^2.0.2"
+ node-addon-api "^6.1.0"
+ prebuild-install "^7.1.1"
+ semver "^7.5.4"
+ simple-get "^4.0.1"
+ tar-fs "^3.0.4"
+ tunnel-agent "^0.6.0"
+
sharp@^0.30.7:
version "0.30.7"
resolved "https://registry.npmjs.org/sharp/-/sharp-0.30.7.tgz"
@@ -10918,20 +11287,6 @@ sharp@^0.30.7:
tar-fs "^2.1.1"
tunnel-agent "^0.6.0"
-sharp@^0.32.6:
- version "0.32.6"
- resolved "https://registry.npmjs.org/sharp/-/sharp-0.32.6.tgz"
- integrity sha512-KyLTWwgcR9Oe4d9HwCwNM2l7+J0dUQwn/yf7S0EnTtb0eVS4RxO0eUSvxPtzT4F3SY+C4K6fqdv/DO27sJ/v/w==
- dependencies:
- color "^4.2.3"
- detect-libc "^2.0.2"
- node-addon-api "^6.1.0"
- prebuild-install "^7.1.1"
- semver "^7.5.4"
- simple-get "^4.0.1"
- tar-fs "^3.0.4"
- tunnel-agent "^0.6.0"
-
shebang-command@^1.2.0:
version "1.2.0"
resolved "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz"
@@ -10956,16 +11311,16 @@ shebang-regex@^3.0.0:
resolved "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz"
integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==
-shell-quote@1.7.2:
- version "1.7.2"
- resolved "https://registry.npmjs.org/shell-quote/-/shell-quote-1.7.2.tgz"
- integrity sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg==
-
shell-quote@^1.7.3:
version "1.8.1"
resolved "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz"
integrity sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==
+shell-quote@1.7.2:
+ version "1.7.2"
+ resolved "https://registry.npmjs.org/shell-quote/-/shell-quote-1.7.2.tgz"
+ integrity sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg==
+
shelljs@^0.8.4, shelljs@^0.8.5:
version "0.8.5"
resolved "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz"
@@ -11153,7 +11508,12 @@ source-map-url@^0.4.0:
resolved "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz"
integrity sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==
-source-map@^0.5.0, source-map@^0.5.6:
+source-map@^0.5.0:
+ version "0.5.7"
+ resolved "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz"
+ integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==
+
+source-map@^0.5.6:
version "0.5.7"
resolved "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz"
integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==
@@ -11271,16 +11631,16 @@ static-extend@^0.1.1:
define-property "^0.2.5"
object-copy "^0.1.0"
-statuses@2.0.1:
- version "2.0.1"
- resolved "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz"
- integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==
-
"statuses@>= 1.4.0 < 2":
version "1.5.0"
resolved "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz"
integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==
+statuses@2.0.1:
+ version "2.0.1"
+ resolved "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz"
+ integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==
+
std-env@^3.0.1:
version "3.3.3"
resolved "https://registry.npmjs.org/std-env/-/std-env-3.3.3.tgz"
@@ -11299,12 +11659,58 @@ strict-uri-encode@^1.0.0:
resolved "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz"
integrity sha512-R3f198pcvnB+5IpnBlRkphuE9n46WyVl8I39W/ZUTZLz4nqSP/oLYUrcnJrw462Ds8he4YKMov2efsTIw1BDGQ==
+string_decoder@^1.1.1:
+ version "1.3.0"
+ resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz"
+ integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==
+ dependencies:
+ safe-buffer "~5.2.0"
+
+string_decoder@~1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz"
+ integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
+ dependencies:
+ safe-buffer "~5.1.0"
+
+string_decoder@0.10:
+ version "0.10.31"
+ resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz"
+ integrity sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==
+
string-template@~0.2.1:
version "0.2.1"
resolved "https://registry.npmjs.org/string-template/-/string-template-0.2.1.tgz"
integrity sha512-Yptehjogou2xm4UJbxJ4CxgZx12HBfeystp0y3x7s4Dj32ltVVG1Gg8YhKjHZkHicuKpZX/ffilA8505VbUbpw==
-"string-width@^1.0.2 || 2 || 3 || 4", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3:
+"string-width@^1.0.2 || 2 || 3 || 4":
+ version "4.2.3"
+ resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz"
+ integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
+ dependencies:
+ emoji-regex "^8.0.0"
+ is-fullwidth-code-point "^3.0.0"
+ strip-ansi "^6.0.1"
+
+string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.2:
+ version "4.2.3"
+ resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz"
+ integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
+ dependencies:
+ emoji-regex "^8.0.0"
+ is-fullwidth-code-point "^3.0.0"
+ strip-ansi "^6.0.1"
+
+string-width@^4.2.0:
+ version "4.2.3"
+ resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz"
+ integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
+ dependencies:
+ emoji-regex "^8.0.0"
+ is-fullwidth-code-point "^3.0.0"
+ strip-ansi "^6.0.1"
+
+string-width@^4.2.3:
version "4.2.3"
resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz"
integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
@@ -11349,25 +11755,6 @@ string.prototype.trimstart@^1.0.7:
define-properties "^1.2.0"
es-abstract "^1.22.1"
-string_decoder@0.10:
- version "0.10.31"
- resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz"
- integrity sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==
-
-string_decoder@^1.1.1:
- version "1.3.0"
- resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz"
- integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==
- dependencies:
- safe-buffer "~5.2.0"
-
-string_decoder@~1.1.1:
- version "1.1.1"
- resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz"
- integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
- dependencies:
- safe-buffer "~5.1.0"
-
stringify-object@^3.3.0:
version "3.3.0"
resolved "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz"
@@ -11377,13 +11764,6 @@ stringify-object@^3.3.0:
is-obj "^1.0.1"
is-regexp "^1.0.0"
-strip-ansi@6.0.0:
- version "6.0.0"
- resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz"
- integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==
- dependencies:
- ansi-regex "^5.0.0"
-
strip-ansi@^3.0.0:
version "3.0.1"
resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz"
@@ -11405,6 +11785,13 @@ strip-ansi@^7.0.1:
dependencies:
ansi-regex "^6.0.1"
+strip-ansi@6.0.0:
+ version "6.0.0"
+ resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz"
+ integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==
+ dependencies:
+ ansi-regex "^5.0.0"
+
strip-bom-string@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz"
@@ -11468,7 +11855,7 @@ strnum@^1.0.5:
resolved "https://registry.npmjs.org/strnum/-/strnum-1.0.5.tgz"
integrity sha512-J8bbNyKKXl5qYcR36TIO8W3mVGVHrmmxsd5PAItGkmyzwJvybiw2IVq5nqd0i4LSNSkB/sx9VHllbfFdr9k1JA==
-style-to-object@0.3.0, style-to-object@^0.3.0:
+style-to-object@^0.3.0, style-to-object@0.3.0:
version "0.3.0"
resolved "https://registry.npmjs.org/style-to-object/-/style-to-object-0.3.0.tgz"
integrity sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA==
@@ -11528,7 +11915,26 @@ svg-parser@^2.0.4:
resolved "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz"
integrity sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==
-svgo@^1.0.0, svgo@^1.3.2:
+svgo@^1.0.0:
+ version "1.3.2"
+ resolved "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz"
+ integrity sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==
+ dependencies:
+ chalk "^2.4.1"
+ coa "^2.0.2"
+ css-select "^2.0.0"
+ css-select-base-adapter "^0.1.1"
+ css-tree "1.0.0-alpha.37"
+ csso "^4.0.2"
+ js-yaml "^3.13.1"
+ mkdirp "~0.5.1"
+ object.values "^1.1.0"
+ sax "~1.2.4"
+ stable "^0.1.8"
+ unquote "~1.1.1"
+ util.promisify "~1.0.0"
+
+svgo@^1.3.2:
version "1.3.2"
resolved "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz"
integrity sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==
@@ -11664,11 +12070,16 @@ terser@^5.10.0, terser@^5.16.8:
commander "^2.20.0"
source-map-support "~0.5.20"
-text-table@0.2.0, text-table@^0.2.0:
+text-table@^0.2.0, text-table@0.2.0:
version "0.2.0"
resolved "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz"
integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==
+through@^2.3.8:
+ version "2.3.8"
+ resolved "https://registry.npmjs.org/through/-/through-2.3.8.tgz"
+ integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==
+
through2@^2.0.0:
version "2.0.5"
resolved "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz"
@@ -11677,11 +12088,6 @@ through2@^2.0.0:
readable-stream "~2.3.6"
xtend "~4.0.1"
-through@^2.3.8:
- version "2.3.8"
- resolved "https://registry.npmjs.org/through/-/through-2.3.8.tgz"
- integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==
-
thunky@^1.0.2:
version "1.1.0"
resolved "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz"
@@ -11944,6 +12350,11 @@ typedarray@^0.0.6:
resolved "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz"
integrity sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==
+"typescript@>= 2.7":
+ version "5.1.6"
+ resolved "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz"
+ integrity sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==
+
ua-parser-js@^1.0.35:
version "1.0.35"
resolved "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.35.tgz"
@@ -11998,10 +12409,10 @@ unicode-property-aliases-ecmascript@^2.0.0:
resolved "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz"
integrity sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==
-unified@9.2.0:
- version "9.2.0"
- resolved "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz"
- integrity sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==
+unified@^9.0.0, unified@^9.2.2:
+ version "9.2.2"
+ resolved "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz"
+ integrity sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==
dependencies:
bail "^1.0.0"
extend "^3.0.0"
@@ -12010,10 +12421,10 @@ unified@9.2.0:
trough "^1.0.0"
vfile "^4.0.0"
-unified@^9.0.0, unified@^9.2.2:
- version "9.2.2"
- resolved "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz"
- integrity sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==
+unified@9.2.0:
+ version "9.2.0"
+ resolved "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz"
+ integrity sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==
dependencies:
bail "^1.0.0"
extend "^3.0.0"
@@ -12049,7 +12460,7 @@ unique-string@^2.0.0:
dependencies:
crypto-random-string "^2.0.0"
-unist-builder@2.0.3, unist-builder@^2.0.0:
+unist-builder@^2.0.0, unist-builder@2.0.3:
version "2.0.3"
resolved "https://registry.npmjs.org/unist-builder/-/unist-builder-2.0.3.tgz"
integrity sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw==
@@ -12105,7 +12516,7 @@ unist-util-visit-parents@^3.0.0:
"@types/unist" "^2.0.0"
unist-util-is "^4.0.0"
-unist-util-visit@2.0.3, unist-util-visit@^2.0.0, unist-util-visit@^2.0.3:
+unist-util-visit@^2.0.0, unist-util-visit@^2.0.3, unist-util-visit@2.0.3:
version "2.0.3"
resolved "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz"
integrity sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==
@@ -12119,7 +12530,7 @@ universalify@^2.0.0:
resolved "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz"
integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==
-unpipe@1.0.0, unpipe@~1.0.0:
+unpipe@~1.0.0, unpipe@1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz"
integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==
@@ -12278,7 +12689,12 @@ utils-merge@1.0.1:
resolved "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz"
integrity sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==
-uuid@^3.0.1, uuid@^3.3.2:
+uuid@^3.0.1:
+ version "3.4.0"
+ resolved "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz"
+ integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==
+
+uuid@^3.3.2:
version "3.4.0"
resolved "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz"
integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==
@@ -12407,7 +12823,7 @@ webpack-bundle-analyzer@^4.5.0:
webpack-dev-middleware@^5.3.1:
version "5.3.4"
- resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz#eb7b39281cbce10e104eb2b8bf2b63fce49a3517"
+ resolved "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz"
integrity sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==
dependencies:
colorette "^2.0.10"
@@ -12465,7 +12881,7 @@ webpack-sources@^3.2.2, webpack-sources@^3.2.3:
resolved "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz"
integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==
-webpack@^5.73.0:
+"webpack@^4.0.0 || ^5.0.0", "webpack@^4.37.0 || ^5.0.0", webpack@^5.0.0, webpack@^5.1.0, webpack@^5.20.0, webpack@^5.73.0, "webpack@>= 4", webpack@>=2, "webpack@>=4.41.1 || 5.x", "webpack@3 || 4 || 5":
version "5.88.2"
resolved "https://registry.npmjs.org/webpack/-/webpack-5.88.2.tgz"
integrity sha512-JmcgNZ1iKj+aiR0OvTYtWQqJwq37Pf683dY9bVORwVbUrDhLhdn/PlO2sHsFHPkj7sHNQF3JwaAkp49V+Sq1tQ==
@@ -12505,7 +12921,7 @@ webpackbar@^5.0.2:
pretty-time "^1.1.0"
std-env "^3.0.1"
-websocket-driver@>=0.5.1, websocket-driver@^0.7.4:
+websocket-driver@^0.7.4, websocket-driver@>=0.5.1:
version "0.7.4"
resolved "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz"
integrity sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==
@@ -12558,7 +12974,14 @@ which-typed-array@^1.1.11, which-typed-array@^1.1.13:
gopd "^1.0.1"
has-tostringtag "^1.0.0"
-which@^1.2.9, which@^1.3.1:
+which@^1.2.9:
+ version "1.3.1"
+ resolved "https://registry.npmjs.org/which/-/which-1.3.1.tgz"
+ integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==
+ dependencies:
+ isexe "^2.0.0"
+
+which@^1.3.1:
version "1.3.1"
resolved "https://registry.npmjs.org/which/-/which-1.3.1.tgz"
integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==
diff --git a/enterprise/enterprise_callbacks/example_logging_api.py b/enterprise/enterprise_callbacks/example_logging_api.py
index 57ea99a674..c3d3f5e63f 100644
--- a/enterprise/enterprise_callbacks/example_logging_api.py
+++ b/enterprise/enterprise_callbacks/example_logging_api.py
@@ -18,10 +18,6 @@ async def log_event(request: Request):
return {"message": "Request received successfully"}
except Exception as e:
- print(f"Error processing request: {str(e)}")
- import traceback
-
- traceback.print_exc()
raise HTTPException(status_code=500, detail="Internal Server Error")
diff --git a/enterprise/enterprise_callbacks/generic_api_callback.py b/enterprise/enterprise_callbacks/generic_api_callback.py
index cf1d22e8f8..ba189b149c 100644
--- a/enterprise/enterprise_callbacks/generic_api_callback.py
+++ b/enterprise/enterprise_callbacks/generic_api_callback.py
@@ -120,6 +120,5 @@ class GenericAPILogger:
)
return response
except Exception as e:
- traceback.print_exc()
- verbose_logger.debug(f"Generic - {str(e)}\n{traceback.format_exc()}")
+ verbose_logger.error(f"Generic - {str(e)}\n{traceback.format_exc()}")
pass
diff --git a/enterprise/enterprise_hooks/banned_keywords.py b/enterprise/enterprise_hooks/banned_keywords.py
index acd390d798..4cf68b2fd9 100644
--- a/enterprise/enterprise_hooks/banned_keywords.py
+++ b/enterprise/enterprise_hooks/banned_keywords.py
@@ -82,7 +82,7 @@ class _ENTERPRISE_BannedKeywords(CustomLogger):
except HTTPException as e:
raise e
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(traceback.format_exc())
async def async_post_call_success_hook(
self,
diff --git a/enterprise/enterprise_hooks/blocked_user_list.py b/enterprise/enterprise_hooks/blocked_user_list.py
index cbc14d2c2b..8e642a026f 100644
--- a/enterprise/enterprise_hooks/blocked_user_list.py
+++ b/enterprise/enterprise_hooks/blocked_user_list.py
@@ -118,4 +118,4 @@ class _ENTERPRISE_BlockedUserList(CustomLogger):
except HTTPException as e:
raise e
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(traceback.format_exc())
diff --git a/enterprise/enterprise_hooks/llm_guard.py b/enterprise/enterprise_hooks/llm_guard.py
index 3a15ca52b9..9db10cf79c 100644
--- a/enterprise/enterprise_hooks/llm_guard.py
+++ b/enterprise/enterprise_hooks/llm_guard.py
@@ -92,7 +92,7 @@ class _ENTERPRISE_LLMGuard(CustomLogger):
},
)
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(traceback.format_exc())
raise e
def should_proceed(self, user_api_key_dict: UserAPIKeyAuth, data: dict) -> bool:
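The enterprise hooks above swap `traceback.print_exc()` for `verbose_proxy_logger.error(traceback.format_exc())`, so stack traces flow through the proxy logger instead of landing on stderr. A minimal sketch of the same pattern, assuming `verbose_proxy_logger` is imported the way these hooks already import it:

```python
import traceback

from litellm._logging import verbose_proxy_logger


def run_guarded_check(data: dict) -> None:
    try:
        ...  # hook-specific validation would go here
    except Exception:
        # record the full stack trace through the proxy logger rather than printing it
        verbose_proxy_logger.error(traceback.format_exc())
```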
diff --git a/litellm/__init__.py b/litellm/__init__.py
index 9fb614396e..b6e6d97dc8 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -5,7 +5,7 @@ warnings.filterwarnings("ignore", message=".*conflict with protected namespace.*
### INIT VARIABLES ###
import threading, requests, os
from typing import Callable, List, Optional, Dict, Union, Any, Literal
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
+from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.caching import Cache
from litellm._logging import (
set_verbose,
@@ -60,6 +60,7 @@ _async_failure_callback: List[Callable] = (
pre_call_rules: List[Callable] = []
post_call_rules: List[Callable] = []
turn_off_message_logging: Optional[bool] = False
+redact_messages_in_exceptions: Optional[bool] = False
store_audit_logs = False # Enterprise feature, allow users to see audit logs
## end of callbacks #############
@@ -233,6 +234,7 @@ max_end_user_budget: Optional[float] = None
#### RELIABILITY ####
request_timeout: float = 6000
module_level_aclient = AsyncHTTPHandler(timeout=request_timeout)
+module_level_client = HTTPHandler(timeout=request_timeout)
num_retries: Optional[int] = None # per model endpoint
default_fallbacks: Optional[List] = None
fallbacks: Optional[List] = None
@@ -766,7 +768,7 @@ from .llms.sagemaker import SagemakerConfig
from .llms.ollama import OllamaConfig
from .llms.ollama_chat import OllamaChatConfig
from .llms.maritalk import MaritTalkConfig
-from .llms.bedrock_httpx import AmazonCohereChatConfig
+from .llms.bedrock_httpx import AmazonCohereChatConfig, AmazonConverseConfig
from .llms.bedrock import (
AmazonTitanConfig,
AmazonAI21Config,
@@ -808,6 +810,7 @@ from .exceptions import (
APIConnectionError,
APIResponseValidationError,
UnprocessableEntityError,
+ InternalServerError,
LITELLM_EXCEPTION_TYPES,
)
from .budget_manager import BudgetManager
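`litellm/__init__.py` now exposes two knobs used later in this patch: `redact_messages_in_exceptions` (read by the Slack alerting changes below) and `module_level_client`, a synchronous `HTTPHandler` created with the same `request_timeout` as the existing async client. A rough sketch of using them, assuming both attributes are reachable on the top-level `litellm` module as the hunk above suggests:

```python
import litellm

# strip prompts/inputs from exception-driven alerts (see slack_alerting.py further down)
litellm.redact_messages_in_exceptions = True

# reuse the shared synchronous HTTP client instead of constructing a new HTTPHandler
sync_client = litellm.module_level_client
```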
diff --git a/litellm/_logging.py b/litellm/_logging.py
index 1ff6e45ddb..ab7a08f976 100644
--- a/litellm/_logging.py
+++ b/litellm/_logging.py
@@ -1,5 +1,6 @@
import logging, os, json
from logging import Formatter
+import traceback
set_verbose = False
json_logs = bool(os.getenv("JSON_LOGS", False))
diff --git a/litellm/caching.py b/litellm/caching.py
index c8c1736d86..d1f3387ee4 100644
--- a/litellm/caching.py
+++ b/litellm/caching.py
@@ -253,7 +253,6 @@ class RedisCache(BaseCache):
str(e),
value,
)
- traceback.print_exc()
raise e
async def async_scan_iter(self, pattern: str, count: int = 100) -> list:
@@ -313,7 +312,6 @@ class RedisCache(BaseCache):
str(e),
value,
)
- traceback.print_exc()
key = self.check_and_fix_namespace(key=key)
async with _redis_client as redis_client:
@@ -352,7 +350,6 @@ class RedisCache(BaseCache):
str(e),
value,
)
- traceback.print_exc()
async def async_set_cache_pipeline(self, cache_list, ttl=None):
"""
@@ -413,7 +410,6 @@ class RedisCache(BaseCache):
str(e),
cache_value,
)
- traceback.print_exc()
async def batch_cache_write(self, key, value, **kwargs):
print_verbose(
@@ -458,7 +454,6 @@ class RedisCache(BaseCache):
str(e),
value,
)
- traceback.print_exc()
raise e
async def flush_cache_buffer(self):
@@ -495,8 +490,9 @@ class RedisCache(BaseCache):
return self._get_cache_logic(cached_response=cached_response)
except Exception as e:
# NON blocking - notify users Redis is throwing an exception
- traceback.print_exc()
- logging.debug("LiteLLM Caching: get() - Got exception from REDIS: ", e)
+ verbose_logger.error(
+                "LiteLLM Caching: get() - Got exception from REDIS: %s", str(e)
+ )
def batch_get_cache(self, key_list) -> dict:
"""
@@ -646,10 +642,9 @@ class RedisCache(BaseCache):
error=e,
call_type="sync_ping",
)
- print_verbose(
+ verbose_logger.error(
f"LiteLLM Redis Cache PING: - Got exception from REDIS : {str(e)}"
)
- traceback.print_exc()
raise e
async def ping(self) -> bool:
@@ -683,10 +678,9 @@ class RedisCache(BaseCache):
call_type="async_ping",
)
)
- print_verbose(
+ verbose_logger.error(
f"LiteLLM Redis Cache PING: - Got exception from REDIS : {str(e)}"
)
- traceback.print_exc()
raise e
async def delete_cache_keys(self, keys):
@@ -1138,22 +1132,23 @@ class S3Cache(BaseCache):
cached_response = ast.literal_eval(cached_response)
if type(cached_response) is not dict:
cached_response = dict(cached_response)
- print_verbose(
+ verbose_logger.debug(
f"Got S3 Cache: key: {key}, cached_response {cached_response}. Type Response {type(cached_response)}"
)
return cached_response
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "NoSuchKey":
- print_verbose(
+ verbose_logger.error(
f"S3 Cache: The specified key '{key}' does not exist in the S3 bucket."
)
return None
except Exception as e:
# NON blocking - notify users S3 is throwing an exception
- traceback.print_exc()
- print_verbose(f"S3 Caching: get_cache() - Got exception from S3: {e}")
+ verbose_logger.error(
+ f"S3 Caching: get_cache() - Got exception from S3: {e}"
+ )
async def async_get_cache(self, key, **kwargs):
return self.get_cache(key=key, **kwargs)
@@ -1234,8 +1229,7 @@ class DualCache(BaseCache):
return result
except Exception as e:
- print_verbose(f"LiteLLM Cache: Excepton async add_cache: {str(e)}")
- traceback.print_exc()
+            verbose_logger.error(f"LiteLLM Cache: Exception async add_cache: {str(e)}")
raise e
def get_cache(self, key, local_only: bool = False, **kwargs):
@@ -1262,7 +1256,7 @@ class DualCache(BaseCache):
print_verbose(f"get cache: cache result: {result}")
return result
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(traceback.format_exc())
def batch_get_cache(self, keys: list, local_only: bool = False, **kwargs):
try:
@@ -1295,7 +1289,7 @@ class DualCache(BaseCache):
print_verbose(f"async batch get cache: cache result: {result}")
return result
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(traceback.format_exc())
async def async_get_cache(self, key, local_only: bool = False, **kwargs):
# Try to fetch from in-memory cache first
@@ -1328,7 +1322,7 @@ class DualCache(BaseCache):
print_verbose(f"get cache: cache result: {result}")
return result
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(traceback.format_exc())
async def async_batch_get_cache(
self, keys: list, local_only: bool = False, **kwargs
@@ -1368,7 +1362,7 @@ class DualCache(BaseCache):
return result
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(traceback.format_exc())
async def async_set_cache(self, key, value, local_only: bool = False, **kwargs):
print_verbose(
@@ -1381,8 +1375,8 @@ class DualCache(BaseCache):
if self.redis_cache is not None and local_only == False:
await self.redis_cache.async_set_cache(key, value, **kwargs)
except Exception as e:
- print_verbose(f"LiteLLM Cache: Excepton async add_cache: {str(e)}")
- traceback.print_exc()
+            verbose_logger.error(f"LiteLLM Cache: Exception async add_cache: {str(e)}")
+ verbose_logger.debug(traceback.format_exc())
async def async_batch_set_cache(
self, cache_list: list, local_only: bool = False, **kwargs
@@ -1404,8 +1398,8 @@ class DualCache(BaseCache):
cache_list=cache_list, ttl=kwargs.get("ttl", None)
)
except Exception as e:
- print_verbose(f"LiteLLM Cache: Excepton async add_cache: {str(e)}")
- traceback.print_exc()
+            verbose_logger.error(f"LiteLLM Cache: Exception async add_cache: {str(e)}")
+ verbose_logger.debug(traceback.format_exc())
async def async_increment_cache(
self, key, value: float, local_only: bool = False, **kwargs
@@ -1429,8 +1423,8 @@ class DualCache(BaseCache):
return result
except Exception as e:
- print_verbose(f"LiteLLM Cache: Excepton async add_cache: {str(e)}")
- traceback.print_exc()
+            verbose_logger.error(f"LiteLLM Cache: Exception async add_cache: {str(e)}")
+ verbose_logger.debug(traceback.format_exc())
raise e
def flush_cache(self):
@@ -1846,8 +1840,8 @@ class Cache:
)
self.cache.set_cache(cache_key, cached_data, **kwargs)
except Exception as e:
- print_verbose(f"LiteLLM Cache: Excepton add_cache: {str(e)}")
- traceback.print_exc()
+            verbose_logger.error(f"LiteLLM Cache: Exception add_cache: {str(e)}")
+ verbose_logger.debug(traceback.format_exc())
pass
async def async_add_cache(self, result, *args, **kwargs):
@@ -1864,8 +1858,8 @@ class Cache:
)
await self.cache.async_set_cache(cache_key, cached_data, **kwargs)
except Exception as e:
- print_verbose(f"LiteLLM Cache: Excepton add_cache: {str(e)}")
- traceback.print_exc()
+            verbose_logger.error(f"LiteLLM Cache: Exception add_cache: {str(e)}")
+ verbose_logger.debug(traceback.format_exc())
async def async_add_cache_pipeline(self, result, *args, **kwargs):
"""
@@ -1897,8 +1891,8 @@ class Cache:
)
await asyncio.gather(*tasks)
except Exception as e:
- print_verbose(f"LiteLLM Cache: Excepton add_cache: {str(e)}")
- traceback.print_exc()
+            verbose_logger.error(f"LiteLLM Cache: Exception add_cache: {str(e)}")
+ verbose_logger.debug(traceback.format_exc())
async def batch_cache_write(self, result, *args, **kwargs):
cache_key, cached_data, kwargs = self._add_cache_logic(
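Across `caching.py` the replacement pattern is consistent: the human-readable message is logged at ERROR via `verbose_logger`, while the full traceback is demoted to DEBUG, so normal logs stay compact and the stack trace is still recoverable with debug logging enabled. A small sketch of that split (the logger import is assumed to match the one `caching.py` already uses):

```python
import traceback

from litellm._logging import verbose_logger


def add_to_cache_safely(cache, key: str, value: object) -> None:
    try:
        cache.set_cache(key, value)
    except Exception as e:
        # concise message at ERROR; full traceback only when debug logging is on
        verbose_logger.error(f"LiteLLM Cache: Exception add_cache: {str(e)}")
        verbose_logger.debug(traceback.format_exc())
```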
diff --git a/litellm/exceptions.py b/litellm/exceptions.py
index f84cf31668..484e843b6d 100644
--- a/litellm/exceptions.py
+++ b/litellm/exceptions.py
@@ -638,6 +638,7 @@ LITELLM_EXCEPTION_TYPES = [
APIConnectionError,
APIResponseValidationError,
OpenAIError,
+ InternalServerError,
]
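Adding `InternalServerError` to `LITELLM_EXCEPTION_TYPES` means code that branches on LiteLLM's mapped exception classes now catches provider-side 500s as well. A hedged sketch, assuming both names are re-exported from the top-level package as the `__init__.py` hunk above indicates:

```python
import litellm

try:
    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hello"}],
    )
except tuple(litellm.LITELLM_EXCEPTION_TYPES) as e:
    # InternalServerError is now part of this tuple alongside the other mapped errors
    print(type(e).__name__, str(e))
```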
diff --git a/litellm/integrations/aispend.py b/litellm/integrations/aispend.py
index 2fe8ea0dfa..ca284e62e5 100644
--- a/litellm/integrations/aispend.py
+++ b/litellm/integrations/aispend.py
@@ -169,6 +169,5 @@ class AISpendLogger:
print_verbose(f"AISpend Logging - final data object: {data}")
except:
- # traceback.print_exc()
print_verbose(f"AISpend Logging Error - {traceback.format_exc()}")
pass
diff --git a/litellm/integrations/berrispend.py b/litellm/integrations/berrispend.py
index 7d30b706c8..d428fb54d8 100644
--- a/litellm/integrations/berrispend.py
+++ b/litellm/integrations/berrispend.py
@@ -178,6 +178,5 @@ class BerriSpendLogger:
print_verbose(f"BerriSpend Logging - final data object: {data}")
response = requests.post(url, headers=headers, json=data)
except:
- # traceback.print_exc()
print_verbose(f"BerriSpend Logging Error - {traceback.format_exc()}")
pass
diff --git a/litellm/integrations/clickhouse.py b/litellm/integrations/clickhouse.py
index 0c38b86267..f8b6b1bbf0 100644
--- a/litellm/integrations/clickhouse.py
+++ b/litellm/integrations/clickhouse.py
@@ -297,6 +297,5 @@ class ClickhouseLogger:
# make request to endpoint with payload
verbose_logger.debug(f"Clickhouse Logger - final response = {response}")
except Exception as e:
- traceback.print_exc()
verbose_logger.debug(f"Clickhouse - {str(e)}\n{traceback.format_exc()}")
pass
diff --git a/litellm/integrations/custom_logger.py b/litellm/integrations/custom_logger.py
index e192cdaea7..1d447da1f2 100644
--- a/litellm/integrations/custom_logger.py
+++ b/litellm/integrations/custom_logger.py
@@ -115,7 +115,6 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac
)
print_verbose(f"Custom Logger - model call details: {kwargs}")
except:
- traceback.print_exc()
print_verbose(f"Custom Logger Error - {traceback.format_exc()}")
async def async_log_input_event(
@@ -130,7 +129,6 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac
)
print_verbose(f"Custom Logger - model call details: {kwargs}")
except:
- traceback.print_exc()
print_verbose(f"Custom Logger Error - {traceback.format_exc()}")
def log_event(
@@ -146,7 +144,6 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac
end_time,
)
except:
- # traceback.print_exc()
print_verbose(f"Custom Logger Error - {traceback.format_exc()}")
pass
@@ -163,6 +160,5 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac
end_time,
)
except:
- # traceback.print_exc()
print_verbose(f"Custom Logger Error - {traceback.format_exc()}")
pass
diff --git a/litellm/integrations/datadog.py b/litellm/integrations/datadog.py
index 6d5e08faff..d835b3d670 100644
--- a/litellm/integrations/datadog.py
+++ b/litellm/integrations/datadog.py
@@ -134,7 +134,6 @@ class DataDogLogger:
f"Datadog Layer Logging - final response object: {response_obj}"
)
except Exception as e:
- traceback.print_exc()
verbose_logger.debug(
f"Datadog Layer Error - {str(e)}\n{traceback.format_exc()}"
)
diff --git a/litellm/integrations/dynamodb.py b/litellm/integrations/dynamodb.py
index 21ccabe4b7..847f930ece 100644
--- a/litellm/integrations/dynamodb.py
+++ b/litellm/integrations/dynamodb.py
@@ -85,6 +85,5 @@ class DyanmoDBLogger:
)
return response
except:
- traceback.print_exc()
print_verbose(f"DynamoDB Layer Error - {traceback.format_exc()}")
pass
diff --git a/litellm/integrations/helicone.py b/litellm/integrations/helicone.py
index 85e73258ea..8ea18a7d5b 100644
--- a/litellm/integrations/helicone.py
+++ b/litellm/integrations/helicone.py
@@ -112,6 +112,5 @@ class HeliconeLogger:
)
print_verbose(f"Helicone Logging - Error {response.text}")
except:
- # traceback.print_exc()
print_verbose(f"Helicone Logging Error - {traceback.format_exc()}")
pass
diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py
index 80d90f38aa..7fe2e9f227 100644
--- a/litellm/integrations/langfuse.py
+++ b/litellm/integrations/langfuse.py
@@ -220,9 +220,11 @@ class LangFuseLogger:
verbose_logger.info(f"Langfuse Layer Logging - logging success")
return {"trace_id": trace_id, "generation_id": generation_id}
- except:
- traceback.print_exc()
- verbose_logger.debug(f"Langfuse Layer Error - {traceback.format_exc()}")
+ except Exception as e:
+ verbose_logger.error(
+                "Langfuse Layer Error(): Exception occurred - {}".format(str(e))
+ )
+ verbose_logger.debug(traceback.format_exc())
return {"trace_id": None, "generation_id": None}
async def _async_log_event(
diff --git a/litellm/integrations/langsmith.py b/litellm/integrations/langsmith.py
index 3e25b4ee77..48185afeea 100644
--- a/litellm/integrations/langsmith.py
+++ b/litellm/integrations/langsmith.py
@@ -44,7 +44,9 @@ class LangsmithLogger:
print_verbose(
f"Langsmith Logging - project_name: {project_name}, run_name {run_name}"
)
- langsmith_base_url = os.getenv("LANGSMITH_BASE_URL", "https://api.smith.langchain.com")
+ langsmith_base_url = os.getenv(
+ "LANGSMITH_BASE_URL", "https://api.smith.langchain.com"
+ )
try:
print_verbose(
@@ -89,9 +91,7 @@ class LangsmithLogger:
}
url = f"{langsmith_base_url}/runs"
- print_verbose(
- f"Langsmith Logging - About to send data to {url} ..."
- )
+ print_verbose(f"Langsmith Logging - About to send data to {url} ...")
response = requests.post(
url=url,
json=data,
@@ -106,6 +106,5 @@ class LangsmithLogger:
f"Langsmith Layer Logging - final response object: {response_obj}"
)
except:
- # traceback.print_exc()
print_verbose(f"Langsmith Layer Error - {traceback.format_exc()}")
pass
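`LangsmithLogger` now reads `LANGSMITH_BASE_URL` and only falls back to `https://api.smith.langchain.com` when it is unset, so a self-hosted LangSmith deployment can be targeted purely through the environment. A sketch, with a placeholder URL and the assumption that the API key is also picked up from the environment:

```python
import os

import litellm

os.environ["LANGSMITH_BASE_URL"] = "https://langsmith.internal.example.com"  # placeholder
os.environ["LANGSMITH_API_KEY"] = "lsv2_pt_..."  # assumed to be read by the logger

# route successful calls to the Langsmith callback
litellm.success_callback = ["langsmith"]
```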
diff --git a/litellm/integrations/logfire_logger.py b/litellm/integrations/logfire_logger.py
index e27d848fb4..b4ab00820e 100644
--- a/litellm/integrations/logfire_logger.py
+++ b/litellm/integrations/logfire_logger.py
@@ -171,7 +171,6 @@ class LogfireLogger:
f"Logfire Layer Logging - final response object: {response_obj}"
)
except Exception as e:
- traceback.print_exc()
verbose_logger.debug(
f"Logfire Layer Error - {str(e)}\n{traceback.format_exc()}"
)
diff --git a/litellm/integrations/lunary.py b/litellm/integrations/lunary.py
index 2e16e44a14..141ea64884 100644
--- a/litellm/integrations/lunary.py
+++ b/litellm/integrations/lunary.py
@@ -14,6 +14,7 @@ def parse_usage(usage):
"prompt": usage["prompt_tokens"] if "prompt_tokens" in usage else 0,
}
+
def parse_tool_calls(tool_calls):
if tool_calls is None:
return None
@@ -26,13 +27,13 @@ def parse_tool_calls(tool_calls):
"function": {
"name": tool_call.function.name,
"arguments": tool_call.function.arguments,
- }
+ },
}
return serialized
-
+
return [clean_tool_call(tool_call) for tool_call in tool_calls]
-
+
def parse_messages(input):
@@ -176,6 +177,5 @@ class LunaryLogger:
)
except:
- # traceback.print_exc()
print_verbose(f"Lunary Logging Error - {traceback.format_exc()}")
pass
diff --git a/litellm/integrations/opentelemetry.py b/litellm/integrations/opentelemetry.py
index 3fc50848e8..aa67839141 100644
--- a/litellm/integrations/opentelemetry.py
+++ b/litellm/integrations/opentelemetry.py
@@ -14,8 +14,11 @@ if TYPE_CHECKING:
else:
Span = Any
-LITELLM_TRACER_NAME = "litellm"
-LITELLM_RESOURCE = {"service.name": "litellm"}
+
+LITELLM_TRACER_NAME = os.getenv("OTEL_TRACER_NAME", "litellm")
+LITELLM_RESOURCE = {
+ "service.name": os.getenv("OTEL_SERVICE_NAME", "litellm"),
+}
@dataclass
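The OpenTelemetry tracer name and the resource `service.name` are no longer hard-coded to `litellm`; they are read from `OTEL_TRACER_NAME` and `OTEL_SERVICE_NAME`. A small sketch of overriding them (values are illustrative, and they must be set before the integration module is imported, since it reads them at import time):

```python
import os

# read once, at module import, by litellm/integrations/opentelemetry.py
os.environ["OTEL_TRACER_NAME"] = "my-llm-gateway"
os.environ["OTEL_SERVICE_NAME"] = "my-llm-gateway"
```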
diff --git a/litellm/integrations/prometheus.py b/litellm/integrations/prometheus.py
index 6fbc6ca4ce..af0d1d310b 100644
--- a/litellm/integrations/prometheus.py
+++ b/litellm/integrations/prometheus.py
@@ -109,8 +109,8 @@ class PrometheusLogger:
end_user_id, user_api_key, model, user_api_team, user_id
).inc()
except Exception as e:
- traceback.print_exc()
- verbose_logger.debug(
- f"prometheus Layer Error - {str(e)}\n{traceback.format_exc()}"
+ verbose_logger.error(
+                "prometheus Layer Error(): Exception occurred - {}".format(str(e))
)
+ verbose_logger.debug(traceback.format_exc())
pass
diff --git a/litellm/integrations/s3.py b/litellm/integrations/s3.py
index d131e44f0e..0796d1048b 100644
--- a/litellm/integrations/s3.py
+++ b/litellm/integrations/s3.py
@@ -180,6 +180,5 @@ class S3Logger:
print_verbose(f"s3 Layer Logging - final response object: {response_obj}")
return response
except Exception as e:
- traceback.print_exc()
verbose_logger.debug(f"s3 Layer Error - {str(e)}\n{traceback.format_exc()}")
pass
diff --git a/litellm/integrations/slack_alerting.py b/litellm/integrations/slack_alerting.py
index c98d60f1fd..21415fb6d6 100644
--- a/litellm/integrations/slack_alerting.py
+++ b/litellm/integrations/slack_alerting.py
@@ -326,8 +326,8 @@ class SlackAlerting(CustomLogger):
end_time=end_time,
)
)
- if litellm.turn_off_message_logging:
- messages = "Message not logged. `litellm.turn_off_message_logging=True`."
+ if litellm.turn_off_message_logging or litellm.redact_messages_in_exceptions:
+            messages = "Message not logged. Redacted via litellm.redact_messages_in_exceptions / litellm.turn_off_message_logging."
request_info = f"\nRequest Model: `{model}`\nAPI Base: `{api_base}`\nMessages: `{messages}`"
slow_message = f"`Responses are slow - {round(time_difference_float,2)}s response time > Alerting threshold: {self.alerting_threshold}s`"
if time_difference_float > self.alerting_threshold:
@@ -567,9 +567,12 @@ class SlackAlerting(CustomLogger):
except:
messages = ""
- if litellm.turn_off_message_logging:
+ if (
+ litellm.turn_off_message_logging
+ or litellm.redact_messages_in_exceptions
+ ):
messages = (
- "Message not logged. `litellm.turn_off_message_logging=True`."
+                    "Message not logged. Redacted via litellm.redact_messages_in_exceptions / litellm.turn_off_message_logging."
)
request_info = f"\nRequest Model: `{model}`\nMessages: `{messages}`"
else:
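With this hunk, both the slow-response and failure alerts drop the request `messages` when either `litellm.turn_off_message_logging` or the new `litellm.redact_messages_in_exceptions` flag is truthy, replacing them with a redaction notice. A minimal sketch of enabling the new flag in code:

```python
import litellm

# Slack alerts will show a redaction notice instead of the request messages
litellm.redact_messages_in_exceptions = True
```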
diff --git a/litellm/integrations/supabase.py b/litellm/integrations/supabase.py
index 4e6bf517f3..7309342e4c 100644
--- a/litellm/integrations/supabase.py
+++ b/litellm/integrations/supabase.py
@@ -110,6 +110,5 @@ class Supabase:
)
except:
- # traceback.print_exc()
print_verbose(f"Supabase Logging Error - {traceback.format_exc()}")
pass
diff --git a/litellm/integrations/weights_biases.py b/litellm/integrations/weights_biases.py
index a56233b22f..1ac535c4f2 100644
--- a/litellm/integrations/weights_biases.py
+++ b/litellm/integrations/weights_biases.py
@@ -217,6 +217,5 @@ class WeightsBiasesLogger:
f"W&B Logging Logging - final response object: {response_obj}"
)
except:
- # traceback.print_exc()
print_verbose(f"W&B Logging Layer Error - {traceback.format_exc()}")
pass
diff --git a/litellm/llms/bedrock_httpx.py b/litellm/llms/bedrock_httpx.py
index dbd7e7c695..59945a5857 100644
--- a/litellm/llms/bedrock_httpx.py
+++ b/litellm/llms/bedrock_httpx.py
@@ -38,6 +38,8 @@ from .prompt_templates.factory import (
extract_between_tags,
parse_xml_params,
contains_tag,
+ _bedrock_converse_messages_pt,
+ _bedrock_tools_pt,
)
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from .base import BaseLLM
@@ -45,6 +47,11 @@ import httpx # type: ignore
from .bedrock import BedrockError, convert_messages_to_prompt, ModelResponseIterator
from litellm.types.llms.bedrock import *
import urllib.parse
+from litellm.types.llms.openai import (
+ ChatCompletionResponseMessage,
+ ChatCompletionToolCallChunk,
+ ChatCompletionToolCallFunctionChunk,
+)
class AmazonCohereChatConfig:
@@ -118,6 +125,8 @@ class AmazonCohereChatConfig:
"presence_penalty",
"seed",
"stop",
+ "tools",
+ "tool_choice",
]
def map_openai_params(
@@ -176,6 +185,37 @@ async def make_call(
return completion_stream
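+
+# Synchronous counterpart to make_call: POSTs via a blocking HTTPHandler, checks the
+# status code, and wraps the streamed response bytes in AWSEventStreamDecoder.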
+def make_sync_call(
+ client: Optional[HTTPHandler],
+ api_base: str,
+ headers: dict,
+ data: str,
+ model: str,
+ messages: list,
+ logging_obj,
+):
+ if client is None:
+ client = HTTPHandler() # Create a new client if none provided
+
+ response = client.post(api_base, headers=headers, data=data, stream=True)
+
+ if response.status_code != 200:
+ raise BedrockError(status_code=response.status_code, message=response.read())
+
+ decoder = AWSEventStreamDecoder(model=model)
+ completion_stream = decoder.iter_bytes(response.iter_bytes(chunk_size=1024))
+
+ # LOGGING
+ logging_obj.post_call(
+ input=messages,
+ api_key="",
+ original_response=completion_stream, # Pass the completion stream for logging
+ additional_args={"complete_input_dict": data},
+ )
+
+ return completion_stream
+
+
class BedrockLLM(BaseLLM):
"""
Example call
@@ -1000,12 +1040,12 @@ class BedrockLLM(BaseLLM):
if isinstance(timeout, float) or isinstance(timeout, int):
timeout = httpx.Timeout(timeout)
_params["timeout"] = timeout
- self.client = AsyncHTTPHandler(**_params) # type: ignore
+ client = AsyncHTTPHandler(**_params) # type: ignore
else:
- self.client = client # type: ignore
+ client = client # type: ignore
try:
- response = await self.client.post(api_base, headers=headers, data=data) # type: ignore
+ response = await client.post(api_base, headers=headers, data=data) # type: ignore
response.raise_for_status()
except httpx.HTTPStatusError as err:
error_code = err.response.status_code
@@ -1069,6 +1109,745 @@ class BedrockLLM(BaseLLM):
return super().embedding(*args, **kwargs)
+class AmazonConverseConfig:
+ """
+ Reference - https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html
+ #2 - https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features
+ """
+
+ maxTokens: Optional[int]
+ stopSequences: Optional[List[str]]
+ temperature: Optional[int]
+ topP: Optional[int]
+
+ def __init__(
+ self,
+ maxTokens: Optional[int] = None,
+ stopSequences: Optional[List[str]] = None,
+ temperature: Optional[int] = None,
+ topP: Optional[int] = None,
+ ) -> None:
+ locals_ = locals()
+ for key, value in locals_.items():
+ if key != "self" and value is not None:
+ setattr(self.__class__, key, value)
+
+ @classmethod
+ def get_config(cls):
+ return {
+ k: v
+ for k, v in cls.__dict__.items()
+ if not k.startswith("__")
+ and not isinstance(
+ v,
+ (
+ types.FunctionType,
+ types.BuiltinFunctionType,
+ classmethod,
+ staticmethod,
+ ),
+ )
+ and v is not None
+ }
+
+ def get_supported_openai_params(self, model: str) -> List[str]:
+ supported_params = [
+ "max_tokens",
+ "stream",
+ "stream_options",
+ "stop",
+ "temperature",
+ "top_p",
+ "extra_headers",
+ ]
+
+ if (
+ model.startswith("anthropic")
+ or model.startswith("mistral")
+ or model.startswith("cohere")
+ ):
+ supported_params.append("tools")
+
+ if model.startswith("anthropic") or model.startswith("mistral"):
+ # only anthropic and mistral support tool choice config. otherwise (E.g. cohere) will fail the call - https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ToolChoice.html
+ supported_params.append("tool_choice")
+
+ return supported_params
+
+ def map_tool_choice_values(
+ self, model: str, tool_choice: Union[str, dict], drop_params: bool
+ ) -> Optional[ToolChoiceValuesBlock]:
+ if tool_choice == "none":
+ if litellm.drop_params is True or drop_params is True:
+ return None
+ else:
+ raise litellm.utils.UnsupportedParamsError(
+                    message="Bedrock doesn't support tool_choice={}. To drop it from the call, set `litellm.drop_params = True`.".format(
+ tool_choice
+ ),
+ status_code=400,
+ )
+ elif tool_choice == "required":
+ return ToolChoiceValuesBlock(any={})
+ elif tool_choice == "auto":
+ return ToolChoiceValuesBlock(auto={})
+ elif isinstance(tool_choice, dict):
+ # only supported for anthropic + mistral models - https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ToolChoice.html
+ specific_tool = SpecificToolChoiceBlock(
+ name=tool_choice.get("function", {}).get("name", "")
+ )
+ return ToolChoiceValuesBlock(tool=specific_tool)
+ else:
+ raise litellm.utils.UnsupportedParamsError(
+                message="Bedrock doesn't support tool_choice={}. Supported tool_choice values=['auto', 'required', json object]. To drop it from the call, set `litellm.drop_params = True`.".format(
+ tool_choice
+ ),
+ status_code=400,
+ )
+
+ def get_supported_image_types(self) -> List[str]:
+ return ["png", "jpeg", "gif", "webp"]
+
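+    # Translate OpenAI-style kwargs to Converse field names: max_tokens -> maxTokens,
+    # top_p -> topP, stop -> stop sequences, and tools/tool_choice -> the Bedrock tool config.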
+ def map_openai_params(
+ self,
+ model: str,
+ non_default_params: dict,
+ optional_params: dict,
+ drop_params: bool,
+ ) -> dict:
+ for param, value in non_default_params.items():
+ if param == "max_tokens":
+ optional_params["maxTokens"] = value
+ if param == "stream":
+ optional_params["stream"] = value
+ if param == "stop":
+ if isinstance(value, str):
+ value = [value]
+                optional_params["stopSequences"] = value
+ if param == "temperature":
+ optional_params["temperature"] = value
+ if param == "top_p":
+ optional_params["topP"] = value
+ if param == "tools":
+ optional_params["tools"] = value
+ if param == "tool_choice":
+ _tool_choice_value = self.map_tool_choice_values(
+ model=model, tool_choice=value, drop_params=drop_params # type: ignore
+ )
+ if _tool_choice_value is not None:
+ optional_params["tool_choice"] = _tool_choice_value
+ return optional_params
+
+
+class BedrockConverseLLM(BaseLLM):
+ def __init__(self) -> None:
+ super().__init__()
+
+ def process_response(
+ self,
+ model: str,
+ response: Union[requests.Response, httpx.Response],
+ model_response: ModelResponse,
+ stream: bool,
+ logging_obj: Logging,
+ optional_params: dict,
+ api_key: str,
+ data: Union[dict, str],
+ messages: List,
+ print_verbose,
+ encoding,
+ ) -> Union[ModelResponse, CustomStreamWrapper]:
+
+ ## LOGGING
+ logging_obj.post_call(
+ input=messages,
+ api_key=api_key,
+ original_response=response.text,
+ additional_args={"complete_input_dict": data},
+ )
+ print_verbose(f"raw model_response: {response.text}")
+
+ ## RESPONSE OBJECT
+ try:
+ completion_response = ConverseResponseBlock(**response.json()) # type: ignore
+ except Exception as e:
+ raise BedrockError(
+ message="Received={}, Error converting to valid response block={}. File an issue if litellm error - https://github.com/BerriAI/litellm/issues".format(
+ response.text, str(e)
+ ),
+ status_code=422,
+ )
+
+ """
+ Bedrock Response Object has optional message block
+
+ completion_response["output"].get("message", None)
+
+ A message block looks like this (Example 1):
+ "output": {
+ "message": {
+ "role": "assistant",
+ "content": [
+ {
+ "text": "Is there anything else you'd like to talk about? Perhaps I can help with some economic questions or provide some information about economic concepts?"
+ }
+ ]
+ }
+ },
+ (Example 2):
+ "output": {
+ "message": {
+ "role": "assistant",
+ "content": [
+ {
+ "toolUse": {
+ "toolUseId": "tooluse_hbTgdi0CSLq_hM4P8csZJA",
+ "name": "top_song",
+ "input": {
+ "sign": "WZPZ"
+ }
+ }
+ }
+ ]
+ }
+ }
+
+ """
+ message: Optional[MessageBlock] = completion_response["output"]["message"]
+ chat_completion_message: ChatCompletionResponseMessage = {"role": "assistant"}
+ content_str = ""
+ tools: List[ChatCompletionToolCallChunk] = []
+ if message is not None:
+ for content in message["content"]:
+ """
+ - Content is either a tool response or text
+ """
+ if "text" in content:
+ content_str += content["text"]
+ if "toolUse" in content:
+ _function_chunk = ChatCompletionToolCallFunctionChunk(
+ name=content["toolUse"]["name"],
+ arguments=json.dumps(content["toolUse"]["input"]),
+ )
+ _tool_response_chunk = ChatCompletionToolCallChunk(
+ id=content["toolUse"]["toolUseId"],
+ type="function",
+ function=_function_chunk,
+ )
+ tools.append(_tool_response_chunk)
+ chat_completion_message["content"] = content_str
+ chat_completion_message["tool_calls"] = tools
+
+        ## CALCULATING USAGE - the Converse API returns token usage in the response body
+ input_tokens = completion_response["usage"]["inputTokens"]
+ output_tokens = completion_response["usage"]["outputTokens"]
+ total_tokens = completion_response["usage"]["totalTokens"]
+
+ model_response.choices = [
+ litellm.Choices(
+ finish_reason=map_finish_reason(completion_response["stopReason"]),
+ index=0,
+ message=litellm.Message(**chat_completion_message),
+ )
+ ]
+ model_response["created"] = int(time.time())
+ model_response["model"] = model
+ usage = Usage(
+ prompt_tokens=input_tokens,
+ completion_tokens=output_tokens,
+ total_tokens=total_tokens,
+ )
+ setattr(model_response, "usage", usage)
+
+ return model_response
+
+ def encode_model_id(self, model_id: str) -> str:
+ """
+ Double encode the model ID to ensure it matches the expected double-encoded format.
+ Args:
+ model_id (str): The model ID to encode.
+ Returns:
+ str: The double-encoded model ID.
+ """
+ return urllib.parse.quote(model_id, safe="")
+
+ def get_credentials(
+ self,
+ aws_access_key_id: Optional[str] = None,
+ aws_secret_access_key: Optional[str] = None,
+ aws_region_name: Optional[str] = None,
+ aws_session_name: Optional[str] = None,
+ aws_profile_name: Optional[str] = None,
+ aws_role_name: Optional[str] = None,
+ aws_web_identity_token: Optional[str] = None,
+ ):
+ """
+ Return a boto3.Credentials object
+ """
+ import boto3
+
+ ## CHECK IS 'os.environ/' passed in
+ params_to_check: List[Optional[str]] = [
+ aws_access_key_id,
+ aws_secret_access_key,
+ aws_region_name,
+ aws_session_name,
+ aws_profile_name,
+ aws_role_name,
+ aws_web_identity_token,
+ ]
+
+ # Iterate over parameters and update if needed
+ for i, param in enumerate(params_to_check):
+ if param and param.startswith("os.environ/"):
+ _v = get_secret(param)
+ if _v is not None and isinstance(_v, str):
+ params_to_check[i] = _v
+ # Assign updated values back to parameters
+ (
+ aws_access_key_id,
+ aws_secret_access_key,
+ aws_region_name,
+ aws_session_name,
+ aws_profile_name,
+ aws_role_name,
+ aws_web_identity_token,
+ ) = params_to_check
+
+ ### CHECK STS ###
+ if (
+ aws_web_identity_token is not None
+ and aws_role_name is not None
+ and aws_session_name is not None
+ ):
+ oidc_token = get_secret(aws_web_identity_token)
+
+ if oidc_token is None:
+ raise BedrockError(
+ message="OIDC token could not be retrieved from secret manager.",
+ status_code=401,
+ )
+
+ sts_client = boto3.client("sts")
+
+ # https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html
+ # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sts/client/assume_role_with_web_identity.html
+ sts_response = sts_client.assume_role_with_web_identity(
+ RoleArn=aws_role_name,
+ RoleSessionName=aws_session_name,
+ WebIdentityToken=oidc_token,
+ DurationSeconds=3600,
+ )
+
+ session = boto3.Session(
+ aws_access_key_id=sts_response["Credentials"]["AccessKeyId"],
+ aws_secret_access_key=sts_response["Credentials"]["SecretAccessKey"],
+ aws_session_token=sts_response["Credentials"]["SessionToken"],
+ region_name=aws_region_name,
+ )
+
+ return session.get_credentials()
+ elif aws_role_name is not None and aws_session_name is not None:
+ sts_client = boto3.client(
+ "sts",
+ aws_access_key_id=aws_access_key_id, # [OPTIONAL]
+ aws_secret_access_key=aws_secret_access_key, # [OPTIONAL]
+ )
+
+ sts_response = sts_client.assume_role(
+ RoleArn=aws_role_name, RoleSessionName=aws_session_name
+ )
+
+ # Extract the credentials from the response and convert to Session Credentials
+ sts_credentials = sts_response["Credentials"]
+ from botocore.credentials import Credentials
+
+ credentials = Credentials(
+ access_key=sts_credentials["AccessKeyId"],
+ secret_key=sts_credentials["SecretAccessKey"],
+ token=sts_credentials["SessionToken"],
+ )
+ return credentials
+ elif aws_profile_name is not None: ### CHECK SESSION ###
+ # uses auth values from AWS profile usually stored in ~/.aws/credentials
+ client = boto3.Session(profile_name=aws_profile_name)
+
+ return client.get_credentials()
+ else:
+ session = boto3.Session(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ region_name=aws_region_name,
+ )
+
+ return session.get_credentials()
+
+ async def async_streaming(
+ self,
+ model: str,
+ messages: list,
+ api_base: str,
+ model_response: ModelResponse,
+ print_verbose: Callable,
+ data: str,
+ timeout: Optional[Union[float, httpx.Timeout]],
+ encoding,
+ logging_obj,
+ stream,
+ optional_params: dict,
+ litellm_params=None,
+ logger_fn=None,
+ headers={},
+ client: Optional[AsyncHTTPHandler] = None,
+ ) -> CustomStreamWrapper:
+ streaming_response = CustomStreamWrapper(
+ completion_stream=None,
+ make_call=partial(
+ make_call,
+ client=client,
+ api_base=api_base,
+ headers=headers,
+ data=data,
+ model=model,
+ messages=messages,
+ logging_obj=logging_obj,
+ ),
+ model=model,
+ custom_llm_provider="bedrock",
+ logging_obj=logging_obj,
+ )
+ return streaming_response
+
+ async def async_completion(
+ self,
+ model: str,
+ messages: list,
+ api_base: str,
+ model_response: ModelResponse,
+ print_verbose: Callable,
+ data: str,
+ timeout: Optional[Union[float, httpx.Timeout]],
+ encoding,
+ logging_obj,
+ stream,
+ optional_params: dict,
+ litellm_params=None,
+ logger_fn=None,
+ headers={},
+ client: Optional[AsyncHTTPHandler] = None,
+ ) -> Union[ModelResponse, CustomStreamWrapper]:
+ if client is None:
+ _params = {}
+ if timeout is not None:
+ if isinstance(timeout, float) or isinstance(timeout, int):
+ timeout = httpx.Timeout(timeout)
+ _params["timeout"] = timeout
+ client = AsyncHTTPHandler(**_params) # type: ignore
+ else:
+ client = client # type: ignore
+
+ try:
+ response = await client.post(api_base, headers=headers, data=data) # type: ignore
+ response.raise_for_status()
+ except httpx.HTTPStatusError as err:
+ error_code = err.response.status_code
+ raise BedrockError(status_code=error_code, message=err.response.text)
+ except httpx.TimeoutException as e:
+ raise BedrockError(status_code=408, message="Timeout error occurred.")
+
+ return self.process_response(
+ model=model,
+ response=response,
+ model_response=model_response,
+ stream=stream if isinstance(stream, bool) else False,
+ logging_obj=logging_obj,
+ api_key="",
+ data=data,
+ messages=messages,
+ print_verbose=print_verbose,
+ optional_params=optional_params,
+ encoding=encoding,
+ )
+
+ def completion(
+ self,
+ model: str,
+ messages: list,
+ custom_prompt_dict: dict,
+ model_response: ModelResponse,
+ print_verbose: Callable,
+ encoding,
+ logging_obj,
+ optional_params: dict,
+ acompletion: bool,
+ timeout: Optional[Union[float, httpx.Timeout]],
+ litellm_params=None,
+ logger_fn=None,
+ extra_headers: Optional[dict] = None,
+ client: Optional[Union[AsyncHTTPHandler, HTTPHandler]] = None,
+ ):
+ try:
+ import boto3
+
+ from botocore.auth import SigV4Auth
+ from botocore.awsrequest import AWSRequest
+ from botocore.credentials import Credentials
+ except ImportError:
+ raise ImportError("Missing boto3 to call bedrock. Run 'pip install boto3'.")
+
+ ## SETUP ##
+ stream = optional_params.pop("stream", None)
+ modelId = optional_params.pop("model_id", None)
+ if modelId is not None:
+ modelId = self.encode_model_id(model_id=modelId)
+ else:
+ modelId = model
+
+ provider = model.split(".")[0]
+
+ ## CREDENTIALS ##
+ # pop aws_secret_access_key, aws_access_key_id, aws_region_name from kwargs, since completion calls fail with them
+ aws_secret_access_key = optional_params.pop("aws_secret_access_key", None)
+ aws_access_key_id = optional_params.pop("aws_access_key_id", None)
+ aws_region_name = optional_params.pop("aws_region_name", None)
+ aws_role_name = optional_params.pop("aws_role_name", None)
+ aws_session_name = optional_params.pop("aws_session_name", None)
+ aws_profile_name = optional_params.pop("aws_profile_name", None)
+ aws_bedrock_runtime_endpoint = optional_params.pop(
+ "aws_bedrock_runtime_endpoint", None
+ ) # https://bedrock-runtime.{region_name}.amazonaws.com
+ aws_web_identity_token = optional_params.pop("aws_web_identity_token", None)
+
+ ### SET REGION NAME ###
+ if aws_region_name is None:
+ # check env #
+ litellm_aws_region_name = get_secret("AWS_REGION_NAME", None)
+
+ if litellm_aws_region_name is not None and isinstance(
+ litellm_aws_region_name, str
+ ):
+ aws_region_name = litellm_aws_region_name
+
+ standard_aws_region_name = get_secret("AWS_REGION", None)
+ if standard_aws_region_name is not None and isinstance(
+ standard_aws_region_name, str
+ ):
+ aws_region_name = standard_aws_region_name
+
+ if aws_region_name is None:
+ aws_region_name = "us-west-2"
+
+ credentials: Credentials = self.get_credentials(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ aws_region_name=aws_region_name,
+ aws_session_name=aws_session_name,
+ aws_profile_name=aws_profile_name,
+ aws_role_name=aws_role_name,
+ aws_web_identity_token=aws_web_identity_token,
+ )
+
+ ### SET RUNTIME ENDPOINT ###
+ endpoint_url = ""
+ env_aws_bedrock_runtime_endpoint = get_secret("AWS_BEDROCK_RUNTIME_ENDPOINT")
+ if aws_bedrock_runtime_endpoint is not None and isinstance(
+ aws_bedrock_runtime_endpoint, str
+ ):
+ endpoint_url = aws_bedrock_runtime_endpoint
+ elif env_aws_bedrock_runtime_endpoint and isinstance(
+ env_aws_bedrock_runtime_endpoint, str
+ ):
+ endpoint_url = env_aws_bedrock_runtime_endpoint
+ else:
+ endpoint_url = f"https://bedrock-runtime.{aws_region_name}.amazonaws.com"
+
+ if (stream is not None and stream is True) and provider != "ai21":
+ endpoint_url = f"{endpoint_url}/model/{modelId}/converse-stream"
+ else:
+ endpoint_url = f"{endpoint_url}/model/{modelId}/converse"
+
+ sigv4 = SigV4Auth(credentials, "bedrock", aws_region_name)
+
+ # Separate system prompt from rest of message
+ system_prompt_indices = []
+ system_content_blocks: List[SystemContentBlock] = []
+ for idx, message in enumerate(messages):
+ if message["role"] == "system":
+ _system_content_block = SystemContentBlock(text=message["content"])
+ system_content_blocks.append(_system_content_block)
+ system_prompt_indices.append(idx)
+ if len(system_prompt_indices) > 0:
+ for idx in reversed(system_prompt_indices):
+ messages.pop(idx)
+
+ inference_params = copy.deepcopy(optional_params)
+ additional_request_keys = []
+ additional_request_params = {}
+ supported_converse_params = AmazonConverseConfig.__annotations__.keys()
+ supported_tool_call_params = ["tools", "tool_choice"]
+ ## TRANSFORMATION ##
+ # send all model-specific params in 'additional_request_params'
+ for k, v in inference_params.items():
+ if (
+ k not in supported_converse_params
+ and k not in supported_tool_call_params
+ ):
+ additional_request_params[k] = v
+ additional_request_keys.append(k)
+ for key in additional_request_keys:
+ inference_params.pop(key, None)
+
+ bedrock_messages: List[MessageBlock] = _bedrock_converse_messages_pt(
+ messages=messages
+ )
+ bedrock_tools: List[ToolBlock] = _bedrock_tools_pt(
+ inference_params.pop("tools", [])
+ )
+ bedrock_tool_config: Optional[ToolConfigBlock] = None
+ if len(bedrock_tools) > 0:
+ tool_choice_values: ToolChoiceValuesBlock = inference_params.pop(
+ "tool_choice", None
+ )
+ bedrock_tool_config = ToolConfigBlock(
+ tools=bedrock_tools,
+ )
+ if tool_choice_values is not None:
+ bedrock_tool_config["toolChoice"] = tool_choice_values
+
+ _data: RequestObject = {
+ "messages": bedrock_messages,
+ "additionalModelRequestFields": additional_request_params,
+ "system": system_content_blocks,
+ "inferenceConfig": InferenceConfig(**inference_params),
+ }
+ if bedrock_tool_config is not None:
+ _data["toolConfig"] = bedrock_tool_config
+ data = json.dumps(_data)
+ ## COMPLETION CALL
+
+ headers = {"Content-Type": "application/json"}
+ if extra_headers is not None:
+ headers = {"Content-Type": "application/json", **extra_headers}
+ request = AWSRequest(
+ method="POST", url=endpoint_url, data=data, headers=headers
+ )
+ sigv4.add_auth(request)
+ prepped = request.prepare()
+
+ ## LOGGING
+ logging_obj.pre_call(
+ input=messages,
+ api_key="",
+ additional_args={
+ "complete_input_dict": data,
+ "api_base": prepped.url,
+ "headers": prepped.headers,
+ },
+ )
+
+ ### ROUTING (ASYNC, STREAMING, SYNC)
+ if acompletion:
+ if isinstance(client, HTTPHandler):
+ client = None
+ if stream is True and provider != "ai21":
+ return self.async_streaming(
+ model=model,
+ messages=messages,
+ data=data,
+ api_base=prepped.url,
+ model_response=model_response,
+ print_verbose=print_verbose,
+ encoding=encoding,
+ logging_obj=logging_obj,
+ optional_params=optional_params,
+ stream=True,
+ litellm_params=litellm_params,
+ logger_fn=logger_fn,
+ headers=prepped.headers,
+ timeout=timeout,
+ client=client,
+ ) # type: ignore
+ ### ASYNC COMPLETION
+ return self.async_completion(
+ model=model,
+ messages=messages,
+ data=data,
+ api_base=prepped.url,
+ model_response=model_response,
+ print_verbose=print_verbose,
+ encoding=encoding,
+ logging_obj=logging_obj,
+ optional_params=optional_params,
+ stream=stream, # type: ignore
+ litellm_params=litellm_params,
+ logger_fn=logger_fn,
+ headers=prepped.headers,
+ timeout=timeout,
+ client=client,
+ ) # type: ignore
+
+ if (stream is not None and stream is True) and provider != "ai21":
+
+ streaming_response = CustomStreamWrapper(
+ completion_stream=None,
+ make_call=partial(
+ make_sync_call,
+ client=None,
+ api_base=prepped.url,
+ headers=prepped.headers, # type: ignore
+ data=data,
+ model=model,
+ messages=messages,
+ logging_obj=logging_obj,
+ ),
+ model=model,
+ custom_llm_provider="bedrock",
+ logging_obj=logging_obj,
+ )
+
+ ## LOGGING
+ logging_obj.post_call(
+ input=messages,
+ api_key="",
+ original_response=streaming_response,
+ additional_args={"complete_input_dict": data},
+ )
+ return streaming_response
+ ### COMPLETION
+
+ if client is None or isinstance(client, AsyncHTTPHandler):
+ _params = {}
+ if timeout is not None:
+ if isinstance(timeout, float) or isinstance(timeout, int):
+ timeout = httpx.Timeout(timeout)
+ _params["timeout"] = timeout
+ client = HTTPHandler(**_params) # type: ignore
+ try:
+ response = client.post(url=prepped.url, headers=prepped.headers, data=data) # type: ignore
+ response.raise_for_status()
+ except httpx.HTTPStatusError as err:
+ error_code = err.response.status_code
+ raise BedrockError(status_code=error_code, message=response.text)
+ except httpx.TimeoutException:
+ raise BedrockError(status_code=408, message="Timeout error occurred.")
+
+ return self.process_response(
+ model=model,
+ response=response,
+ model_response=model_response,
+ stream=stream,
+ logging_obj=logging_obj,
+ optional_params=optional_params,
+ api_key="",
+ data=data,
+ messages=messages,
+ print_verbose=print_verbose,
+ encoding=encoding,
+ )
+
+
def get_response_stream_shape():
from botocore.model import ServiceModel
from botocore.loaders import Loader
@@ -1086,6 +1865,31 @@ class AWSEventStreamDecoder:
self.model = model
self.parser = EventStreamJSONParser()
+ def converse_chunk_parser(self, chunk_data: dict) -> GenericStreamingChunk:
+ text = ""
+ tool_str = ""
+ is_finished = False
+ finish_reason = ""
+ usage: Optional[ConverseTokenUsageBlock] = None
+ if "delta" in chunk_data:
+ delta_obj = ContentBlockDeltaEvent(**chunk_data["delta"])
+ if "text" in delta_obj:
+ text = delta_obj["text"]
+ elif "toolUse" in delta_obj:
+ tool_str = delta_obj["toolUse"]["input"]
+ elif "stopReason" in chunk_data:
+ finish_reason = map_finish_reason(chunk_data.get("stopReason", "stop"))
+ elif "usage" in chunk_data:
+ usage = ConverseTokenUsageBlock(**chunk_data["usage"]) # type: ignore
+ response = GenericStreamingChunk(
+ text=text,
+ tool_str=tool_str,
+ is_finished=is_finished,
+ finish_reason=finish_reason,
+ usage=usage,
+ )
+ return response
+
def _chunk_parser(self, chunk_data: dict) -> GenericStreamingChunk:
text = ""
is_finished = False
@@ -1098,19 +1902,8 @@ class AWSEventStreamDecoder:
is_finished = True
finish_reason = "stop"
######## bedrock.anthropic mappings ###############
- elif "completion" in chunk_data: # not claude-3
- text = chunk_data["completion"] # bedrock.anthropic
- stop_reason = chunk_data.get("stop_reason", None)
- if stop_reason != None:
- is_finished = True
- finish_reason = stop_reason
elif "delta" in chunk_data:
- if chunk_data["delta"].get("text", None) is not None:
- text = chunk_data["delta"]["text"]
- stop_reason = chunk_data["delta"].get("stop_reason", None)
- if stop_reason != None:
- is_finished = True
- finish_reason = stop_reason
+ return self.converse_chunk_parser(chunk_data=chunk_data)
######## bedrock.mistral mappings ###############
elif "outputs" in chunk_data:
if (
@@ -1137,11 +1930,11 @@ class AWSEventStreamDecoder:
is_finished = True
finish_reason = chunk_data["completionReason"]
return GenericStreamingChunk(
- **{
- "text": text,
- "is_finished": is_finished,
- "finish_reason": finish_reason,
- }
+ text=text,
+ is_finished=is_finished,
+ finish_reason=finish_reason,
+ tool_str="",
+ usage=None,
)
def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[GenericStreamingChunk]:
@@ -1178,9 +1971,14 @@ class AWSEventStreamDecoder:
parsed_response = self.parser.parse(response_dict, get_response_stream_shape())
if response_dict["status_code"] != 200:
raise ValueError(f"Bad response code, expected 200: {response_dict}")
+ if "chunk" in parsed_response:
+ chunk = parsed_response.get("chunk")
+ if not chunk:
+ return None
+ return chunk.get("bytes").decode() # type: ignore[no-any-return]
+ else:
+ chunk = response_dict.get("body")
+ if not chunk:
+ return None
- chunk = parsed_response.get("chunk")
- if not chunk:
- return None
-
- return chunk.get("bytes").decode() # type: ignore[no-any-return]
+ return chunk.decode() # type: ignore[no-any-return]
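
For orientation, here is a minimal sketch of the Converse request body the transformation above produces for one system prompt, one user message, one model-specific param, and one OpenAI tool. The values, and the `max_tokens` to `maxTokens` mapping done upstream by the provider config, are illustrative assumptions rather than captured handler output.

```python
# Illustrative only: roughly what the new handler serializes into `data` for
#   messages = [{"role": "system", "content": "Be terse."},
#               {"role": "user", "content": "What's the weather in Boston?"}]
# with maxTokens=100, a model-specific top_k=5, and one OpenAI-style tool.
example_converse_request = {
    "messages": [
        {"role": "user", "content": [{"text": "What's the weather in Boston?"}]}
    ],
    # system prompts are popped out of `messages` and sent as SystemContentBlocks
    "system": [{"text": "Be terse."}],
    # params supported by the Converse API stay in inferenceConfig ...
    "inferenceConfig": {"maxTokens": 100},
    # ... everything else is passed through untouched
    "additionalModelRequestFields": {"top_k": 5},
    # only present when tools were passed in
    "toolConfig": {
        "toolChoice": {"auto": {}},
        "tools": [
            {
                "toolSpec": {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "inputSchema": {
                        "json": {
                            "type": "object",
                            "properties": {"location": {"type": "string"}},
                            "required": ["location"],
                        }
                    },
                }
            }
        ],
    },
}
```

The handler JSON-encodes this body before building the AWSRequest, so the SigV4 signature covers exactly the bytes that are posted.
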
diff --git a/litellm/llms/custom_httpx/http_handler.py b/litellm/llms/custom_httpx/http_handler.py
index b91aaee2ae..5ec9c79bb2 100644
--- a/litellm/llms/custom_httpx/http_handler.py
+++ b/litellm/llms/custom_httpx/http_handler.py
@@ -156,12 +156,13 @@ class HTTPHandler:
self,
url: str,
data: Optional[Union[dict, str]] = None,
+ json: Optional[Union[dict, str]] = None,
params: Optional[dict] = None,
headers: Optional[dict] = None,
stream: bool = False,
):
req = self.client.build_request(
- "POST", url, data=data, params=params, headers=headers # type: ignore
+ "POST", url, data=data, json=json, params=params, headers=headers # type: ignore
)
response = self.client.send(req, stream=stream)
return response
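
A small usage sketch of the new `json` parameter on `HTTPHandler.post` (the URL is a placeholder): `data=` keeps the caller in charge of serialization and headers, which the SigV4-signed Bedrock path relies on, while `json=` lets httpx serialize the body and set the Content-Type header itself.

```python
from litellm.llms.custom_httpx.http_handler import HTTPHandler

client = HTTPHandler()

# Pre-serialized body: caller controls the exact bytes and headers
# (needed when the bytes are covered by a signature, as in the Bedrock handler).
resp = client.post(
    url="https://example.invalid/v1/echo",  # placeholder endpoint
    data='{"hello": "world"}',
    headers={"Content-Type": "application/json"},
)

# New in this diff: let httpx serialize the dict and set Content-Type itself.
resp = client.post(url="https://example.invalid/v1/echo", json={"hello": "world"})
```
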
diff --git a/litellm/llms/gemini.py b/litellm/llms/gemini.py
index a55b39aef9..cfdf39eca2 100644
--- a/litellm/llms/gemini.py
+++ b/litellm/llms/gemini.py
@@ -1,13 +1,14 @@
-import os, types, traceback, copy, asyncio
-import json
-from enum import Enum
+import types
+import traceback
+import copy
import time
from typing import Callable, Optional
-from litellm.utils import ModelResponse, get_secret, Choices, Message, Usage
+from litellm.utils import ModelResponse, Choices, Message, Usage
import litellm
-import sys, httpx
+import httpx
from .prompt_templates.factory import prompt_factory, custom_prompt, get_system_prompt
from packaging.version import Version
+from litellm import verbose_logger
class GeminiError(Exception):
@@ -264,7 +265,8 @@ def completion(
choices_list.append(choice_obj)
model_response["choices"] = choices_list
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error("LiteLLM.gemini.py: Exception occured - {}".format(str(e)))
+ verbose_logger.debug(traceback.format_exc())
raise GeminiError(
message=traceback.format_exc(), status_code=response.status_code
)
@@ -356,7 +358,8 @@ async def async_completion(
choices_list.append(choice_obj)
model_response["choices"] = choices_list
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error("LiteLLM.gemini.py: Exception occured - {}".format(str(e)))
+ verbose_logger.debug(traceback.format_exc())
raise GeminiError(
message=traceback.format_exc(), status_code=response.status_code
)
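
The same two-level replacement for `traceback.print_exc()` recurs throughout this diff; a minimal standalone sketch of the pattern follows (module and function names are hypothetical).

```python
import traceback

from litellm import verbose_logger


def do_work():
    raise ValueError("boom")


try:
    do_work()
except Exception as e:
    # one-line summary at ERROR, full traceback only at DEBUG
    verbose_logger.error(
        "my_module.do_work(): Exception occurred - {}".format(str(e))
    )
    verbose_logger.debug(traceback.format_exc())
    raise
```
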
diff --git a/litellm/llms/ollama.py b/litellm/llms/ollama.py
index 76687839d8..e7dd1d5f55 100644
--- a/litellm/llms/ollama.py
+++ b/litellm/llms/ollama.py
@@ -7,6 +7,7 @@ import litellm
from litellm.types.utils import ProviderField
import httpx, aiohttp, asyncio # type: ignore
from .prompt_templates.factory import prompt_factory, custom_prompt
+from litellm import verbose_logger
class OllamaError(Exception):
@@ -137,6 +138,7 @@ class OllamaConfig:
)
]
+
def get_supported_openai_params(
self,
):
@@ -151,10 +153,12 @@ class OllamaConfig:
"response_format",
]
+
# ollama wants plain base64 jpeg/png files as images. strip any leading dataURI
# and convert to jpeg if necessary.
def _convert_image(image):
import base64, io
+
try:
from PIL import Image
except:
@@ -404,7 +408,13 @@ async def ollama_async_streaming(url, data, model_response, encoding, logging_ob
async for transformed_chunk in streamwrapper:
yield transformed_chunk
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "LiteLLM.ollama.py::ollama_async_streaming(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
+
raise e
@@ -468,7 +478,12 @@ async def ollama_acompletion(url, data, model_response, encoding, logging_obj):
)
return model_response
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "LiteLLM.ollama.py::ollama_acompletion(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
raise e
diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py
index a058077227..a7439bbcc0 100644
--- a/litellm/llms/ollama_chat.py
+++ b/litellm/llms/ollama_chat.py
@@ -1,11 +1,15 @@
from itertools import chain
-import requests, types, time
-import json, uuid
+import requests
+import types
+import time
+import json
+import uuid
import traceback
from typing import Optional
+from litellm import verbose_logger
import litellm
-import httpx, aiohttp, asyncio
-from .prompt_templates.factory import prompt_factory, custom_prompt
+import httpx
+import aiohttp
class OllamaError(Exception):
@@ -299,7 +303,10 @@ def get_ollama_response(
tool_calls=[
{
"id": f"call_{str(uuid.uuid4())}",
- "function": {"name": function_call["name"], "arguments": json.dumps(function_call["arguments"])},
+ "function": {
+ "name": function_call["name"],
+ "arguments": json.dumps(function_call["arguments"]),
+ },
"type": "function",
}
],
@@ -307,7 +314,9 @@ def get_ollama_response(
model_response["choices"][0]["message"] = message
model_response["choices"][0]["finish_reason"] = "tool_calls"
else:
- model_response["choices"][0]["message"]["content"] = response_json["message"]["content"]
+ model_response["choices"][0]["message"]["content"] = response_json["message"][
+ "content"
+ ]
model_response["created"] = int(time.time())
model_response["model"] = "ollama/" + model
prompt_tokens = response_json.get("prompt_eval_count", litellm.token_counter(messages=messages)) # type: ignore
@@ -361,7 +370,10 @@ def ollama_completion_stream(url, api_key, data, logging_obj):
tool_calls=[
{
"id": f"call_{str(uuid.uuid4())}",
- "function": {"name": function_call["name"], "arguments": json.dumps(function_call["arguments"])},
+ "function": {
+ "name": function_call["name"],
+ "arguments": json.dumps(function_call["arguments"]),
+ },
"type": "function",
}
],
@@ -410,9 +422,10 @@ async def ollama_async_streaming(
first_chunk_content = first_chunk.choices[0].delta.content or ""
response_content = first_chunk_content + "".join(
[
- chunk.choices[0].delta.content
- async for chunk in streamwrapper
- if chunk.choices[0].delta.content]
+ chunk.choices[0].delta.content
+ async for chunk in streamwrapper
+ if chunk.choices[0].delta.content
+ ]
)
function_call = json.loads(response_content)
delta = litellm.utils.Delta(
@@ -420,7 +433,10 @@ async def ollama_async_streaming(
tool_calls=[
{
"id": f"call_{str(uuid.uuid4())}",
- "function": {"name": function_call["name"], "arguments": json.dumps(function_call["arguments"])},
+ "function": {
+ "name": function_call["name"],
+ "arguments": json.dumps(function_call["arguments"]),
+ },
"type": "function",
}
],
@@ -433,7 +449,8 @@ async def ollama_async_streaming(
async for transformed_chunk in streamwrapper:
yield transformed_chunk
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error("LiteLLM.gemini(): Exception occured - {}".format(str(e)))
+ verbose_logger.debug(traceback.format_exc())
async def ollama_acompletion(
@@ -483,7 +500,10 @@ async def ollama_acompletion(
tool_calls=[
{
"id": f"call_{str(uuid.uuid4())}",
- "function": {"name": function_call["name"], "arguments": json.dumps(function_call["arguments"])},
+ "function": {
+ "name": function_call["name"],
+ "arguments": json.dumps(function_call["arguments"]),
+ },
"type": "function",
}
],
@@ -491,7 +511,9 @@ async def ollama_acompletion(
model_response["choices"][0]["message"] = message
model_response["choices"][0]["finish_reason"] = "tool_calls"
else:
- model_response["choices"][0]["message"]["content"] = response_json["message"]["content"]
+ model_response["choices"][0]["message"]["content"] = response_json[
+ "message"
+ ]["content"]
model_response["created"] = int(time.time())
model_response["model"] = "ollama_chat/" + data["model"]
@@ -509,5 +531,9 @@ async def ollama_acompletion(
)
return model_response
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "LiteLLM.ollama_acompletion(): Exception occured - {}".format(str(e))
+ )
+ verbose_logger.debug(traceback.format_exc())
+
raise e
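
The reformatted blocks above all build the same OpenAI `tool_calls` structure from Ollama's parsed function call. Here is that shape as a standalone sketch; the helper name is hypothetical, the real code inlines it at each call site.

```python
import json
import uuid


def to_openai_tool_calls(function_call: dict) -> list:
    """Wrap an Ollama-style {"name": ..., "arguments": {...}} payload in the
    OpenAI tool_calls shape, re-serializing the arguments as a JSON string."""
    return [
        {
            "id": f"call_{str(uuid.uuid4())}",
            "function": {
                "name": function_call["name"],
                "arguments": json.dumps(function_call["arguments"]),
            },
            "type": "function",
        }
    ]


print(to_openai_tool_calls({"name": "get_current_weather", "arguments": {"location": "Boston, MA"}}))
```
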
diff --git a/litellm/llms/palm.py b/litellm/llms/palm.py
index f15be43db4..4d9953e77a 100644
--- a/litellm/llms/palm.py
+++ b/litellm/llms/palm.py
@@ -1,11 +1,12 @@
-import os, types, traceback, copy
-import json
-from enum import Enum
+import types
+import traceback
+import copy
import time
from typing import Callable, Optional
-from litellm.utils import ModelResponse, get_secret, Choices, Message, Usage
+from litellm.utils import ModelResponse, Choices, Message, Usage
import litellm
-import sys, httpx
+import httpx
+from litellm import verbose_logger
class PalmError(Exception):
@@ -165,7 +166,10 @@ def completion(
choices_list.append(choice_obj)
model_response["choices"] = choices_list
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.llms.palm.py::completion(): Exception occured - {}".format(str(e))
+ )
+ verbose_logger.debug(traceback.format_exc())
raise PalmError(
message=traceback.format_exc(), status_code=response.status_code
)
diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index 10f3f16ed4..6bf03b52d4 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -3,14 +3,7 @@ import requests, traceback
import json, re, xml.etree.ElementTree as ET
from jinja2 import Template, exceptions, meta, BaseLoader
from jinja2.sandbox import ImmutableSandboxedEnvironment
-from typing import (
- Any,
- List,
- Mapping,
- MutableMapping,
- Optional,
- Sequence,
-)
+from typing import Any, List, Mapping, MutableMapping, Optional, Sequence, Tuple
import litellm
import litellm.types
from litellm.types.completion import (
@@ -24,7 +17,7 @@ from litellm.types.completion import (
import litellm.types.llms
from litellm.types.llms.anthropic import *
import uuid
-
+from litellm.types.llms.bedrock import MessageBlock as BedrockMessageBlock
import litellm.types.llms.vertex_ai
@@ -1460,9 +1453,7 @@ def _load_image_from_url(image_url):
try:
from PIL import Image
except:
- raise Exception(
- "gemini image conversion failed please run `pip install Pillow`"
- )
+ raise Exception("image conversion failed please run `pip install Pillow`")
from io import BytesIO
try:
@@ -1613,6 +1604,380 @@ def azure_text_pt(messages: list):
return prompt
+###### AMAZON BEDROCK #######
+
+from litellm.types.llms.bedrock import (
+ ToolResultContentBlock as BedrockToolResultContentBlock,
+ ToolResultBlock as BedrockToolResultBlock,
+ ToolConfigBlock as BedrockToolConfigBlock,
+ ToolUseBlock as BedrockToolUseBlock,
+ ImageSourceBlock as BedrockImageSourceBlock,
+ ImageBlock as BedrockImageBlock,
+ ContentBlock as BedrockContentBlock,
+ ToolInputSchemaBlock as BedrockToolInputSchemaBlock,
+ ToolSpecBlock as BedrockToolSpecBlock,
+ ToolBlock as BedrockToolBlock,
+ ToolChoiceValuesBlock as BedrockToolChoiceValuesBlock,
+)
+
+
+def get_image_details(image_url) -> Tuple[str, str]:
+ try:
+ import base64
+
+ # Send a GET request to the image URL
+ response = requests.get(image_url)
+ response.raise_for_status() # Raise an exception for HTTP errors
+
+ # Check the response's content type to ensure it is an image
+ content_type = response.headers.get("content-type")
+ if not content_type or "image" not in content_type:
+ raise ValueError(
+ f"URL does not point to a valid image (content-type: {content_type})"
+ )
+
+ # Convert the image content to base64 bytes
+ base64_bytes = base64.b64encode(response.content).decode("utf-8")
+
+ # Get mime-type
+ mime_type = content_type.split("/")[
+ 1
+ ] # Extract mime-type from content-type header
+
+ return base64_bytes, mime_type
+
+ except requests.RequestException as e:
+ raise Exception(f"Request failed: {e}")
+ except Exception as e:
+ raise e
+
+
+def _process_bedrock_converse_image_block(image_url: str) -> BedrockImageBlock:
+ if "base64" in image_url:
+ # Case 1: Images with base64 encoding
+ import base64, re
+
+ # base 64 is passed as data:image/jpeg;base64,
+ image_metadata, img_without_base_64 = image_url.split(",")
+
+ # read mime_type from image_metadata, e.g. "data:image/jpeg;base64"
+ # Extract MIME type using regular expression
+ mime_type_match = re.match(r"data:(.*?);base64", image_metadata)
+ if mime_type_match:
+ mime_type = mime_type_match.group(1)
+ image_format = mime_type.split("/")[1]
+ else:
+ mime_type = "image/jpeg"
+ image_format = "jpeg"
+ _blob = BedrockImageSourceBlock(bytes=img_without_base_64)
+ supported_image_formats = (
+ litellm.AmazonConverseConfig().get_supported_image_types()
+ )
+ if image_format in supported_image_formats:
+ return BedrockImageBlock(source=_blob, format=image_format) # type: ignore
+ else:
+ # Handle the case when the image format is not supported
+ raise ValueError(
+ "Unsupported image format: {}. Supported formats: {}".format(
+ image_format, supported_image_formats
+ )
+ )
+ elif "https:/" in image_url:
+ # Case 2: Images with direct links
+ image_bytes, image_format = get_image_details(image_url)
+ _blob = BedrockImageSourceBlock(bytes=image_bytes)
+ supported_image_formats = (
+ litellm.AmazonConverseConfig().get_supported_image_types()
+ )
+ if image_format in supported_image_formats:
+ return BedrockImageBlock(source=_blob, format=image_format) # type: ignore
+ else:
+ # Handle the case when the image format is not supported
+ raise ValueError(
+ "Unsupported image format: {}. Supported formats: {}".format(
+ image_format, supported_image_formats
+ )
+ )
+ else:
+ raise ValueError(
+ "Unsupported image type. Expected either image url or base64 encoded string - \
+ e.g. 'data:image/jpeg;base64,'"
+ )
+
+
+def _convert_to_bedrock_tool_call_invoke(
+ tool_calls: list,
+) -> List[BedrockContentBlock]:
+ """
+ OpenAI tool invokes:
+ {
+ "role": "assistant",
+ "content": null,
+ "tool_calls": [
+ {
+ "id": "call_abc123",
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "arguments": "{\n\"location\": \"Boston, MA\"\n}"
+ }
+ }
+ ]
+ },
+ """
+ """
+ Bedrock tool invokes:
+ [
+ {
+ "role": "assistant",
+ "toolUse": {
+ "input": {"location": "Boston, MA", ..},
+ "name": "get_current_weather",
+ "toolUseId": "call_abc123"
+ }
+ }
+ ]
+ """
+ """
+ - json.loads argument
+ - extract name
+ - extract id
+ """
+
+ try:
+ _parts_list: List[BedrockContentBlock] = []
+ for tool in tool_calls:
+ if "function" in tool:
+ id = tool["id"]
+ name = tool["function"].get("name", "")
+ arguments = tool["function"].get("arguments", "")
+ arguments_dict = json.loads(arguments)
+ bedrock_tool = BedrockToolUseBlock(
+ input=arguments_dict, name=name, toolUseId=id
+ )
+ bedrock_content_block = BedrockContentBlock(toolUse=bedrock_tool)
+ _parts_list.append(bedrock_content_block)
+ return _parts_list
+ except Exception as e:
+ raise Exception(
+ "Unable to convert openai tool calls={} to bedrock tool calls. Received error={}".format(
+ tool_calls, str(e)
+ )
+ )
+
+
+def _convert_to_bedrock_tool_call_result(
+ message: dict,
+) -> BedrockMessageBlock:
+ """
+ OpenAI message with a tool result looks like:
+ {
+ "tool_call_id": "tool_1",
+ "role": "tool",
+ "name": "get_current_weather",
+ "content": "function result goes here",
+ },
+
+ OpenAI message with a function call result looks like:
+ {
+ "role": "function",
+ "name": "get_current_weather",
+ "content": "function result goes here",
+ }
+ """
+ """
+ Bedrock result looks like this:
+ {
+ "role": "user",
+ "content": [
+ {
+ "toolResult": {
+ "toolUseId": "tooluse_kZJMlvQmRJ6eAyJE5GIl7Q",
+ "content": [
+ {
+ "json": {
+ "song": "Elemental Hotel",
+ "artist": "8 Storey Hike"
+ }
+ }
+ ]
+ }
+ }
+ ]
+ }
+ """
+ """
+ -
+ """
+ content = message.get("content", "")
+ name = message.get("name", "")
+ id = message.get("tool_call_id", str(uuid.uuid4()))
+
+ tool_result_content_block = BedrockToolResultContentBlock(text=content)
+ tool_result = BedrockToolResultBlock(
+ content=[tool_result_content_block],
+ toolUseId=id,
+ )
+ content_block = BedrockContentBlock(toolResult=tool_result)
+
+ return BedrockMessageBlock(role="user", content=[content_block])
+
+
+def _bedrock_converse_messages_pt(messages: List) -> List[BedrockMessageBlock]:
+ """
+ Converts given messages from OpenAI format to Bedrock format
+
+ - Roles must alternate b/w 'user' and 'assistant' (same as anthropic -> merge consecutive roles)
+ - Please ensure that function response turn comes immediately after a function call turn
+ """
+
+ contents: List[BedrockMessageBlock] = []
+ msg_i = 0
+ while msg_i < len(messages):
+ user_content: List[BedrockContentBlock] = []
+ init_msg_i = msg_i
+ ## MERGE CONSECUTIVE USER CONTENT ##
+ while msg_i < len(messages) and messages[msg_i]["role"] == "user":
+ if isinstance(messages[msg_i]["content"], list):
+ _parts: List[BedrockContentBlock] = []
+ for element in messages[msg_i]["content"]:
+ if isinstance(element, dict):
+ if element["type"] == "text":
+ _part = BedrockContentBlock(text=element["text"])
+ _parts.append(_part)
+ elif element["type"] == "image_url":
+ image_url = element["image_url"]["url"]
+ _part = _process_bedrock_converse_image_block( # type: ignore
+ image_url=image_url
+ )
+ _parts.append(BedrockContentBlock(image=_part)) # type: ignore
+ user_content.extend(_parts)
+ else:
+ _part = BedrockContentBlock(text=messages[msg_i]["content"])
+ user_content.append(_part)
+
+ msg_i += 1
+
+ if user_content:
+ contents.append(BedrockMessageBlock(role="user", content=user_content))
+ assistant_content: List[BedrockContentBlock] = []
+ ## MERGE CONSECUTIVE ASSISTANT CONTENT ##
+ while msg_i < len(messages) and messages[msg_i]["role"] == "assistant":
+ if isinstance(messages[msg_i]["content"], list):
+ assistants_parts: List[BedrockContentBlock] = []
+ for element in messages[msg_i]["content"]:
+ if isinstance(element, dict):
+ if element["type"] == "text":
+ assistants_part = BedrockContentBlock(text=element["text"])
+ assistants_parts.append(assistants_part)
+ elif element["type"] == "image_url":
+ image_url = element["image_url"]["url"]
+ assistants_part = _process_bedrock_converse_image_block( # type: ignore
+ image_url=image_url
+ )
+ assistants_parts.append(
+ BedrockContentBlock(image=assistants_part) # type: ignore
+ )
+ assistant_content.extend(assistants_parts)
+ elif messages[msg_i].get(
+ "tool_calls", []
+ ): # support assistant tool invoke conversion
+ assistant_content.extend(
+ _convert_to_bedrock_tool_call_invoke(messages[msg_i]["tool_calls"])
+ )
+ else:
+ assistant_text = (
+ messages[msg_i].get("content") or ""
+ ) # either string or none
+ if assistant_text:
+ assistant_content.append(BedrockContentBlock(text=assistant_text))
+
+ msg_i += 1
+
+ if assistant_content:
+ contents.append(
+ BedrockMessageBlock(role="assistant", content=assistant_content)
+ )
+
+ ## APPEND TOOL CALL MESSAGES ##
+ if msg_i < len(messages) and messages[msg_i]["role"] == "tool":
+ tool_call_result = _convert_to_bedrock_tool_call_result(messages[msg_i])
+ contents.append(tool_call_result)
+ msg_i += 1
+ if msg_i == init_msg_i: # prevent infinite loops
+ raise Exception(
+ "Invalid Message passed in - {}. File an issue https://github.com/BerriAI/litellm/issues".format(
+ messages[msg_i]
+ )
+ )
+
+ return contents
+
+
+def _bedrock_tools_pt(tools: List) -> List[BedrockToolBlock]:
+ """
+ OpenAI tools looks like:
+ tools = [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA",
+ },
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+ },
+ "required": ["location"],
+ },
+ }
+ }
+ ]
+ """
+ """
+ Bedrock toolConfig looks like:
+ "tools": [
+ {
+ "toolSpec": {
+ "name": "top_song",
+ "description": "Get the most popular song played on a radio station.",
+ "inputSchema": {
+ "json": {
+ "type": "object",
+ "properties": {
+ "sign": {
+ "type": "string",
+ "description": "The call sign for the radio station for which you want the most popular song. Example calls signs are WZPZ, and WKRP."
+ }
+ },
+ "required": [
+ "sign"
+ ]
+ }
+ }
+ }
+ }
+ ]
+ """
+ tool_block_list: List[BedrockToolBlock] = []
+ for tool in tools:
+ parameters = tool.get("function", {}).get("parameters", None)
+ name = tool.get("function", {}).get("name", "")
+ description = tool.get("function", {}).get("description", "")
+ tool_input_schema = BedrockToolInputSchemaBlock(json=parameters)
+ tool_spec = BedrockToolSpecBlock(
+ inputSchema=tool_input_schema, name=name, description=description
+ )
+ tool_block = BedrockToolBlock(toolSpec=tool_spec)
+ tool_block_list.append(tool_block)
+
+ return tool_block_list
+
+
# Function call template
def function_call_prompt(messages: list, functions: list):
function_prompt = """Produce JSON OUTPUT ONLY! Adhere to this format {"name": "function_name", "arguments":{"argument_name": "argument_value"}} The following functions are available to you:"""
diff --git a/litellm/llms/vertex_ai.py b/litellm/llms/vertex_ai.py
index dc79e7e4e9..bd9cfaa8d6 100644
--- a/litellm/llms/vertex_ai.py
+++ b/litellm/llms/vertex_ai.py
@@ -12,6 +12,7 @@ from litellm.llms.prompt_templates.factory import (
convert_to_gemini_tool_call_result,
convert_to_gemini_tool_call_invoke,
)
+from litellm.types.files import (
+ get_file_mime_type_for_file_type,
+ get_file_type_from_extension,
+ is_gemini_1_5_accepted_file_type,
+ is_video_file_type,
+)
class VertexAIError(Exception):
@@ -297,29 +298,31 @@ def _convert_gemini_role(role: str) -> Literal["user", "model"]:
def _process_gemini_image(image_url: str) -> PartType:
try:
- if ".mp4" in image_url and "gs://" in image_url:
- # Case 1: Videos with Cloud Storage URIs
- part_mime = "video/mp4"
- _file_data = FileDataType(mime_type=part_mime, file_uri=image_url)
- return PartType(file_data=_file_data)
- elif ".pdf" in image_url and "gs://" in image_url:
- # Case 2: PDF's with Cloud Storage URIs
- part_mime = "application/pdf"
- _file_data = FileDataType(mime_type=part_mime, file_uri=image_url)
- return PartType(file_data=_file_data)
- elif "gs://" in image_url:
- # Case 3: Images with Cloud Storage URIs
- # The supported MIME types for images include image/png and image/jpeg.
- part_mime = "image/png" if "png" in image_url else "image/jpeg"
- _file_data = FileDataType(mime_type=part_mime, file_uri=image_url)
- return PartType(file_data=_file_data)
+ # GCS URIs
+ if "gs://" in image_url:
+ # Figure out file type
+ extension_with_dot = os.path.splitext(image_url)[-1] # Ex: ".png"
+ extension = extension_with_dot[1:] # Ex: "png"
+
+ file_type = get_file_type_from_extension(extension)
+
+ # Validate the file type is supported by Gemini
+ if not is_gemini_1_5_accepted_file_type(file_type):
+ raise Exception(f"File type not supported by gemini - {file_type}")
+
+ mime_type = get_file_mime_type_for_file_type(file_type)
+ file_data = FileDataType(mime_type=mime_type, file_uri=image_url)
+
+ return PartType(file_data=file_data)
+
+ # Direct links
elif "https:/" in image_url:
- # Case 4: Images with direct links
image = _load_image_from_url(image_url)
_blob = BlobType(data=image.data, mime_type=image._mime_type)
return PartType(inline_data=_blob)
+
+ # Base64 encoding
elif "base64" in image_url:
- # Case 5: Images with base64 encoding
import base64, re
# base 64 is passed as data:image/jpeg;base64,
@@ -426,112 +429,6 @@ def _gemini_convert_messages_with_history(messages: list) -> List[ContentType]:
return contents
-def _gemini_vision_convert_messages(messages: list):
- """
- Converts given messages for GPT-4 Vision to Gemini format.
-
- Args:
- messages (list): The messages to convert. Each message can be a dictionary with a "content" key. The content can be a string or a list of elements. If it is a string, it will be concatenated to the prompt. If it is a list, each element will be processed based on its type:
- - If the element is a dictionary with a "type" key equal to "text", its "text" value will be concatenated to the prompt.
- - If the element is a dictionary with a "type" key equal to "image_url", its "image_url" value will be added to the list of images.
-
- Returns:
- tuple: A tuple containing the prompt (a string) and the processed images (a list of objects representing the images).
-
- Raises:
- VertexAIError: If the import of the 'vertexai' module fails, indicating that 'google-cloud-aiplatform' needs to be installed.
- Exception: If any other exception occurs during the execution of the function.
-
- Note:
- This function is based on the code from the 'gemini/getting-started/intro_gemini_python.ipynb' notebook in the 'generative-ai' repository on GitHub.
- The supported MIME types for images include 'image/png' and 'image/jpeg'.
-
- Examples:
- >>> messages = [
- ... {"content": "Hello, world!"},
- ... {"content": [{"type": "text", "text": "This is a text message."}, {"type": "image_url", "image_url": "example.com/image.png"}]},
- ... ]
- >>> _gemini_vision_convert_messages(messages)
- ('Hello, world!This is a text message.', [, ])
- """
- try:
- import vertexai
- except:
- raise VertexAIError(
- status_code=400,
- message="vertexai import failed please run `pip install google-cloud-aiplatform`",
- )
- try:
- from vertexai.preview.language_models import (
- ChatModel,
- CodeChatModel,
- InputOutputTextPair,
- )
- from vertexai.language_models import TextGenerationModel, CodeGenerationModel
- from vertexai.preview.generative_models import (
- GenerativeModel,
- Part,
- GenerationConfig,
- Image,
- )
-
- # given messages for gpt-4 vision, convert them for gemini
- # https://github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/getting-started/intro_gemini_python.ipynb
- prompt = ""
- images = []
- for message in messages:
- if isinstance(message["content"], str):
- prompt += message["content"]
- elif isinstance(message["content"], list):
- # see https://docs.litellm.ai/docs/providers/openai#openai-vision-models
- for element in message["content"]:
- if isinstance(element, dict):
- if element["type"] == "text":
- prompt += element["text"]
- elif element["type"] == "image_url":
- image_url = element["image_url"]["url"]
- images.append(image_url)
- # processing images passed to gemini
- processed_images = []
- for img in images:
- if "gs://" in img:
- # Case 1: Images with Cloud Storage URIs
- # The supported MIME types for images include image/png and image/jpeg.
- part_mime = "image/png" if "png" in img else "image/jpeg"
- google_clooud_part = Part.from_uri(img, mime_type=part_mime)
- processed_images.append(google_clooud_part)
- elif "https:/" in img:
- # Case 2: Images with direct links
- image = _load_image_from_url(img)
- processed_images.append(image)
- elif ".mp4" in img and "gs://" in img:
- # Case 3: Videos with Cloud Storage URIs
- part_mime = "video/mp4"
- google_clooud_part = Part.from_uri(img, mime_type=part_mime)
- processed_images.append(google_clooud_part)
- elif "base64" in img:
- # Case 4: Images with base64 encoding
- import base64, re
-
- # base 64 is passed as data:image/jpeg;base64,
- image_metadata, img_without_base_64 = img.split(",")
-
- # read mime_type from img_without_base_64=data:image/jpeg;base64
- # Extract MIME type using regular expression
- mime_type_match = re.match(r"data:(.*?);base64", image_metadata)
-
- if mime_type_match:
- mime_type = mime_type_match.group(1)
- else:
- mime_type = "image/jpeg"
- decoded_img = base64.b64decode(img_without_base_64)
- processed_image = Part.from_data(data=decoded_img, mime_type=mime_type)
- processed_images.append(processed_image)
- return prompt, processed_images
- except Exception as e:
- raise e
-
-
def _get_client_cache_key(model: str, vertex_project: str, vertex_location: str):
_cache_key = f"{model}-{vertex_project}-{vertex_location}"
return _cache_key
@@ -647,9 +544,9 @@ def completion(
prompt = " ".join(
[
- message["content"]
+ message.get("content")
for message in messages
- if isinstance(message["content"], str)
+ if isinstance(message.get("content", None), str)
]
)
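
A short sketch of the new `gs://` branch above, assuming the `litellm.types.files` helpers behave as their names suggest (extension to file type to MIME type); the bucket paths are made up.

```python
import os

from litellm.types.files import (
    get_file_mime_type_for_file_type,
    get_file_type_from_extension,
    is_gemini_1_5_accepted_file_type,
)

for uri in ["gs://bucket/lecture.mp4", "gs://bucket/paper.pdf", "gs://bucket/logo.png"]:
    extension = os.path.splitext(uri)[-1][1:]  # "mp4", "pdf", "png"
    file_type = get_file_type_from_extension(extension)
    if is_gemini_1_5_accepted_file_type(file_type):
        print(uri, "->", get_file_mime_type_for_file_type(file_type))
    else:
        print(uri, "-> not accepted by gemini-1.5")
```
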
diff --git a/litellm/main.py b/litellm/main.py
index 596f85f334..2b1712d91b 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -79,7 +79,7 @@ from .llms.anthropic import AnthropicChatCompletion
from .llms.anthropic_text import AnthropicTextCompletion
from .llms.huggingface_restapi import Huggingface
from .llms.predibase import PredibaseChatCompletion
-from .llms.bedrock_httpx import BedrockLLM
+from .llms.bedrock_httpx import BedrockLLM, BedrockConverseLLM
from .llms.vertex_httpx import VertexLLM
from .llms.triton import TritonChatCompletion
from .llms.prompt_templates.factory import (
@@ -122,6 +122,7 @@ huggingface = Huggingface()
predibase_chat_completions = PredibaseChatCompletion()
triton_chat_completions = TritonChatCompletion()
bedrock_chat_completion = BedrockLLM()
+bedrock_converse_chat_completion = BedrockConverseLLM()
vertex_chat_completion = VertexLLM()
####### COMPLETION ENDPOINTS ################
@@ -364,7 +365,10 @@ async def acompletion(
) # sets the logging event loop if the user does sync streaming (e.g. on proxy for sagemaker calls)
return response
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.acompletion(): Exception occured - {}".format(str(e))
+ )
+ verbose_logger.debug(traceback.format_exc())
custom_llm_provider = custom_llm_provider or "openai"
raise exception_type(
model=model,
@@ -477,7 +481,10 @@ def mock_completion(
except Exception as e:
if isinstance(e, openai.APIError):
raise e
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.mock_completion(): Exception occured - {}".format(str(e))
+ )
+ verbose_logger.debug(traceback.format_exc())
raise Exception("Mock completion response failed")
@@ -2100,22 +2107,40 @@ def completion(
logging_obj=logging,
)
else:
- response = bedrock_chat_completion.completion(
- model=model,
- messages=messages,
- custom_prompt_dict=custom_prompt_dict,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- logging_obj=logging,
- extra_headers=extra_headers,
- timeout=timeout,
- acompletion=acompletion,
- client=client,
- )
+ if model.startswith("anthropic"):
+ response = bedrock_converse_chat_completion.completion(
+ model=model,
+ messages=messages,
+ custom_prompt_dict=custom_prompt_dict,
+ model_response=model_response,
+ print_verbose=print_verbose,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
+ logger_fn=logger_fn,
+ encoding=encoding,
+ logging_obj=logging,
+ extra_headers=extra_headers,
+ timeout=timeout,
+ acompletion=acompletion,
+ client=client,
+ )
+ else:
+ response = bedrock_chat_completion.completion(
+ model=model,
+ messages=messages,
+ custom_prompt_dict=custom_prompt_dict,
+ model_response=model_response,
+ print_verbose=print_verbose,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
+ logger_fn=logger_fn,
+ encoding=encoding,
+ logging_obj=logging,
+ extra_headers=extra_headers,
+ timeout=timeout,
+ acompletion=acompletion,
+ client=client,
+ )
if optional_params.get("stream", False):
## LOGGING
logging.post_call(
@@ -4433,7 +4458,10 @@ async def ahealth_check(
response = {} # args like remaining ratelimit etc.
return response
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.ahealth_check(): Exception occured - {}".format(str(e))
+ )
+ verbose_logger.debug(traceback.format_exc())
stack_trace = traceback.format_exc()
if isinstance(stack_trace, str):
stack_trace = stack_trace[:1000]
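
The effect of the new Bedrock branch above, sketched as a predicate (the function name is hypothetical; in main.py the check is inline): Bedrock Anthropic model ids route to the new Converse handler, while every other Bedrock model keeps the existing invoke-based handler.

```python
def uses_converse_handler(model: str) -> bool:
    # mirrors `model.startswith("anthropic")` in the bedrock branch of completion()
    return model.startswith("anthropic")


assert uses_converse_handler("anthropic.claude-3-sonnet-20240229-v1:0")
assert not uses_converse_handler("mistral.mistral-7b-instruct-v0:2")
```
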
diff --git a/litellm/proxy/_logging.py b/litellm/proxy/_logging.py
index 22cbd88cb7..f453cef395 100644
--- a/litellm/proxy/_logging.py
+++ b/litellm/proxy/_logging.py
@@ -1,6 +1,7 @@
import json
import logging
from logging import Formatter
+import sys
class JsonFormatter(Formatter):
diff --git a/litellm/proxy/_super_secret_config.yaml b/litellm/proxy/_super_secret_config.yaml
index 72479bd5d5..7fa1bbc19c 100644
--- a/litellm/proxy/_super_secret_config.yaml
+++ b/litellm/proxy/_super_secret_config.yaml
@@ -56,8 +56,10 @@ router_settings:
litellm_settings:
success_callback: ["langfuse"]
- json_logs: true
general_settings:
alerting: ["email"]
+ key_management_system: "aws_kms"
+ key_management_settings:
+ hosted_keys: ["LITELLM_MASTER_KEY"]
diff --git a/litellm/proxy/_types.py b/litellm/proxy/_types.py
index 05efc7495d..3bdbda7579 100644
--- a/litellm/proxy/_types.py
+++ b/litellm/proxy/_types.py
@@ -955,6 +955,7 @@ class KeyManagementSystem(enum.Enum):
AZURE_KEY_VAULT = "azure_key_vault"
AWS_SECRET_MANAGER = "aws_secret_manager"
LOCAL = "local"
+ AWS_KMS = "aws_kms"
class KeyManagementSettings(LiteLLMBase):
diff --git a/litellm/proxy/auth/auth_checks.py b/litellm/proxy/auth/auth_checks.py
index b9813afc91..17f0822e61 100644
--- a/litellm/proxy/auth/auth_checks.py
+++ b/litellm/proxy/auth/auth_checks.py
@@ -106,6 +106,40 @@ def common_checks(
raise Exception(
f"'user' param not passed in. 'enforce_user_param'={general_settings['enforce_user_param']}"
)
+ if general_settings.get("enforced_params", None) is not None:
+ # Enterprise ONLY Feature
+ # we already validate if user is premium_user when reading the config
+ # Add an extra premium_user check here too, just in case
+ from litellm.proxy.proxy_server import premium_user, CommonProxyErrors
+
+ if premium_user is not True:
+ raise ValueError(
+ "Trying to use `enforced_params`"
+ + CommonProxyErrors.not_premium_user.value
+ )
+
+ if route in LiteLLMRoutes.openai_routes.value:
+ # loop through each enforced param
+ # example enforced_params ['user', 'metadata', 'metadata.generation_name']
+ for enforced_param in general_settings["enforced_params"]:
+ _enforced_params = enforced_param.split(".")
+ if len(_enforced_params) == 1:
+ if _enforced_params[0] not in request_body:
+ raise ValueError(
+ f"BadRequest please pass param={_enforced_params[0]} in request body. This is a required param"
+ )
+ elif len(_enforced_params) == 2:
+ # this is a scenario where user requires request['metadata']['generation_name'] to exist
+ if _enforced_params[0] not in request_body:
+ raise ValueError(
+ f"BadRequest please pass param={_enforced_params[0]} in request body. This is a required param"
+ )
+ if _enforced_params[1] not in request_body[_enforced_params[0]]:
+ raise ValueError(
+ f"BadRequest please pass param=[{_enforced_params[0]}][{_enforced_params[1]}] in request body. This is a required param"
+ )
+
+ pass
# 7. [OPTIONAL] If 'litellm.max_budget' is set (>0), is proxy under budget
if (
litellm.max_budget > 0
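
A standalone sketch of the `enforced_params` check added above (the helper name is hypothetical; the real logic runs inline in `common_checks` and only for OpenAI routes). It shows how a dotted entry like `metadata.generation_name` requires both the top-level key and the nested key.

```python
def check_enforced_params(request_body: dict, enforced_params: list) -> None:
    for enforced_param in enforced_params:
        parts = enforced_param.split(".")
        if parts[0] not in request_body:
            raise ValueError(
                f"BadRequest please pass param={parts[0]} in request body. This is a required param"
            )
        if len(parts) == 2 and parts[1] not in request_body[parts[0]]:
            raise ValueError(
                f"BadRequest please pass param=[{parts[0]}][{parts[1]}] in request body. This is a required param"
            )


# passes: both the top-level and the nested param are present
check_enforced_params(
    {"user": "u-1", "metadata": {"generation_name": "chatbot"}},
    ["user", "metadata.generation_name"],
)

# fails: metadata.generation_name is missing
try:
    check_enforced_params({"user": "u-1", "metadata": {}}, ["metadata.generation_name"])
except ValueError as e:
    print(e)
```
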
diff --git a/litellm/proxy/hooks/azure_content_safety.py b/litellm/proxy/hooks/azure_content_safety.py
index 5b5139f8c7..47ba36a683 100644
--- a/litellm/proxy/hooks/azure_content_safety.py
+++ b/litellm/proxy/hooks/azure_content_safety.py
@@ -88,7 +88,7 @@ class _PROXY_AzureContentSafety(
verbose_proxy_logger.debug(
"Error in Azure Content-Safety: %s", traceback.format_exc()
)
- traceback.print_exc()
+ verbose_proxy_logger.debug(traceback.format_exc())
raise
result = self._compute_result(response)
@@ -123,7 +123,12 @@ class _PROXY_AzureContentSafety(
except HTTPException as e:
raise e
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.hooks.azure_content_safety.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
async def async_post_call_success_hook(
self,
diff --git a/litellm/proxy/hooks/batch_redis_get.py b/litellm/proxy/hooks/batch_redis_get.py
index 64541c1bff..d506109b81 100644
--- a/litellm/proxy/hooks/batch_redis_get.py
+++ b/litellm/proxy/hooks/batch_redis_get.py
@@ -94,7 +94,12 @@ class _PROXY_BatchRedisRequests(CustomLogger):
except HTTPException as e:
raise e
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.hooks.batch_redis_get.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
async def async_get_cache(self, *args, **kwargs):
"""
diff --git a/litellm/proxy/hooks/cache_control_check.py b/litellm/proxy/hooks/cache_control_check.py
index 3160fe97ad..89971a0bf7 100644
--- a/litellm/proxy/hooks/cache_control_check.py
+++ b/litellm/proxy/hooks/cache_control_check.py
@@ -1,13 +1,13 @@
# What this does?
## Checks if key is allowed to use the cache controls passed in to the completion() call
-from typing import Optional
import litellm
+from litellm import verbose_logger
from litellm.caching import DualCache
from litellm.proxy._types import UserAPIKeyAuth
from litellm.integrations.custom_logger import CustomLogger
from fastapi import HTTPException
-import json, traceback
+import traceback
class _PROXY_CacheControlCheck(CustomLogger):
@@ -54,4 +54,9 @@ class _PROXY_CacheControlCheck(CustomLogger):
except HTTPException as e:
raise e
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.proxy.hooks.cache_control_check.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
diff --git a/litellm/proxy/hooks/max_budget_limiter.py b/litellm/proxy/hooks/max_budget_limiter.py
index 442cc53e37..c4b328bab0 100644
--- a/litellm/proxy/hooks/max_budget_limiter.py
+++ b/litellm/proxy/hooks/max_budget_limiter.py
@@ -1,10 +1,10 @@
-from typing import Optional
+from litellm import verbose_logger
import litellm
from litellm.caching import DualCache
from litellm.proxy._types import UserAPIKeyAuth
from litellm.integrations.custom_logger import CustomLogger
from fastapi import HTTPException
-import json, traceback
+import traceback
class _PROXY_MaxBudgetLimiter(CustomLogger):
@@ -44,4 +44,9 @@ class _PROXY_MaxBudgetLimiter(CustomLogger):
except HTTPException as e:
raise e
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.proxy.hooks.max_budget_limiter.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
diff --git a/litellm/proxy/hooks/presidio_pii_masking.py b/litellm/proxy/hooks/presidio_pii_masking.py
index 95a6e9c3c8..e64e69c457 100644
--- a/litellm/proxy/hooks/presidio_pii_masking.py
+++ b/litellm/proxy/hooks/presidio_pii_masking.py
@@ -8,8 +8,8 @@
# Tell us how we can improve! - Krrish & Ishaan
-from typing import Optional, Literal, Union
-import litellm, traceback, sys, uuid, json
+from typing import Optional, Union
+import litellm, traceback, uuid, json # noqa: E401
from litellm.caching import DualCache
from litellm.proxy._types import UserAPIKeyAuth
from litellm.integrations.custom_logger import CustomLogger
@@ -21,8 +21,8 @@ from litellm.utils import (
ImageResponse,
StreamingChoices,
)
-from datetime import datetime
-import aiohttp, asyncio
+import aiohttp
+import asyncio
class _OPTIONAL_PresidioPIIMasking(CustomLogger):
@@ -138,7 +138,12 @@ class _OPTIONAL_PresidioPIIMasking(CustomLogger):
else:
raise Exception(f"Invalid anonymizer response: {redacted_text}")
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.hooks.presidio_pii_masking.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
raise e
async def async_pre_call_hook(
diff --git a/litellm/proxy/hooks/prompt_injection_detection.py b/litellm/proxy/hooks/prompt_injection_detection.py
index 08dbedd8c8..ed33e3b519 100644
--- a/litellm/proxy/hooks/prompt_injection_detection.py
+++ b/litellm/proxy/hooks/prompt_injection_detection.py
@@ -204,7 +204,12 @@ class _OPTIONAL_PromptInjectionDetection(CustomLogger):
return e.detail["error"]
raise e
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.hooks.prompt_injection_detection.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
async def async_moderation_hook(
self,
diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index 88fc0e9136..df6dfd1394 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -21,7 +21,14 @@ model_list:
general_settings:
master_key: sk-1234
+ alerting: ["slack"]
litellm_settings:
callbacks: ["otel"]
- store_audit_logs: true
\ No newline at end of file
+ store_audit_logs: true
+ redact_messages_in_exceptions: True
+ enforced_params:
+ - user
+ - metadata
+ - metadata.generation_name
+
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 25e46269e1..2071ef63bc 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -111,6 +111,7 @@ from litellm.proxy.utils import (
encrypt_value,
decrypt_value,
_to_ns,
+ get_error_message_str,
)
from litellm import (
CreateBatchRequest,
@@ -120,7 +121,10 @@ from litellm import (
CreateFileRequest,
)
from litellm.proxy.secret_managers.google_kms import load_google_kms
-from litellm.proxy.secret_managers.aws_secret_manager import load_aws_secret_manager
+from litellm.proxy.secret_managers.aws_secret_manager import (
+ load_aws_secret_manager,
+ load_aws_kms,
+)
import pydantic
from litellm.proxy._types import *
from litellm.caching import DualCache, RedisCache
@@ -133,7 +137,10 @@ from litellm.router import (
AssistantsTypedDict,
)
from litellm.router import ModelInfo as RouterModelInfo
-from litellm._logging import verbose_router_logger, verbose_proxy_logger
+from litellm._logging import (
+ verbose_router_logger,
+ verbose_proxy_logger,
+)
from litellm.proxy.auth.handle_jwt import JWTHandler
from litellm.proxy.auth.litellm_license import LicenseCheck
from litellm.proxy.auth.model_checks import (
@@ -1515,7 +1522,12 @@ async def user_api_key_auth(
else:
raise Exception()
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.user_api_key_auth(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, litellm.BudgetExceededError):
raise ProxyException(
message=e.message, type="auth_error", param=None, code=400
@@ -2782,10 +2794,12 @@ class ProxyConfig:
load_google_kms(use_google_kms=True)
elif (
key_management_system
- == KeyManagementSystem.AWS_SECRET_MANAGER.value
+ == KeyManagementSystem.AWS_SECRET_MANAGER.value # noqa: F405
):
### LOAD FROM AWS SECRET MANAGER ###
load_aws_secret_manager(use_aws_secret_manager=True)
+ elif key_management_system == KeyManagementSystem.AWS_KMS.value:
+ load_aws_kms(use_aws_kms=True)
else:
raise ValueError("Invalid Key Management System selected")
key_management_settings = general_settings.get(
@@ -2819,6 +2833,7 @@ class ProxyConfig:
master_key = general_settings.get(
"master_key", litellm.get_secret("LITELLM_MASTER_KEY", None)
)
+
if master_key and master_key.startswith("os.environ/"):
master_key = litellm.get_secret(master_key)
if not isinstance(master_key, str):
@@ -2909,6 +2924,16 @@ class ProxyConfig:
)
health_check_interval = general_settings.get("health_check_interval", 300)
+ ## check if user has set a premium feature in general_settings
+ if (
+ general_settings.get("enforced_params") is not None
+ and premium_user is not True
+ ):
+ raise ValueError(
+ "Trying to use `enforced_params`"
+ + CommonProxyErrors.not_premium_user.value
+ )
+
router_params: dict = {
"cache_responses": litellm.cache
!= None, # cache if user passed in cache values
@@ -3522,7 +3547,12 @@ async def generate_key_helper_fn(
)
key_data["token_id"] = getattr(create_key_response, "token", None)
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.generate_key_helper_fn(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise e
raise HTTPException(
@@ -3561,7 +3591,12 @@ async def delete_verification_token(tokens: List, user_id: Optional[str] = None)
else:
raise Exception("DB not connected. prisma_client is None")
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.delete_verification_token(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
raise e
return deleted_tokens
@@ -3722,7 +3757,12 @@ async def async_assistants_data_generator(
done_message = "[DONE]"
yield f"data: {done_message}\n\n"
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.async_assistants_data_generator(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict,
original_exception=e,
@@ -3732,9 +3772,6 @@ async def async_assistants_data_generator(
f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`"
)
router_model_names = llm_router.model_names if llm_router is not None else []
- if user_debug:
- traceback.print_exc()
-
if isinstance(e, HTTPException):
raise e
else:
@@ -3774,7 +3811,12 @@ async def async_data_generator(
done_message = "[DONE]"
yield f"data: {done_message}\n\n"
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.async_data_generator(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict,
original_exception=e,
@@ -3784,8 +3826,6 @@ async def async_data_generator(
f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`"
)
router_model_names = llm_router.model_names if llm_router is not None else []
- if user_debug:
- traceback.print_exc()
if isinstance(e, HTTPException):
raise e
@@ -3846,6 +3886,18 @@ def on_backoff(details):
verbose_proxy_logger.debug("Backing off... this was attempt # %s", details["tries"])
+def giveup(e):
+ result = not (
+ isinstance(e, ProxyException)
+ and getattr(e, "message", None) is not None
+ and isinstance(e.message, str)
+ and "Max parallel request limit reached" in e.message
+ )
+ if result:
+ verbose_proxy_logger.info(json.dumps({"event": "giveup", "exception": str(e)}))
+ return result
+
+
@router.on_event("startup")
async def startup_event():
global prisma_client, master_key, use_background_health_checks, llm_router, llm_model_list, general_settings, proxy_budget_rescheduler_min_time, proxy_budget_rescheduler_max_time, litellm_proxy_admin_name, db_writer_client, store_model_in_db
@@ -4130,12 +4182,8 @@ def model_list(
max_tries=litellm.num_retries or 3, # maximum number of retries
max_time=litellm.request_timeout or 60, # maximum total time to retry for
on_backoff=on_backoff, # specifying the function to call on backoff
- giveup=lambda e: not (
- isinstance(e, ProxyException)
- and getattr(e, "message", None) is not None
- and isinstance(e.message, str)
- and "Max parallel request limit reached" in e.message
- ), # the result of the logical expression is on the second position
+ giveup=giveup,
+ logger=verbose_proxy_logger,
)
async def chat_completion(
request: Request,
@@ -4144,6 +4192,7 @@ async def chat_completion(
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
global general_settings, user_debug, proxy_logging_obj, llm_model_list
+
data = {}
try:
body = await request.body()
@@ -4434,7 +4483,12 @@ async def chat_completion(
return _chat_response
except Exception as e:
data["litellm_status"] = "fail" # used for alerting
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.chat_completion(): Exception occured - {}".format(
+ get_error_message_str(e=e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
@@ -4445,8 +4499,6 @@ async def chat_completion(
litellm_debug_info,
)
router_model_names = llm_router.model_names if llm_router is not None else []
- if user_debug:
- traceback.print_exc()
if isinstance(e, HTTPException):
raise ProxyException(
@@ -4678,15 +4730,12 @@ async def completion(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- verbose_proxy_logger.debug("EXCEPTION RAISED IN PROXY MAIN.PY")
- litellm_debug_info = getattr(e, "litellm_debug_info", "")
- verbose_proxy_logger.debug(
- "\033[1;31mAn error occurred: %s %s\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`",
- e,
- litellm_debug_info,
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.completion(): Exception occured - {}".format(
+ str(e)
+ )
)
- traceback.print_exc()
- error_traceback = traceback.format_exc()
+ verbose_proxy_logger.debug(traceback.format_exc())
error_msg = f"{str(e)}"
raise ProxyException(
message=getattr(e, "message", error_msg),
@@ -4896,7 +4945,12 @@ async def embeddings(
e,
litellm_debug_info,
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.embeddings(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e)),
@@ -5075,7 +5129,12 @@ async def image_generation(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.image_generation(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e)),
@@ -5253,7 +5312,12 @@ async def audio_speech(
)
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.audio_speech(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
raise e
@@ -5442,7 +5506,12 @@ async def audio_transcriptions(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.audio_transcription(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e.detail)),
@@ -5451,7 +5520,6 @@ async def audio_transcriptions(
code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
)
else:
- error_traceback = traceback.format_exc()
error_msg = f"{str(e)}"
raise ProxyException(
message=getattr(e, "message", error_msg),
@@ -5579,7 +5647,12 @@ async def get_assistants(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.get_assistants(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e.detail)),
@@ -5588,7 +5661,6 @@ async def get_assistants(
code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
)
else:
- error_traceback = traceback.format_exc()
error_msg = f"{str(e)}"
raise ProxyException(
message=getattr(e, "message", error_msg),
@@ -5708,7 +5780,12 @@ async def create_threads(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.create_threads(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e.detail)),
@@ -5717,7 +5794,6 @@ async def create_threads(
code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
)
else:
- error_traceback = traceback.format_exc()
error_msg = f"{str(e)}"
raise ProxyException(
message=getattr(e, "message", error_msg),
@@ -5836,7 +5912,12 @@ async def get_thread(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.get_thread(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e.detail)),
@@ -5845,7 +5926,6 @@ async def get_thread(
code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
)
else:
- error_traceback = traceback.format_exc()
error_msg = f"{str(e)}"
raise ProxyException(
message=getattr(e, "message", error_msg),
@@ -5967,7 +6047,12 @@ async def add_messages(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.add_messages(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e.detail)),
@@ -5976,7 +6061,6 @@ async def add_messages(
code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
)
else:
- error_traceback = traceback.format_exc()
error_msg = f"{str(e)}"
raise ProxyException(
message=getattr(e, "message", error_msg),
@@ -6094,7 +6178,12 @@ async def get_messages(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.get_messages(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e.detail)),
@@ -6103,7 +6192,6 @@ async def get_messages(
code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
)
else:
- error_traceback = traceback.format_exc()
error_msg = f"{str(e)}"
raise ProxyException(
message=getattr(e, "message", error_msg),
@@ -6235,7 +6323,12 @@ async def run_thread(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.run_thread(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e.detail)),
@@ -6244,7 +6337,6 @@ async def run_thread(
code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
)
else:
- error_traceback = traceback.format_exc()
error_msg = f"{str(e)}"
raise ProxyException(
message=getattr(e, "message", error_msg),
@@ -6383,7 +6475,12 @@ async def create_batch(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.create_batch(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e.detail)),
@@ -6392,7 +6489,6 @@ async def create_batch(
code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
)
else:
- error_traceback = traceback.format_exc()
error_msg = f"{str(e)}"
raise ProxyException(
message=getattr(e, "message", error_msg),
@@ -6526,7 +6622,12 @@ async def retrieve_batch(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.retrieve_batch(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e.detail)),
@@ -6679,7 +6780,12 @@ async def create_file(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.create_file(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e.detail)),
@@ -6688,7 +6794,6 @@ async def create_file(
code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
)
else:
- error_traceback = traceback.format_exc()
error_msg = f"{str(e)}"
raise ProxyException(
message=getattr(e, "message", error_msg),
@@ -6864,7 +6969,12 @@ async def moderations(
await proxy_logging_obj.post_call_failure_hook(
user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
)
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.moderations(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "message", str(e)),
@@ -6873,7 +6983,6 @@ async def moderations(
code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
)
else:
- error_traceback = traceback.format_exc()
error_msg = f"{str(e)}"
raise ProxyException(
message=getattr(e, "message", error_msg),
@@ -7184,7 +7293,12 @@ async def generate_key_fn(
return GenerateKeyResponse(**response)
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.generate_key_fn(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Authentication Error({str(e)})"),
@@ -9639,7 +9753,12 @@ async def user_info(
}
return response_data
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.user_info(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Authentication Error({str(e)})"),
@@ -9734,7 +9853,12 @@ async def user_update(data: UpdateUserRequest):
return response
# update based on remaining passed in values
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.user_update(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Authentication Error({str(e)})"),
@@ -9787,7 +9911,12 @@ async def user_request_model(request: Request):
return {"status": "success"}
# update based on remaining passed in values
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.user_request_model(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Authentication Error({str(e)})"),
@@ -9829,7 +9958,12 @@ async def user_get_requests():
return {"requests": response}
# update based on remaining passed in values
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.user_get_requests(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Authentication Error({str(e)})"),
@@ -10219,7 +10353,12 @@ async def update_end_user(
# update based on remaining passed in values
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.update_end_user(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Internal Server Error({str(e)})"),
@@ -10303,7 +10442,12 @@ async def delete_end_user(
# update based on remaining passed in values
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.delete_end_user(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Internal Server Error({str(e)})"),
@@ -11606,7 +11750,12 @@ async def add_new_model(
return model_response
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.add_new_model(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Authentication Error({str(e)})"),
@@ -11720,7 +11869,12 @@ async def update_model(
return model_response
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.update_model(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Authentication Error({str(e)})"),
@@ -13954,7 +14108,12 @@ async def update_config(config_info: ConfigYAML):
return {"message": "Config updated successfully"}
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.update_config(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Authentication Error({str(e)})"),
@@ -14427,7 +14586,12 @@ async def get_config():
"available_callbacks": all_available_callbacks,
}
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.get_config(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Authentication Error({str(e)})"),
@@ -14678,7 +14842,12 @@ async def health_services_endpoint(
}
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.health_services_endpoint(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
if isinstance(e, HTTPException):
raise ProxyException(
message=getattr(e, "detail", f"Authentication Error({str(e)})"),
@@ -14757,7 +14926,12 @@ async def health_endpoint(
"unhealthy_count": len(unhealthy_endpoints),
}
except Exception as e:
- traceback.print_exc()
+ verbose_proxy_logger.error(
+ "litellm.proxy.proxy_server.py::health_endpoint(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_proxy_logger.debug(traceback.format_exc())
raise e
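The recurring change in the proxy server hunks above replaces `traceback.print_exc()` (which writes straight to stderr) with structured logging: the exception message is emitted at ERROR level and the full traceback only at DEBUG. A minimal sketch of that pattern, using a standalone logger name as a placeholder for `verbose_proxy_logger`:

```python
# Illustrative sketch of the logging pattern applied above (names are placeholders,
# not the proxy's actual logger): message at ERROR, traceback only at DEBUG.
import logging
import traceback

logging.basicConfig(level=logging.DEBUG)
verbose_logger = logging.getLogger("example_logger")  # stands in for verbose_proxy_logger

def handler():
    try:
        raise ValueError("something went wrong")
    except Exception as e:
        verbose_logger.error(
            "example_module.handler(): Exception occurred - {}".format(str(e))
        )
        verbose_logger.debug(traceback.format_exc())

handler()
```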
diff --git a/litellm/proxy/secret_managers/aws_secret_manager.py b/litellm/proxy/secret_managers/aws_secret_manager.py
index a40b1dffa2..8dd6772cf7 100644
--- a/litellm/proxy/secret_managers/aws_secret_manager.py
+++ b/litellm/proxy/secret_managers/aws_secret_manager.py
@@ -8,7 +8,8 @@ Requires:
* `pip install boto3>=1.28.57`
"""
-import litellm, os
+import litellm
+import os
from typing import Optional
from litellm.proxy._types import KeyManagementSystem
@@ -38,3 +39,21 @@ def load_aws_secret_manager(use_aws_secret_manager: Optional[bool]):
except Exception as e:
raise e
+
+
+def load_aws_kms(use_aws_kms: Optional[bool]):
+ if use_aws_kms is None or use_aws_kms is False:
+ return
+ try:
+ import boto3
+
+ validate_environment()
+
+ # Create a KMS client
+ kms_client = boto3.client("kms", region_name=os.getenv("AWS_REGION_NAME"))
+
+ litellm.secret_manager_client = kms_client
+ litellm._key_management_system = KeyManagementSystem.AWS_KMS
+
+ except Exception as e:
+ raise e
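A hypothetical usage sketch for the new `load_aws_kms` helper. It assumes boto3 is installed and that `AWS_REGION_NAME` plus standard AWS credentials are available in the environment; the region value below is only an example.

```python
# Hypothetical sketch: opt into AWS KMS as the key-management system.
# Assumes boto3 is installed and AWS credentials are configured in the environment.
import os
import litellm
from litellm.proxy.secret_managers.aws_secret_manager import load_aws_kms

os.environ.setdefault("AWS_REGION_NAME", "us-west-2")  # example region, adjust as needed

load_aws_kms(use_aws_kms=True)

print(litellm._key_management_system)        # KeyManagementSystem.AWS_KMS
print(type(litellm.secret_manager_client))   # botocore client for "kms"
```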
diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py
index 8ae811f112..0b5dcd85ba 100644
--- a/litellm/proxy/utils.py
+++ b/litellm/proxy/utils.py
@@ -2889,3 +2889,16 @@ missing_keys_html_form = """
def _to_ns(dt):
return int(dt.timestamp() * 1e9)
+
+def get_error_message_str(e: Exception) -> str:
+ error_message = ""
+ if isinstance(e, HTTPException):
+ if isinstance(e.detail, str):
+ error_message = e.detail
+ elif isinstance(e.detail, dict):
+ error_message = json.dumps(e.detail)
+ else:
+ error_message = str(e)
+ else:
+ error_message = str(e)
+ return error_message
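A small sketch of how the new `get_error_message_str` helper normalizes different exception shapes into a plain string (the sample exceptions below are illustrative, and the import assumes the proxy dependencies are installed):

```python
# Sketch: get_error_message_str returns HTTPException.detail (a str, or a dict
# serialized to JSON) and falls back to str(e) for any other exception.
from fastapi import HTTPException
from litellm.proxy.utils import get_error_message_str

print(get_error_message_str(HTTPException(status_code=400, detail="bad request")))
# -> bad request
print(get_error_message_str(HTTPException(status_code=400, detail={"error": "budget exceeded"})))
# -> {"error": "budget exceeded"}
print(get_error_message_str(ValueError("plain exception")))
# -> plain exception
```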
diff --git a/litellm/py.typed b/litellm/py.typed
new file mode 100644
index 0000000000..5686005abc
--- /dev/null
+++ b/litellm/py.typed
@@ -0,0 +1,2 @@
+# Marker file to instruct type checkers to look for inline type annotations in this package.
+# See PEP 561 for more information.
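With the `py.typed` marker shipped (PEP 561), downstream projects can type-check code that imports litellm without separate stubs. A minimal sketch, assuming mypy is installed and `token_counter` keeps its current annotated signature:

```python
# Sketch: run `mypy this_file.py` - with litellm/py.typed present, mypy reads
# litellm's inline annotations, so a wrongly typed argument here would be reported.
import litellm

def prompt_tokens(prompt: str) -> int:
    return litellm.token_counter(model="gpt-3.5-turbo", text=prompt)

print(prompt_tokens("hello world"))
```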
diff --git a/litellm/router.py b/litellm/router.py
index e3fed496f1..0187b441f7 100644
--- a/litellm/router.py
+++ b/litellm/router.py
@@ -220,8 +220,6 @@ class Router:
[]
) # names of models under litellm_params. ex. azure/chatgpt-v-2
self.deployment_latency_map = {}
- ### SCHEDULER ###
- self.scheduler = Scheduler(polling_interval=polling_interval)
### CACHING ###
cache_type: Literal["local", "redis"] = "local" # default to an in-memory cache
redis_cache = None
@@ -259,6 +257,10 @@ class Router:
redis_cache=redis_cache, in_memory_cache=InMemoryCache()
) # use a dual cache (Redis+In-Memory) for tracking cooldowns, usage, etc.
+ ### SCHEDULER ###
+ self.scheduler = Scheduler(
+ polling_interval=polling_interval, redis_cache=redis_cache
+ )
self.default_deployment = None # use this to track the users default deployment, when they want to use model = *
self.default_max_parallel_requests = default_max_parallel_requests
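With this reordering, the `Scheduler` is constructed after the Router's cache, so a Router configured with Redis now shares that Redis instance for its priority queue as well as for cooldown/usage tracking. A hedged sketch; the connection details and API key are placeholders, not real values:

```python
# Sketch: a Router whose Redis connection now also backs the request scheduler.
from litellm import Router

router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {"model": "openai/gpt-3.5-turbo", "api_key": "sk-placeholder"},
        }
    ],
    redis_host="localhost",   # placeholder connection details
    redis_port=6379,
    redis_password=None,
)
```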
@@ -2096,8 +2098,8 @@ class Router:
except Exception as e:
raise e
except Exception as e:
- verbose_router_logger.debug(f"An exception occurred - {str(e)}")
- traceback.print_exc()
+ verbose_router_logger.error(f"An exception occurred - {str(e)}")
+ verbose_router_logger.debug(traceback.format_exc())
raise original_exception
async def async_function_with_retries(self, *args, **kwargs):
@@ -4048,6 +4050,12 @@ class Router:
for idx in reversed(invalid_model_indices):
_returned_deployments.pop(idx)
+ ## ORDER FILTERING ## -> if user set 'order' in deployments, return deployments with lowest order (e.g. order=1 > order=2)
+ if len(_returned_deployments) > 0:
+ _returned_deployments = litellm.utils._get_order_filtered_deployments(
+ _returned_deployments
+ )
+
return _returned_deployments
def _common_checks_available_deployment(
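The new order filter means that when several healthy deployments exist for a model group, only those with the lowest `order` value remain candidates. A sketch of what tagging deployments with `order` might look like; the API keys are placeholders:

```python
# Sketch: with both deployments healthy, the order=1 deployment is preferred;
# order=2 only receives traffic once order=1 is filtered out (e.g. cooled down).
from litellm import Router

router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {"model": "openai/gpt-3.5-turbo", "api_key": "sk-primary", "order": 1},
        },
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {"model": "openai/gpt-3.5-turbo", "api_key": "sk-backup", "order": 2},
        },
    ]
)
```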
diff --git a/litellm/router_strategy/lowest_cost.py b/litellm/router_strategy/lowest_cost.py
index 1670490e16..46cbb2181e 100644
--- a/litellm/router_strategy/lowest_cost.py
+++ b/litellm/router_strategy/lowest_cost.py
@@ -1,11 +1,9 @@
#### What this does ####
# picks based on response time (for streaming, this is time to first token)
-from pydantic import BaseModel, Extra, Field, root_validator
-import os, requests, random # type: ignore
+from pydantic import BaseModel
from typing import Optional, Union, List, Dict
from datetime import datetime, timedelta
-import random
-
+from litellm import verbose_logger
import traceback
from litellm.caching import DualCache
from litellm.integrations.custom_logger import CustomLogger
@@ -119,7 +117,12 @@ class LowestCostLoggingHandler(CustomLogger):
if self.test_flag:
self.logged_success += 1
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.proxy.hooks.prompt_injection_detection.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
pass
async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
@@ -201,7 +204,12 @@ class LowestCostLoggingHandler(CustomLogger):
if self.test_flag:
self.logged_success += 1
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.proxy.hooks.prompt_injection_detection.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
pass
async def async_get_available_deployments(
diff --git a/litellm/router_strategy/lowest_latency.py b/litellm/router_strategy/lowest_latency.py
index 1e4b151ada..5d71847510 100644
--- a/litellm/router_strategy/lowest_latency.py
+++ b/litellm/router_strategy/lowest_latency.py
@@ -1,16 +1,16 @@
#### What this does ####
# picks based on response time (for streaming, this is time to first token)
-from pydantic import BaseModel, Extra, Field, root_validator # type: ignore
-import dotenv, os, requests, random # type: ignore
+from pydantic import BaseModel
+import random
from typing import Optional, Union, List, Dict
from datetime import datetime, timedelta
-import random
import traceback
from litellm.caching import DualCache
from litellm.integrations.custom_logger import CustomLogger
from litellm import ModelResponse
from litellm import token_counter
import litellm
+from litellm import verbose_logger
class LiteLLMBase(BaseModel):
@@ -165,7 +165,12 @@ class LowestLatencyLoggingHandler(CustomLogger):
if self.test_flag:
self.logged_success += 1
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.proxy.hooks.prompt_injection_detection.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
pass
async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
@@ -229,7 +234,12 @@ class LowestLatencyLoggingHandler(CustomLogger):
# do nothing if it's not a timeout error
return
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.proxy.hooks.prompt_injection_detection.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
pass
async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
@@ -352,7 +362,12 @@ class LowestLatencyLoggingHandler(CustomLogger):
if self.test_flag:
self.logged_success += 1
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.router_strategy.lowest_latency.py::async_log_success_event(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
pass
def get_available_deployments(
diff --git a/litellm/router_strategy/lowest_tpm_rpm.py b/litellm/router_strategy/lowest_tpm_rpm.py
index 15460051b8..a21c69abf7 100644
--- a/litellm/router_strategy/lowest_tpm_rpm.py
+++ b/litellm/router_strategy/lowest_tpm_rpm.py
@@ -11,6 +11,7 @@ from litellm.integrations.custom_logger import CustomLogger
from litellm._logging import verbose_router_logger
from litellm.utils import print_verbose
+
class LiteLLMBase(BaseModel):
"""
Implements default functions, all pydantic objects should have.
@@ -23,16 +24,20 @@ class LiteLLMBase(BaseModel):
# if using pydantic v1
return self.dict()
+
class RoutingArgs(LiteLLMBase):
- ttl: int = 1 * 60 # 1min (RPM/TPM expire key)
-
+ ttl: int = 1 * 60 # 1min (RPM/TPM expire key)
+
+
class LowestTPMLoggingHandler(CustomLogger):
test_flag: bool = False
logged_success: int = 0
logged_failure: int = 0
default_cache_time_seconds: int = 1 * 60 * 60 # 1 hour
- def __init__(self, router_cache: DualCache, model_list: list, routing_args: dict = {}):
+ def __init__(
+ self, router_cache: DualCache, model_list: list, routing_args: dict = {}
+ ):
self.router_cache = router_cache
self.model_list = model_list
self.routing_args = RoutingArgs(**routing_args)
@@ -72,19 +77,28 @@ class LowestTPMLoggingHandler(CustomLogger):
request_count_dict = self.router_cache.get_cache(key=tpm_key) or {}
request_count_dict[id] = request_count_dict.get(id, 0) + total_tokens
- self.router_cache.set_cache(key=tpm_key, value=request_count_dict, ttl=self.routing_args.ttl)
+ self.router_cache.set_cache(
+ key=tpm_key, value=request_count_dict, ttl=self.routing_args.ttl
+ )
## RPM
request_count_dict = self.router_cache.get_cache(key=rpm_key) or {}
request_count_dict[id] = request_count_dict.get(id, 0) + 1
- self.router_cache.set_cache(key=rpm_key, value=request_count_dict, ttl=self.routing_args.ttl)
+ self.router_cache.set_cache(
+ key=rpm_key, value=request_count_dict, ttl=self.routing_args.ttl
+ )
### TESTING ###
if self.test_flag:
self.logged_success += 1
except Exception as e:
- traceback.print_exc()
+ verbose_router_logger.error(
+ "litellm.router_strategy.lowest_tpm_rpm.py::async_log_success_event(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_router_logger.debug(traceback.format_exc())
pass
async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
@@ -123,19 +137,28 @@ class LowestTPMLoggingHandler(CustomLogger):
request_count_dict = self.router_cache.get_cache(key=tpm_key) or {}
request_count_dict[id] = request_count_dict.get(id, 0) + total_tokens
- self.router_cache.set_cache(key=tpm_key, value=request_count_dict, ttl=self.routing_args.ttl)
+ self.router_cache.set_cache(
+ key=tpm_key, value=request_count_dict, ttl=self.routing_args.ttl
+ )
## RPM
request_count_dict = self.router_cache.get_cache(key=rpm_key) or {}
request_count_dict[id] = request_count_dict.get(id, 0) + 1
- self.router_cache.set_cache(key=rpm_key, value=request_count_dict, ttl=self.routing_args.ttl)
+ self.router_cache.set_cache(
+ key=rpm_key, value=request_count_dict, ttl=self.routing_args.ttl
+ )
### TESTING ###
if self.test_flag:
self.logged_success += 1
except Exception as e:
- traceback.print_exc()
+ verbose_router_logger.error(
+ "litellm.router_strategy.lowest_tpm_rpm.py::async_log_success_event(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_router_logger.debug(traceback.format_exc())
pass
def get_available_deployments(
diff --git a/litellm/router_strategy/lowest_tpm_rpm_v2.py b/litellm/router_strategy/lowest_tpm_rpm_v2.py
index 40e75031ad..e3b8c8b770 100644
--- a/litellm/router_strategy/lowest_tpm_rpm_v2.py
+++ b/litellm/router_strategy/lowest_tpm_rpm_v2.py
@@ -1,19 +1,19 @@
#### What this does ####
# identifies lowest tpm deployment
from pydantic import BaseModel
-import dotenv, os, requests, random
+import random
from typing import Optional, Union, List, Dict
-import datetime as datetime_og
-from datetime import datetime
-import traceback, asyncio, httpx
+import traceback
+import httpx
import litellm
from litellm import token_counter
from litellm.caching import DualCache
from litellm.integrations.custom_logger import CustomLogger
-from litellm._logging import verbose_router_logger
+from litellm._logging import verbose_router_logger, verbose_logger
from litellm.utils import print_verbose, get_utc_datetime
from litellm.types.router import RouterErrors
+
class LiteLLMBase(BaseModel):
"""
Implements default functions, all pydantic objects should have.
@@ -22,12 +22,14 @@ class LiteLLMBase(BaseModel):
def json(self, **kwargs):
try:
return self.model_dump() # noqa
- except:
+ except Exception as e:
# if using pydantic v1
return self.dict()
+
class RoutingArgs(LiteLLMBase):
- ttl: int = 1 * 60 # 1min (RPM/TPM expire key)
+ ttl: int = 1 * 60 # 1min (RPM/TPM expire key)
+
class LowestTPMLoggingHandler_v2(CustomLogger):
"""
@@ -47,7 +49,9 @@ class LowestTPMLoggingHandler_v2(CustomLogger):
logged_failure: int = 0
default_cache_time_seconds: int = 1 * 60 * 60 # 1 hour
- def __init__(self, router_cache: DualCache, model_list: list, routing_args: dict = {}):
+ def __init__(
+ self, router_cache: DualCache, model_list: list, routing_args: dict = {}
+ ):
self.router_cache = router_cache
self.model_list = model_list
self.routing_args = RoutingArgs(**routing_args)
@@ -104,7 +108,9 @@ class LowestTPMLoggingHandler_v2(CustomLogger):
)
else:
# if local result below limit, check redis ## prevent unnecessary redis checks
- result = self.router_cache.increment_cache(key=rpm_key, value=1, ttl=self.routing_args.ttl)
+ result = self.router_cache.increment_cache(
+ key=rpm_key, value=1, ttl=self.routing_args.ttl
+ )
if result is not None and result > deployment_rpm:
raise litellm.RateLimitError(
message="Deployment over defined rpm limit={}. current usage={}".format(
@@ -244,12 +250,19 @@ class LowestTPMLoggingHandler_v2(CustomLogger):
# update cache
## TPM
- self.router_cache.increment_cache(key=tpm_key, value=total_tokens, ttl=self.routing_args.ttl)
+ self.router_cache.increment_cache(
+ key=tpm_key, value=total_tokens, ttl=self.routing_args.ttl
+ )
### TESTING ###
if self.test_flag:
self.logged_success += 1
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.proxy.hooks.prompt_injection_detection.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
pass
async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
@@ -295,7 +308,12 @@ class LowestTPMLoggingHandler_v2(CustomLogger):
if self.test_flag:
self.logged_success += 1
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.proxy.hooks.prompt_injection_detection.py::async_pre_call_hook(): Exception occured - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
pass
def _common_checks_available_deployment(
diff --git a/litellm/scheduler.py b/litellm/scheduler.py
index aad4ba4e3e..8ee7f0e078 100644
--- a/litellm/scheduler.py
+++ b/litellm/scheduler.py
@@ -1,13 +1,14 @@
-import heapq, time
+import heapq
from pydantic import BaseModel
from typing import Optional
import enum
-from litellm.caching import DualCache
+from litellm.caching import DualCache, RedisCache
from litellm import print_verbose
class SchedulerCacheKeys(enum.Enum):
queue = "scheduler:queue"
+ default_in_memory_ttl = 5 # cache queue in-memory for 5s when redis cache available
class DefaultPriorities(enum.Enum):
@@ -25,18 +26,24 @@ class FlowItem(BaseModel):
class Scheduler:
cache: DualCache
- def __init__(self, polling_interval: Optional[float] = None):
+ def __init__(
+ self,
+ polling_interval: Optional[float] = None,
+ redis_cache: Optional[RedisCache] = None,
+ ):
"""
polling_interval: float or null - frequency of polling queue. Default is 3ms.
"""
self.queue: list = []
- self.cache = DualCache()
+ default_in_memory_ttl: Optional[float] = None
+ if redis_cache is not None:
+ # if redis-cache available frequently poll that instead of using in-memory.
+ default_in_memory_ttl = SchedulerCacheKeys.default_in_memory_ttl.value
+ self.cache = DualCache(
+ redis_cache=redis_cache, default_in_memory_ttl=default_in_memory_ttl
+ )
self.polling_interval = polling_interval or 0.03 # default to 3ms
- def update_variables(self, cache: Optional[DualCache] = None):
- if cache is not None:
- self.cache = cache
-
async def add_request(self, request: FlowItem):
# We use the priority directly, as lower values indicate higher priority
# get the queue
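A sketch of the new `Scheduler` wiring: when a `RedisCache` is supplied, the queue lives in Redis and in-memory reads use the short 5s TTL defined above. The connection details are placeholders, and the `FlowItem` fields shown are assumptions (the hunk above does not show the model's fields):

```python
# Sketch: Scheduler backed by Redis; requires a reachable Redis instance.
# FlowItem fields (priority, request_id, model_name) are assumed, not confirmed by the hunk above.
import asyncio
from litellm.caching import RedisCache
from litellm.scheduler import Scheduler, FlowItem

scheduler = Scheduler(
    polling_interval=0.03,
    redis_cache=RedisCache(host="localhost", port=6379, password=None),
)

async def main():
    # Lower priority values are served first.
    await scheduler.add_request(
        FlowItem(priority=0, request_id="req-1", model_name="gpt-3.5-turbo")
    )

asyncio.run(main())
```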
diff --git a/litellm/tests/log.txt b/litellm/tests/log.txt
deleted file mode 100644
index ea07ca7e12..0000000000
--- a/litellm/tests/log.txt
+++ /dev/null
@@ -1,4274 +0,0 @@
-============================= test session starts ==============================
-platform darwin -- Python 3.11.4, pytest-8.2.0, pluggy-1.5.0
-rootdir: /Users/krrishdholakia/Documents/litellm
-configfile: pyproject.toml
-plugins: asyncio-0.23.6, mock-3.14.0, anyio-4.2.0
-asyncio: mode=Mode.STRICT
-collected 1 item
-
-test_amazing_vertex_completion.py F [100%]
-
-=================================== FAILURES ===================================
-____________________________ test_gemini_pro_vision ____________________________
-
-model = 'gemini-1.5-flash-preview-0514'
-messages = [{'content': [{'text': 'Whats in this image?', 'type': 'text'}, {'image_url': {'url': 'gs://cloud-samples-data/generative-ai/image/boats.jpeg'}, 'type': 'image_url'}], 'role': 'user'}]
-model_response = ModelResponse(id='chatcmpl-722df0e7-4e2d-44e6-9e2c-49823faa0189', choices=[Choices(finish_reason='stop', index=0, mess... role='assistant'))], created=1716145725, model=None, object='chat.completion', system_fingerprint=None, usage=Usage())
-print_verbose =
-encoding =
-logging_obj =
-vertex_project = None, vertex_location = None, vertex_credentials = None
-optional_params = {}
-litellm_params = {'acompletion': False, 'api_base': '', 'api_key': None, 'completion_call_id': None, ...}
-logger_fn = None, acompletion = False
-
- def completion(
- model: str,
- messages: list,
- model_response: ModelResponse,
- print_verbose: Callable,
- encoding,
- logging_obj,
- vertex_project=None,
- vertex_location=None,
- vertex_credentials=None,
- optional_params=None,
- litellm_params=None,
- logger_fn=None,
- acompletion: bool = False,
- ):
- try:
- import vertexai
- except:
- raise VertexAIError(
- status_code=400,
- message="vertexai import failed please run `pip install google-cloud-aiplatform`",
- )
-
- if not (
- hasattr(vertexai, "preview") or hasattr(vertexai.preview, "language_models")
- ):
- raise VertexAIError(
- status_code=400,
- message="""Upgrade vertex ai. Run `pip install "google-cloud-aiplatform>=1.38"`""",
- )
- try:
- from vertexai.preview.language_models import (
- ChatModel,
- CodeChatModel,
- InputOutputTextPair,
- )
- from vertexai.language_models import TextGenerationModel, CodeGenerationModel
- from vertexai.preview.generative_models import (
- GenerativeModel,
- Part,
- GenerationConfig,
- )
- from google.cloud import aiplatform # type: ignore
- from google.protobuf import json_format # type: ignore
- from google.protobuf.struct_pb2 import Value # type: ignore
- from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types # type: ignore
- import google.auth # type: ignore
- import proto # type: ignore
-
- ## Load credentials with the correct quota project ref: https://github.com/googleapis/python-aiplatform/issues/2557#issuecomment-1709284744
- print_verbose(
- f"VERTEX AI: vertex_project={vertex_project}; vertex_location={vertex_location}"
- )
- if vertex_credentials is not None and isinstance(vertex_credentials, str):
- import google.oauth2.service_account
-
- json_obj = json.loads(vertex_credentials)
-
- creds = google.oauth2.service_account.Credentials.from_service_account_info(
- json_obj,
- scopes=["https://www.googleapis.com/auth/cloud-platform"],
- )
- else:
- creds, _ = google.auth.default(quota_project_id=vertex_project)
- print_verbose(
- f"VERTEX AI: creds={creds}; google application credentials: {os.getenv('GOOGLE_APPLICATION_CREDENTIALS')}"
- )
- vertexai.init(
- project=vertex_project, location=vertex_location, credentials=creds
- )
-
- ## Load Config
- config = litellm.VertexAIConfig.get_config()
- for k, v in config.items():
- if k not in optional_params:
- optional_params[k] = v
-
- ## Process safety settings into format expected by vertex AI
- safety_settings = None
- if "safety_settings" in optional_params:
- safety_settings = optional_params.pop("safety_settings")
- if not isinstance(safety_settings, list):
- raise ValueError("safety_settings must be a list")
- if len(safety_settings) > 0 and not isinstance(safety_settings[0], dict):
- raise ValueError("safety_settings must be a list of dicts")
- safety_settings = [
- gapic_content_types.SafetySetting(x) for x in safety_settings
- ]
-
- # vertexai does not use an API key, it looks for credentials.json in the environment
-
- prompt = " ".join(
- [
- message["content"]
- for message in messages
- if isinstance(message["content"], str)
- ]
- )
-
- mode = ""
-
- request_str = ""
- response_obj = None
- async_client = None
- instances = None
- client_options = {
- "api_endpoint": f"{vertex_location}-aiplatform.googleapis.com"
- }
- if (
- model in litellm.vertex_language_models
- or model in litellm.vertex_vision_models
- ):
- llm_model = GenerativeModel(model)
- mode = "vision"
- request_str += f"llm_model = GenerativeModel({model})\n"
- elif model in litellm.vertex_chat_models:
- llm_model = ChatModel.from_pretrained(model)
- mode = "chat"
- request_str += f"llm_model = ChatModel.from_pretrained({model})\n"
- elif model in litellm.vertex_text_models:
- llm_model = TextGenerationModel.from_pretrained(model)
- mode = "text"
- request_str += f"llm_model = TextGenerationModel.from_pretrained({model})\n"
- elif model in litellm.vertex_code_text_models:
- llm_model = CodeGenerationModel.from_pretrained(model)
- mode = "text"
- request_str += f"llm_model = CodeGenerationModel.from_pretrained({model})\n"
- elif model in litellm.vertex_code_chat_models: # vertex_code_llm_models
- llm_model = CodeChatModel.from_pretrained(model)
- mode = "chat"
- request_str += f"llm_model = CodeChatModel.from_pretrained({model})\n"
- elif model == "private":
- mode = "private"
- model = optional_params.pop("model_id", None)
- # private endpoint requires a dict instead of JSON
- instances = [optional_params.copy()]
- instances[0]["prompt"] = prompt
- llm_model = aiplatform.PrivateEndpoint(
- endpoint_name=model,
- project=vertex_project,
- location=vertex_location,
- )
- request_str += f"llm_model = aiplatform.PrivateEndpoint(endpoint_name={model}, project={vertex_project}, location={vertex_location})\n"
- else: # assume vertex model garden on public endpoint
- mode = "custom"
-
- instances = [optional_params.copy()]
- instances[0]["prompt"] = prompt
- instances = [
- json_format.ParseDict(instance_dict, Value())
- for instance_dict in instances
- ]
- # Will determine the API used based on async parameter
- llm_model = None
-
- # NOTE: async prediction and streaming under "private" mode isn't supported by aiplatform right now
- if acompletion == True:
- data = {
- "llm_model": llm_model,
- "mode": mode,
- "prompt": prompt,
- "logging_obj": logging_obj,
- "request_str": request_str,
- "model": model,
- "model_response": model_response,
- "encoding": encoding,
- "messages": messages,
- "print_verbose": print_verbose,
- "client_options": client_options,
- "instances": instances,
- "vertex_location": vertex_location,
- "vertex_project": vertex_project,
- "safety_settings": safety_settings,
- **optional_params,
- }
- if optional_params.get("stream", False) is True:
- # async streaming
- return async_streaming(**data)
-
- return async_completion(**data)
-
- if mode == "vision":
- print_verbose("\nMaking VertexAI Gemini Pro / Pro Vision Call")
- print_verbose(f"\nProcessing input messages = {messages}")
- tools = optional_params.pop("tools", None)
- content = _gemini_convert_messages_text(messages=messages)
- stream = optional_params.pop("stream", False)
- if stream == True:
- request_str += f"response = llm_model.generate_content({content}, generation_config=GenerationConfig(**{optional_params}), safety_settings={safety_settings}, stream={stream})\n"
- logging_obj.pre_call(
- input=prompt,
- api_key=None,
- additional_args={
- "complete_input_dict": optional_params,
- "request_str": request_str,
- },
- )
-
- model_response = llm_model.generate_content(
- contents={"content": content},
- generation_config=optional_params,
- safety_settings=safety_settings,
- stream=True,
- tools=tools,
- )
-
- return model_response
-
- request_str += f"response = llm_model.generate_content({content})\n"
- ## LOGGING
- logging_obj.pre_call(
- input=prompt,
- api_key=None,
- additional_args={
- "complete_input_dict": optional_params,
- "request_str": request_str,
- },
- )
-
- ## LLM Call
-> response = llm_model.generate_content(
- contents=content,
- generation_config=optional_params,
- safety_settings=safety_settings,
- tools=tools,
- )
-
-../llms/vertex_ai.py:740:
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-../proxy/myenv/lib/python3.11/site-packages/vertexai/generative_models/_generative_models.py:405: in generate_content
- return self._generate_content(
-../proxy/myenv/lib/python3.11/site-packages/vertexai/generative_models/_generative_models.py:487: in _generate_content
- request = self._prepare_request(
-../proxy/myenv/lib/python3.11/site-packages/vertexai/generative_models/_generative_models.py:274: in _prepare_request
- contents = [
-../proxy/myenv/lib/python3.11/site-packages/vertexai/generative_models/_generative_models.py:275: in
- gapic_content_types.Content(content_dict) for content_dict in contents
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
-self = <[AttributeError('Unknown field for Content: _pb') raised in repr()] Content object at 0x1646aaa90>
-mapping = {'parts': [{'text': 'Whats in this image?'}, file_data {
- mime_type: "image/jpeg"
- file_uri: "gs://cloud-samples-data/generative-ai/image/boats.jpeg"
-}
-], 'role': 'user'}
-ignore_unknown_fields = False, kwargs = {}
-params = {'parts': [text: "Whats in this image?"
-, file_data {
- mime_type: "image/jpeg"
- file_uri: "gs://cloud-samples-data/generative-ai/image/boats.jpeg"
-}
-], 'role': 'user'}
-marshal = , key = 'parts'
-value = [{'text': 'Whats in this image?'}, file_data {
- mime_type: "image/jpeg"
- file_uri: "gs://cloud-samples-data/generative-ai/image/boats.jpeg"
-}
-]
-pb_value = [text: "Whats in this image?"
-, file_data {
- mime_type: "image/jpeg"
- file_uri: "gs://cloud-samples-data/generative-ai/image/boats.jpeg"
-}
-]
-
- def __init__(
- self,
- mapping=None,
- *,
- ignore_unknown_fields=False,
- **kwargs,
- ):
- # We accept several things for `mapping`:
- # * An instance of this class.
- # * An instance of the underlying protobuf descriptor class.
- # * A dict
- # * Nothing (keyword arguments only).
- if mapping is None:
- if not kwargs:
- # Special fast path for empty construction.
- super().__setattr__("_pb", self._meta.pb())
- return
-
- mapping = kwargs
- elif isinstance(mapping, self._meta.pb):
- # Make a copy of the mapping.
- # This is a constructor for a new object, so users will assume
- # that it will not have side effects on the arguments being
- # passed in.
- #
- # The `wrap` method on the metaclass is the public API for taking
- # ownership of the passed in protobuf object.
- mapping = copy.deepcopy(mapping)
- if kwargs:
- mapping.MergeFrom(self._meta.pb(**kwargs))
-
- super().__setattr__("_pb", mapping)
- return
- elif isinstance(mapping, type(self)):
- # Just use the above logic on mapping's underlying pb.
- self.__init__(mapping=mapping._pb, **kwargs)
- return
- elif isinstance(mapping, collections.abc.Mapping):
- # Can't have side effects on mapping.
- mapping = copy.copy(mapping)
- # kwargs entries take priority for duplicate keys.
- mapping.update(kwargs)
- else:
- # Sanity check: Did we get something not a map? Error if so.
- raise TypeError(
- "Invalid constructor input for %s: %r"
- % (
- self.__class__.__name__,
- mapping,
- )
- )
-
- params = {}
- # Update the mapping to address any values that need to be
- # coerced.
- marshal = self._meta.marshal
- for key, value in mapping.items():
- (key, pb_type) = self._get_pb_type_from_key(key)
- if pb_type is None:
- if ignore_unknown_fields:
- continue
-
- raise ValueError(
- "Unknown field for {}: {}".format(self.__class__.__name__, key)
- )
-
- try:
- pb_value = marshal.to_proto(pb_type, value)
- except ValueError:
- # Underscores may be appended to field names
- # that collide with python or proto-plus keywords.
- # In case a key only exists with a `_` suffix, coerce the key
- # to include the `_` suffix. It's not possible to
- # natively define the same field with a trailing underscore in protobuf.
- # See related issue
- # https://github.com/googleapis/python-api-core/issues/227
- if isinstance(value, dict):
- if _upb:
- # In UPB, pb_type is MessageMeta which doesn't expose attrs like it used to in Python/CPP.
- keys_to_update = [
- item
- for item in value
- if item not in pb_type.DESCRIPTOR.fields_by_name
- and f"{item}_" in pb_type.DESCRIPTOR.fields_by_name
- ]
- else:
- keys_to_update = [
- item
- for item in value
- if not hasattr(pb_type, item)
- and hasattr(pb_type, f"{item}_")
- ]
- for item in keys_to_update:
- value[f"{item}_"] = value.pop(item)
-
- pb_value = marshal.to_proto(pb_type, value)
-
- if pb_value is not None:
- params[key] = pb_value
-
- # Create the internal protocol buffer.
-> super().__setattr__("_pb", self._meta.pb(**params))
-E TypeError: Parameter to MergeFrom() must be instance of same class: expected got .
-
-../proxy/myenv/lib/python3.11/site-packages/proto/message.py:615: TypeError
-
-During handling of the above exception, another exception occurred:
-
-model = 'gemini-1.5-flash-preview-0514'
-messages = [{'content': [{'text': 'Whats in this image?', 'type': 'text'}, {'image_url': {'url': 'gs://cloud-samples-data/generative-ai/image/boats.jpeg'}, 'type': 'image_url'}], 'role': 'user'}]
-timeout = 600.0, temperature = None, top_p = None, n = None, stream = None
-stream_options = None, stop = None, max_tokens = None, presence_penalty = None
-frequency_penalty = None, logit_bias = None, user = None, response_format = None
-seed = None, tools = None, tool_choice = None, logprobs = None
-top_logprobs = None, deployment_id = None, extra_headers = None
-functions = None, function_call = None, base_url = None, api_version = None
-api_key = None, model_list = None
-kwargs = {'litellm_call_id': '7f48b7ab-47b3-4beb-b2b5-fa298be49d3f', 'litellm_logging_obj': }
-args = {'acompletion': False, 'api_base': None, 'api_key': None, 'api_version': None, ...}
-api_base = None, mock_response = None, force_timeout = 600, logger_fn = None
-verbose = False, custom_llm_provider = 'vertex_ai'
-
- @client
- def completion(
- model: str,
- # Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create
- messages: List = [],
- timeout: Optional[Union[float, str, httpx.Timeout]] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- n: Optional[int] = None,
- stream: Optional[bool] = None,
- stream_options: Optional[dict] = None,
- stop=None,
- max_tokens: Optional[int] = None,
- presence_penalty: Optional[float] = None,
- frequency_penalty: Optional[float] = None,
- logit_bias: Optional[dict] = None,
- user: Optional[str] = None,
- # openai v1.0+ new params
- response_format: Optional[dict] = None,
- seed: Optional[int] = None,
- tools: Optional[List] = None,
- tool_choice: Optional[str] = None,
- logprobs: Optional[bool] = None,
- top_logprobs: Optional[int] = None,
- deployment_id=None,
- extra_headers: Optional[dict] = None,
- # soon to be deprecated params by OpenAI
- functions: Optional[List] = None,
- function_call: Optional[str] = None,
- # set api_base, api_version, api_key
- base_url: Optional[str] = None,
- api_version: Optional[str] = None,
- api_key: Optional[str] = None,
- model_list: Optional[list] = None, # pass in a list of api_base,keys, etc.
- # Optional liteLLM function params
- **kwargs,
- ) -> Union[ModelResponse, CustomStreamWrapper]:
- """
- Perform a completion() using any of litellm supported llms (example gpt-4, gpt-3.5-turbo, claude-2, command-nightly)
- Parameters:
- model (str): The name of the language model to use for text completion. see all supported LLMs: https://docs.litellm.ai/docs/providers/
- messages (List): A list of message objects representing the conversation context (default is an empty list).
-
- OPTIONAL PARAMS
- functions (List, optional): A list of functions to apply to the conversation messages (default is an empty list).
- function_call (str, optional): The name of the function to call within the conversation (default is an empty string).
- temperature (float, optional): The temperature parameter for controlling the randomness of the output (default is 1.0).
- top_p (float, optional): The top-p parameter for nucleus sampling (default is 1.0).
- n (int, optional): The number of completions to generate (default is 1).
- stream (bool, optional): If True, return a streaming response (default is False).
- stream_options (dict, optional): A dictionary containing options for the streaming response. Only set this when you set stream: true.
- stop(string/list, optional): - Up to 4 sequences where the LLM API will stop generating further tokens.
- max_tokens (integer, optional): The maximum number of tokens in the generated completion (default is infinity).
- presence_penalty (float, optional): It is used to penalize new tokens based on their existence in the text so far.
- frequency_penalty: It is used to penalize new tokens based on their frequency in the text so far.
- logit_bias (dict, optional): Used to modify the probability of specific tokens appearing in the completion.
- user (str, optional): A unique identifier representing your end-user. This can help the LLM provider to monitor and detect abuse.
- logprobs (bool, optional): Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message
- top_logprobs (int, optional): An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
- metadata (dict, optional): Pass in additional metadata to tag your completion calls - eg. prompt version, details, etc.
- api_base (str, optional): Base URL for the API (default is None).
- api_version (str, optional): API version (default is None).
- api_key (str, optional): API key (default is None).
- model_list (list, optional): List of api base, version, keys
- extra_headers (dict, optional): Additional headers to include in the request.
-
- LITELLM Specific Params
- mock_response (str, optional): If provided, return a mock completion response for testing or debugging purposes (default is None).
- custom_llm_provider (str, optional): Used for Non-OpenAI LLMs, Example usage for bedrock, set model="amazon.titan-tg1-large" and custom_llm_provider="bedrock"
- max_retries (int, optional): The number of retries to attempt (default is 0).
- Returns:
- ModelResponse: A response object containing the generated completion and associated metadata.
-
- Note:
- - This function is used to perform completions() using the specified language model.
- - It supports various optional parameters for customizing the completion behavior.
- - If 'mock_response' is provided, a mock completion response is returned for testing or debugging.
- """
- ######### unpacking kwargs #####################
- args = locals()
- api_base = kwargs.get("api_base", None)
- mock_response = kwargs.get("mock_response", None)
- force_timeout = kwargs.get("force_timeout", 600) ## deprecated
- logger_fn = kwargs.get("logger_fn", None)
- verbose = kwargs.get("verbose", False)
- custom_llm_provider = kwargs.get("custom_llm_provider", None)
- litellm_logging_obj = kwargs.get("litellm_logging_obj", None)
- id = kwargs.get("id", None)
- metadata = kwargs.get("metadata", None)
- model_info = kwargs.get("model_info", None)
- proxy_server_request = kwargs.get("proxy_server_request", None)
- fallbacks = kwargs.get("fallbacks", None)
- headers = kwargs.get("headers", None) or extra_headers
- num_retries = kwargs.get("num_retries", None) ## deprecated
- max_retries = kwargs.get("max_retries", None)
- context_window_fallback_dict = kwargs.get("context_window_fallback_dict", None)
- organization = kwargs.get("organization", None)
- ### CUSTOM MODEL COST ###
- input_cost_per_token = kwargs.get("input_cost_per_token", None)
- output_cost_per_token = kwargs.get("output_cost_per_token", None)
- input_cost_per_second = kwargs.get("input_cost_per_second", None)
- output_cost_per_second = kwargs.get("output_cost_per_second", None)
- ### CUSTOM PROMPT TEMPLATE ###
- initial_prompt_value = kwargs.get("initial_prompt_value", None)
- roles = kwargs.get("roles", None)
- final_prompt_value = kwargs.get("final_prompt_value", None)
- bos_token = kwargs.get("bos_token", None)
- eos_token = kwargs.get("eos_token", None)
- preset_cache_key = kwargs.get("preset_cache_key", None)
- hf_model_name = kwargs.get("hf_model_name", None)
- supports_system_message = kwargs.get("supports_system_message", None)
- ### TEXT COMPLETION CALLS ###
- text_completion = kwargs.get("text_completion", False)
- atext_completion = kwargs.get("atext_completion", False)
- ### ASYNC CALLS ###
- acompletion = kwargs.get("acompletion", False)
- client = kwargs.get("client", None)
- ### Admin Controls ###
- no_log = kwargs.get("no-log", False)
- ######## end of unpacking kwargs ###########
- openai_params = [
- "functions",
- "function_call",
- "temperature",
- "temperature",
- "top_p",
- "n",
- "stream",
- "stream_options",
- "stop",
- "max_tokens",
- "presence_penalty",
- "frequency_penalty",
- "logit_bias",
- "user",
- "request_timeout",
- "api_base",
- "api_version",
- "api_key",
- "deployment_id",
- "organization",
- "base_url",
- "default_headers",
- "timeout",
- "response_format",
- "seed",
- "tools",
- "tool_choice",
- "max_retries",
- "logprobs",
- "top_logprobs",
- "extra_headers",
- ]
- litellm_params = [
- "metadata",
- "acompletion",
- "atext_completion",
- "text_completion",
- "caching",
- "mock_response",
- "api_key",
- "api_version",
- "api_base",
- "force_timeout",
- "logger_fn",
- "verbose",
- "custom_llm_provider",
- "litellm_logging_obj",
- "litellm_call_id",
- "use_client",
- "id",
- "fallbacks",
- "azure",
- "headers",
- "model_list",
- "num_retries",
- "context_window_fallback_dict",
- "retry_policy",
- "roles",
- "final_prompt_value",
- "bos_token",
- "eos_token",
- "request_timeout",
- "complete_response",
- "self",
- "client",
- "rpm",
- "tpm",
- "max_parallel_requests",
- "input_cost_per_token",
- "output_cost_per_token",
- "input_cost_per_second",
- "output_cost_per_second",
- "hf_model_name",
- "model_info",
- "proxy_server_request",
- "preset_cache_key",
- "caching_groups",
- "ttl",
- "cache",
- "no-log",
- "base_model",
- "stream_timeout",
- "supports_system_message",
- "region_name",
- "allowed_model_region",
- "model_config",
- ]
-
- default_params = openai_params + litellm_params
- non_default_params = {
- k: v for k, v in kwargs.items() if k not in default_params
- } # model-specific params - pass them straight to the model/provider
-
- try:
- if base_url is not None:
- api_base = base_url
- if max_retries is not None: # openai allows openai.OpenAI(max_retries=3)
- num_retries = max_retries
- logging = litellm_logging_obj
- fallbacks = fallbacks or litellm.model_fallbacks
- if fallbacks is not None:
- return completion_with_fallbacks(**args)
- if model_list is not None:
- deployments = [
- m["litellm_params"] for m in model_list if m["model_name"] == model
- ]
- return batch_completion_models(deployments=deployments, **args)
- if litellm.model_alias_map and model in litellm.model_alias_map:
- model = litellm.model_alias_map[
- model
- ] # update the model to the actual value if an alias has been passed in
- model_response = ModelResponse()
- setattr(model_response, "usage", litellm.Usage())
- if (
- kwargs.get("azure", False) == True
- ): # don't remove flag check, to remain backwards compatible for repos like Codium
- custom_llm_provider = "azure"
- if deployment_id != None: # azure llms
- model = deployment_id
- custom_llm_provider = "azure"
- model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(
- model=model,
- custom_llm_provider=custom_llm_provider,
- api_base=api_base,
- api_key=api_key,
- )
- if model_response is not None and hasattr(model_response, "_hidden_params"):
- model_response._hidden_params["custom_llm_provider"] = custom_llm_provider
- model_response._hidden_params["region_name"] = kwargs.get(
- "aws_region_name", None
- ) # support region-based pricing for bedrock
-
- ### TIMEOUT LOGIC ###
- timeout = timeout or kwargs.get("request_timeout", 600) or 600
- # set timeout for 10 minutes by default
- if isinstance(timeout, httpx.Timeout) and not supports_httpx_timeout(
- custom_llm_provider
- ):
- timeout = timeout.read or 600 # default 10 min timeout
- elif not isinstance(timeout, httpx.Timeout):
- timeout = float(timeout) # type: ignore
-
- ### REGISTER CUSTOM MODEL PRICING -- IF GIVEN ###
- if input_cost_per_token is not None and output_cost_per_token is not None:
- litellm.register_model(
- {
- f"{custom_llm_provider}/{model}": {
- "input_cost_per_token": input_cost_per_token,
- "output_cost_per_token": output_cost_per_token,
- "litellm_provider": custom_llm_provider,
- },
- model: {
- "input_cost_per_token": input_cost_per_token,
- "output_cost_per_token": output_cost_per_token,
- "litellm_provider": custom_llm_provider,
- },
- }
- )
- elif (
- input_cost_per_second is not None
- ): # time based pricing just needs cost in place
- output_cost_per_second = output_cost_per_second
- litellm.register_model(
- {
- f"{custom_llm_provider}/{model}": {
- "input_cost_per_second": input_cost_per_second,
- "output_cost_per_second": output_cost_per_second,
- "litellm_provider": custom_llm_provider,
- },
- model: {
- "input_cost_per_second": input_cost_per_second,
- "output_cost_per_second": output_cost_per_second,
- "litellm_provider": custom_llm_provider,
- },
- }
- )
- ### BUILD CUSTOM PROMPT TEMPLATE -- IF GIVEN ###
- custom_prompt_dict = {} # type: ignore
- if (
- initial_prompt_value
- or roles
- or final_prompt_value
- or bos_token
- or eos_token
- ):
- custom_prompt_dict = {model: {}}
- if initial_prompt_value:
- custom_prompt_dict[model]["initial_prompt_value"] = initial_prompt_value
- if roles:
- custom_prompt_dict[model]["roles"] = roles
- if final_prompt_value:
- custom_prompt_dict[model]["final_prompt_value"] = final_prompt_value
- if bos_token:
- custom_prompt_dict[model]["bos_token"] = bos_token
- if eos_token:
- custom_prompt_dict[model]["eos_token"] = eos_token
-
- if (
- supports_system_message is not None
- and isinstance(supports_system_message, bool)
- and supports_system_message == False
- ):
- messages = map_system_message_pt(messages=messages)
- model_api_key = get_api_key(
- llm_provider=custom_llm_provider, dynamic_api_key=api_key
- ) # get the api key from the environment if required for the model
-
- if dynamic_api_key is not None:
- api_key = dynamic_api_key
- # check if user passed in any of the OpenAI optional params
- optional_params = get_optional_params(
- functions=functions,
- function_call=function_call,
- temperature=temperature,
- top_p=top_p,
- n=n,
- stream=stream,
- stream_options=stream_options,
- stop=stop,
- max_tokens=max_tokens,
- presence_penalty=presence_penalty,
- frequency_penalty=frequency_penalty,
- logit_bias=logit_bias,
- user=user,
- # params to identify the model
- model=model,
- custom_llm_provider=custom_llm_provider,
- response_format=response_format,
- seed=seed,
- tools=tools,
- tool_choice=tool_choice,
- max_retries=max_retries,
- logprobs=logprobs,
- top_logprobs=top_logprobs,
- extra_headers=extra_headers,
- **non_default_params,
- )
-
- if litellm.add_function_to_prompt and optional_params.get(
- "functions_unsupported_model", None
- ): # if user opts to add it to prompt, when API doesn't support function calling
- functions_unsupported_model = optional_params.pop(
- "functions_unsupported_model"
- )
- messages = function_call_prompt(
- messages=messages, functions=functions_unsupported_model
- )
-
- # For logging - save the values of the litellm-specific params passed in
- litellm_params = get_litellm_params(
- acompletion=acompletion,
- api_key=api_key,
- force_timeout=force_timeout,
- logger_fn=logger_fn,
- verbose=verbose,
- custom_llm_provider=custom_llm_provider,
- api_base=api_base,
- litellm_call_id=kwargs.get("litellm_call_id", None),
- model_alias_map=litellm.model_alias_map,
- completion_call_id=id,
- metadata=metadata,
- model_info=model_info,
- proxy_server_request=proxy_server_request,
- preset_cache_key=preset_cache_key,
- no_log=no_log,
- input_cost_per_second=input_cost_per_second,
- input_cost_per_token=input_cost_per_token,
- output_cost_per_second=output_cost_per_second,
- output_cost_per_token=output_cost_per_token,
- )
- logging.update_environment_variables(
- model=model,
- user=user,
- optional_params=optional_params,
- litellm_params=litellm_params,
- )
- if mock_response:
- return mock_completion(
- model,
- messages,
- stream=stream,
- mock_response=mock_response,
- logging=logging,
- acompletion=acompletion,
- )
- if custom_llm_provider == "azure":
- # azure configs
- api_type = get_secret("AZURE_API_TYPE") or "azure"
-
- api_base = api_base or litellm.api_base or get_secret("AZURE_API_BASE")
-
- api_version = (
- api_version or litellm.api_version or get_secret("AZURE_API_VERSION")
- )
-
- api_key = (
- api_key
- or litellm.api_key
- or litellm.azure_key
- or get_secret("AZURE_OPENAI_API_KEY")
- or get_secret("AZURE_API_KEY")
- )
-
- azure_ad_token = optional_params.get("extra_body", {}).pop(
- "azure_ad_token", None
- ) or get_secret("AZURE_AD_TOKEN")
-
- headers = headers or litellm.headers
-
- ## LOAD CONFIG - if set
- config = litellm.AzureOpenAIConfig.get_config()
- for k, v in config.items():
- if (
- k not in optional_params
- ): # completion(top_k=3) > azure_config(top_k=3) <- allows for dynamic variables to be passed in
- optional_params[k] = v
-
- ## COMPLETION CALL
- response = azure_chat_completions.completion(
- model=model,
- messages=messages,
- headers=headers,
- api_key=api_key,
- api_base=api_base,
- api_version=api_version,
- api_type=api_type,
- azure_ad_token=azure_ad_token,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- logging_obj=logging,
- acompletion=acompletion,
- timeout=timeout, # type: ignore
- client=client, # pass AsyncAzureOpenAI, AzureOpenAI client
- )
-
- if optional_params.get("stream", False) or acompletion == True:
- ## LOGGING
- logging.post_call(
- input=messages,
- api_key=api_key,
- original_response=response,
- additional_args={
- "headers": headers,
- "api_version": api_version,
- "api_base": api_base,
- },
- )
- elif custom_llm_provider == "azure_text":
- # azure configs
- api_type = get_secret("AZURE_API_TYPE") or "azure"
-
- api_base = api_base or litellm.api_base or get_secret("AZURE_API_BASE")
-
- api_version = (
- api_version or litellm.api_version or get_secret("AZURE_API_VERSION")
- )
-
- api_key = (
- api_key
- or litellm.api_key
- or litellm.azure_key
- or get_secret("AZURE_OPENAI_API_KEY")
- or get_secret("AZURE_API_KEY")
- )
-
- azure_ad_token = optional_params.get("extra_body", {}).pop(
- "azure_ad_token", None
- ) or get_secret("AZURE_AD_TOKEN")
-
- headers = headers or litellm.headers
-
- ## LOAD CONFIG - if set
- config = litellm.AzureOpenAIConfig.get_config()
- for k, v in config.items():
- if (
- k not in optional_params
- ): # completion(top_k=3) > azure_config(top_k=3) <- allows for dynamic variables to be passed in
- optional_params[k] = v
-
- ## COMPLETION CALL
- response = azure_text_completions.completion(
- model=model,
- messages=messages,
- headers=headers,
- api_key=api_key,
- api_base=api_base,
- api_version=api_version,
- api_type=api_type,
- azure_ad_token=azure_ad_token,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- logging_obj=logging,
- acompletion=acompletion,
- timeout=timeout,
- client=client, # pass AsyncAzureOpenAI, AzureOpenAI client
- )
-
- if optional_params.get("stream", False) or acompletion == True:
- ## LOGGING
- logging.post_call(
- input=messages,
- api_key=api_key,
- original_response=response,
- additional_args={
- "headers": headers,
- "api_version": api_version,
- "api_base": api_base,
- },
- )
- elif (
- model in litellm.open_ai_chat_completion_models
- or custom_llm_provider == "custom_openai"
- or custom_llm_provider == "deepinfra"
- or custom_llm_provider == "perplexity"
- or custom_llm_provider == "groq"
- or custom_llm_provider == "deepseek"
- or custom_llm_provider == "anyscale"
- or custom_llm_provider == "mistral"
- or custom_llm_provider == "openai"
- or custom_llm_provider == "together_ai"
- or custom_llm_provider in litellm.openai_compatible_providers
- or "ft:gpt-3.5-turbo" in model # finetune gpt-3.5-turbo
- ): # allow user to make an openai call with a custom base
- # note: if a user sets a custom base - we should ensure this works
- # allow for the setting of dynamic and stateful api-bases
- api_base = (
- api_base # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
- or litellm.api_base
- or get_secret("OPENAI_API_BASE")
- or "https://api.openai.com/v1"
- )
- openai.organization = (
- organization
- or litellm.organization
- or get_secret("OPENAI_ORGANIZATION")
- or None # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105
- )
- # set API KEY
- api_key = (
- api_key
- or litellm.api_key # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there
- or litellm.openai_key
- or get_secret("OPENAI_API_KEY")
- )
-
- headers = headers or litellm.headers
-
- ## LOAD CONFIG - if set
- config = litellm.OpenAIConfig.get_config()
- for k, v in config.items():
- if (
- k not in optional_params
- ): # completion(top_k=3) > openai_config(top_k=3) <- allows for dynamic variables to be passed in
- optional_params[k] = v
-
- ## COMPLETION CALL
- try:
- response = openai_chat_completions.completion(
- model=model,
- messages=messages,
- headers=headers,
- model_response=model_response,
- print_verbose=print_verbose,
- api_key=api_key,
- api_base=api_base,
- acompletion=acompletion,
- logging_obj=logging,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- timeout=timeout, # type: ignore
- custom_prompt_dict=custom_prompt_dict,
- client=client, # pass AsyncOpenAI, OpenAI client
- organization=organization,
- custom_llm_provider=custom_llm_provider,
- )
- except Exception as e:
- ## LOGGING - log the original exception returned
- logging.post_call(
- input=messages,
- api_key=api_key,
- original_response=str(e),
- additional_args={"headers": headers},
- )
- raise e
-
- if optional_params.get("stream", False):
- ## LOGGING
- logging.post_call(
- input=messages,
- api_key=api_key,
- original_response=response,
- additional_args={"headers": headers},
- )
- elif (
- custom_llm_provider == "text-completion-openai"
- or "ft:babbage-002" in model
- or "ft:davinci-002" in model # support for finetuned completion models
- ):
- openai.api_type = "openai"
-
- api_base = (
- api_base
- or litellm.api_base
- or get_secret("OPENAI_API_BASE")
- or "https://api.openai.com/v1"
- )
-
- openai.api_version = None
- # set API KEY
-
- api_key = (
- api_key
- or litellm.api_key
- or litellm.openai_key
- or get_secret("OPENAI_API_KEY")
- )
-
- headers = headers or litellm.headers
-
- ## LOAD CONFIG - if set
- config = litellm.OpenAITextCompletionConfig.get_config()
- for k, v in config.items():
- if (
- k not in optional_params
- ): # completion(top_k=3) > openai_text_config(top_k=3) <- allows for dynamic variables to be passed in
- optional_params[k] = v
- if litellm.organization:
- openai.organization = litellm.organization
-
- if (
- len(messages) > 0
- and "content" in messages[0]
- and type(messages[0]["content"]) == list
- ):
- # text-davinci-003 can accept a string or array, if it's an array, assume the array is set in messages[0]['content']
- # https://platform.openai.com/docs/api-reference/completions/create
- prompt = messages[0]["content"]
- else:
- prompt = " ".join([message["content"] for message in messages]) # type: ignore
-
- ## COMPLETION CALL
- _response = openai_text_completions.completion(
- model=model,
- messages=messages,
- model_response=model_response,
- print_verbose=print_verbose,
- api_key=api_key,
- api_base=api_base,
- acompletion=acompletion,
- client=client, # pass AsyncOpenAI, OpenAI client
- logging_obj=logging,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- timeout=timeout, # type: ignore
- )
-
- if (
- optional_params.get("stream", False) == False
- and acompletion == False
- and text_completion == False
- ):
- # convert to chat completion response
- _response = litellm.OpenAITextCompletionConfig().convert_to_chat_model_response_object(
- response_object=_response, model_response_object=model_response
- )
-
- if optional_params.get("stream", False) or acompletion == True:
- ## LOGGING
- logging.post_call(
- input=messages,
- api_key=api_key,
- original_response=_response,
- additional_args={"headers": headers},
- )
- response = _response
- elif (
- "replicate" in model
- or custom_llm_provider == "replicate"
- or model in litellm.replicate_models
- ):
- # Setting the relevant API KEY for replicate, replicate defaults to using os.environ.get("REPLICATE_API_TOKEN")
- replicate_key = None
- replicate_key = (
- api_key
- or litellm.replicate_key
- or litellm.api_key
- or get_secret("REPLICATE_API_KEY")
- or get_secret("REPLICATE_API_TOKEN")
- )
-
- api_base = (
- api_base
- or litellm.api_base
- or get_secret("REPLICATE_API_BASE")
- or "https://api.replicate.com/v1"
- )
-
- custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
-
- model_response = replicate.completion( # type: ignore
- model=model,
- messages=messages,
- api_base=api_base,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding, # for calculating input/output tokens
- api_key=replicate_key,
- logging_obj=logging,
- custom_prompt_dict=custom_prompt_dict,
- acompletion=acompletion,
- )
-
- if optional_params.get("stream", False) == True:
- ## LOGGING
- logging.post_call(
- input=messages,
- api_key=replicate_key,
- original_response=model_response,
- )
-
- response = model_response
- elif (
- "clarifai" in model
- or custom_llm_provider == "clarifai"
- or model in litellm.clarifai_models
- ):
- clarifai_key = None
- clarifai_key = (
- api_key
- or litellm.clarifai_key
- or litellm.api_key
- or get_secret("CLARIFAI_API_KEY")
- or get_secret("CLARIFAI_API_TOKEN")
- )
-
- api_base = (
- api_base
- or litellm.api_base
- or get_secret("CLARIFAI_API_BASE")
- or "https://api.clarifai.com/v2"
- )
-
- custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
- model_response = clarifai.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- acompletion=acompletion,
- logger_fn=logger_fn,
- encoding=encoding, # for calculating input/output tokens
- api_key=clarifai_key,
- logging_obj=logging,
- custom_prompt_dict=custom_prompt_dict,
- )
-
- if "stream" in optional_params and optional_params["stream"] == True:
- # don't try to access stream object,
- ## LOGGING
- logging.post_call(
- input=messages,
- api_key=api_key,
- original_response=model_response,
- )
-
- if optional_params.get("stream", False) or acompletion == True:
- ## LOGGING
- logging.post_call(
- input=messages,
- api_key=clarifai_key,
- original_response=model_response,
- )
- response = model_response
-
- elif custom_llm_provider == "anthropic":
- api_key = (
- api_key
- or litellm.anthropic_key
- or litellm.api_key
- or os.environ.get("ANTHROPIC_API_KEY")
- )
- custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
-
- if (model == "claude-2") or (model == "claude-instant-1"):
- # call anthropic /completion, only use this route for claude-2, claude-instant-1
- api_base = (
- api_base
- or litellm.api_base
- or get_secret("ANTHROPIC_API_BASE")
- or "https://api.anthropic.com/v1/complete"
- )
- response = anthropic_text_completions.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- acompletion=acompletion,
- custom_prompt_dict=litellm.custom_prompt_dict,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding, # for calculating input/output tokens
- api_key=api_key,
- logging_obj=logging,
- headers=headers,
- )
- else:
- # call /messages
- # default route for all anthropic models
- api_base = (
- api_base
- or litellm.api_base
- or get_secret("ANTHROPIC_API_BASE")
- or "https://api.anthropic.com/v1/messages"
- )
- response = anthropic_chat_completions.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- acompletion=acompletion,
- custom_prompt_dict=litellm.custom_prompt_dict,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding, # for calculating input/output tokens
- api_key=api_key,
- logging_obj=logging,
- headers=headers,
- )
- if optional_params.get("stream", False) or acompletion == True:
- ## LOGGING
- logging.post_call(
- input=messages,
- api_key=api_key,
- original_response=response,
- )
- response = response
- elif custom_llm_provider == "nlp_cloud":
- nlp_cloud_key = (
- api_key
- or litellm.nlp_cloud_key
- or get_secret("NLP_CLOUD_API_KEY")
- or litellm.api_key
- )
-
- api_base = (
- api_base
- or litellm.api_base
- or get_secret("NLP_CLOUD_API_BASE")
- or "https://api.nlpcloud.io/v1/gpu/"
- )
-
- response = nlp_cloud.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- api_key=nlp_cloud_key,
- logging_obj=logging,
- )
-
- if "stream" in optional_params and optional_params["stream"] == True:
- # don't try to access stream object,
- response = CustomStreamWrapper(
- response,
- model,
- custom_llm_provider="nlp_cloud",
- logging_obj=logging,
- )
-
- if optional_params.get("stream", False) or acompletion == True:
- ## LOGGING
- logging.post_call(
- input=messages,
- api_key=api_key,
- original_response=response,
- )
-
- response = response
- elif custom_llm_provider == "aleph_alpha":
- aleph_alpha_key = (
- api_key
- or litellm.aleph_alpha_key
- or get_secret("ALEPH_ALPHA_API_KEY")
- or get_secret("ALEPHALPHA_API_KEY")
- or litellm.api_key
- )
-
- api_base = (
- api_base
- or litellm.api_base
- or get_secret("ALEPH_ALPHA_API_BASE")
- or "https://api.aleph-alpha.com/complete"
- )
-
- model_response = aleph_alpha.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- default_max_tokens_to_sample=litellm.max_tokens,
- api_key=aleph_alpha_key,
- logging_obj=logging, # model call logging done inside the class as we may need to modify I/O to fit aleph alpha's requirements
- )
-
- if "stream" in optional_params and optional_params["stream"] == True:
- # don't try to access stream object,
- response = CustomStreamWrapper(
- model_response,
- model,
- custom_llm_provider="aleph_alpha",
- logging_obj=logging,
- )
- return response
- response = model_response
- elif custom_llm_provider == "cohere":
- cohere_key = (
- api_key
- or litellm.cohere_key
- or get_secret("COHERE_API_KEY")
- or get_secret("CO_API_KEY")
- or litellm.api_key
- )
-
- api_base = (
- api_base
- or litellm.api_base
- or get_secret("COHERE_API_BASE")
- or "https://api.cohere.ai/v1/generate"
- )
-
- model_response = cohere.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- api_key=cohere_key,
- logging_obj=logging, # model call logging done inside the class as we may need to modify I/O to fit cohere's requirements
- )
-
- if "stream" in optional_params and optional_params["stream"] == True:
- # don't try to access stream object,
- response = CustomStreamWrapper(
- model_response,
- model,
- custom_llm_provider="cohere",
- logging_obj=logging,
- )
- return response
- response = model_response
- elif custom_llm_provider == "cohere_chat":
- cohere_key = (
- api_key
- or litellm.cohere_key
- or get_secret("COHERE_API_KEY")
- or get_secret("CO_API_KEY")
- or litellm.api_key
- )
-
- api_base = (
- api_base
- or litellm.api_base
- or get_secret("COHERE_API_BASE")
- or "https://api.cohere.ai/v1/chat"
- )
-
- model_response = cohere_chat.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- api_key=cohere_key,
- logging_obj=logging, # model call logging done inside the class as we may need to modify I/O to fit cohere's requirements
- )
-
- if "stream" in optional_params and optional_params["stream"] == True:
- # don't try to access stream object,
- response = CustomStreamWrapper(
- model_response,
- model,
- custom_llm_provider="cohere_chat",
- logging_obj=logging,
- )
- return response
- response = model_response
- elif custom_llm_provider == "maritalk":
- maritalk_key = (
- api_key
- or litellm.maritalk_key
- or get_secret("MARITALK_API_KEY")
- or litellm.api_key
- )
-
- api_base = (
- api_base
- or litellm.api_base
- or get_secret("MARITALK_API_BASE")
- or "https://chat.maritaca.ai/api/chat/inference"
- )
-
- model_response = maritalk.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- api_key=maritalk_key,
- logging_obj=logging,
- )
-
- if "stream" in optional_params and optional_params["stream"] == True:
- # don't try to access stream object,
- response = CustomStreamWrapper(
- model_response,
- model,
- custom_llm_provider="maritalk",
- logging_obj=logging,
- )
- return response
- response = model_response
- elif custom_llm_provider == "huggingface":
- custom_llm_provider = "huggingface"
- huggingface_key = (
- api_key
- or litellm.huggingface_key
- or os.environ.get("HF_TOKEN")
- or os.environ.get("HUGGINGFACE_API_KEY")
- or litellm.api_key
- )
- hf_headers = headers or litellm.headers
-
- custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
- model_response = huggingface.completion(
- model=model,
- messages=messages,
- api_base=api_base, # type: ignore
- headers=hf_headers,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- api_key=huggingface_key,
- acompletion=acompletion,
- logging_obj=logging,
- custom_prompt_dict=custom_prompt_dict,
- timeout=timeout, # type: ignore
- )
- if (
- "stream" in optional_params
- and optional_params["stream"] == True
- and acompletion is False
- ):
- # don't try to access stream object,
- response = CustomStreamWrapper(
- model_response,
- model,
- custom_llm_provider="huggingface",
- logging_obj=logging,
- )
- return response
- response = model_response
- elif custom_llm_provider == "oobabooga":
- custom_llm_provider = "oobabooga"
- model_response = oobabooga.completion(
- model=model,
- messages=messages,
- model_response=model_response,
- api_base=api_base, # type: ignore
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- api_key=None,
- logger_fn=logger_fn,
- encoding=encoding,
- logging_obj=logging,
- )
- if "stream" in optional_params and optional_params["stream"] == True:
- # don't try to access stream object,
- response = CustomStreamWrapper(
- model_response,
- model,
- custom_llm_provider="oobabooga",
- logging_obj=logging,
- )
- return response
- response = model_response
- elif custom_llm_provider == "openrouter":
- api_base = api_base or litellm.api_base or "https://openrouter.ai/api/v1"
-
- api_key = (
- api_key
- or litellm.api_key
- or litellm.openrouter_key
- or get_secret("OPENROUTER_API_KEY")
- or get_secret("OR_API_KEY")
- )
-
- openrouter_site_url = get_secret("OR_SITE_URL") or "https://litellm.ai"
-
- openrouter_app_name = get_secret("OR_APP_NAME") or "liteLLM"
-
- headers = (
- headers
- or litellm.headers
- or {
- "HTTP-Referer": openrouter_site_url,
- "X-Title": openrouter_app_name,
- }
- )
-
- ## Load Config
- config = openrouter.OpenrouterConfig.get_config()
- for k, v in config.items():
- if k == "extra_body":
- # we use openai 'extra_body' to pass openrouter specific params - transforms, route, models
- if "extra_body" in optional_params:
- optional_params[k].update(v)
- else:
- optional_params[k] = v
- elif k not in optional_params:
- optional_params[k] = v
-
- data = {"model": model, "messages": messages, **optional_params}
-
- ## COMPLETION CALL
- response = openai_chat_completions.completion(
- model=model,
- messages=messages,
- headers=headers,
- api_key=api_key,
- api_base=api_base,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- logging_obj=logging,
- acompletion=acompletion,
- timeout=timeout, # type: ignore
- )
- ## LOGGING
- logging.post_call(
- input=messages, api_key=openai.api_key, original_response=response
- )
- elif (
- custom_llm_provider == "together_ai"
- or ("togethercomputer" in model)
- or (model in litellm.together_ai_models)
- ):
- """
- Deprecated. We now do together ai calls via the openai client - https://docs.together.ai/docs/openai-api-compatibility
- """
- custom_llm_provider = "together_ai"
- together_ai_key = (
- api_key
- or litellm.togetherai_api_key
- or get_secret("TOGETHER_AI_TOKEN")
- or get_secret("TOGETHERAI_API_KEY")
- or litellm.api_key
- )
-
- api_base = (
- api_base
- or litellm.api_base
- or get_secret("TOGETHERAI_API_BASE")
- or "https://api.together.xyz/inference"
- )
-
- custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
-
- model_response = together_ai.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- api_key=together_ai_key,
- logging_obj=logging,
- custom_prompt_dict=custom_prompt_dict,
- )
- if (
- "stream_tokens" in optional_params
- and optional_params["stream_tokens"] == True
- ):
- # don't try to access stream object,
- response = CustomStreamWrapper(
- model_response,
- model,
- custom_llm_provider="together_ai",
- logging_obj=logging,
- )
- return response
- response = model_response
- elif custom_llm_provider == "palm":
- palm_api_key = api_key or get_secret("PALM_API_KEY") or litellm.api_key
-
- # palm does not support streaming as yet :(
- model_response = palm.completion(
- model=model,
- messages=messages,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- api_key=palm_api_key,
- logging_obj=logging,
- )
- # fake palm streaming
- if "stream" in optional_params and optional_params["stream"] == True:
- # fake streaming for palm
- resp_string = model_response["choices"][0]["message"]["content"]
- response = CustomStreamWrapper(
- resp_string, model, custom_llm_provider="palm", logging_obj=logging
- )
- return response
- response = model_response
- elif custom_llm_provider == "gemini":
- gemini_api_key = (
- api_key
- or get_secret("GEMINI_API_KEY")
- or get_secret("PALM_API_KEY") # older palm api key should also work
- or litellm.api_key
- )
-
- # sync gemini streaming is handled below by wrapping the response in CustomStreamWrapper
- model_response = gemini.completion(
- model=model,
- messages=messages,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- api_key=gemini_api_key,
- logging_obj=logging,
- acompletion=acompletion,
- custom_prompt_dict=custom_prompt_dict,
- )
- if (
- "stream" in optional_params
- and optional_params["stream"] == True
- and acompletion == False
- ):
- response = CustomStreamWrapper(
- iter(model_response),
- model,
- custom_llm_provider="gemini",
- logging_obj=logging,
- )
- return response
- response = model_response
- elif custom_llm_provider == "vertex_ai":
- vertex_ai_project = (
- optional_params.pop("vertex_project", None)
- or optional_params.pop("vertex_ai_project", None)
- or litellm.vertex_project
- or get_secret("VERTEXAI_PROJECT")
- )
- vertex_ai_location = (
- optional_params.pop("vertex_location", None)
- or optional_params.pop("vertex_ai_location", None)
- or litellm.vertex_location
- or get_secret("VERTEXAI_LOCATION")
- )
- vertex_credentials = (
- optional_params.pop("vertex_credentials", None)
- or optional_params.pop("vertex_ai_credentials", None)
- or get_secret("VERTEXAI_CREDENTIALS")
- )
- new_params = deepcopy(optional_params)
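- # pass a copy downstream so any pops inside the vertex handlers don't mutate the caller's optional_params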
- if "claude-3" in model:
- model_response = vertex_ai_anthropic.completion(
- model=model,
- messages=messages,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=new_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- vertex_location=vertex_ai_location,
- vertex_project=vertex_ai_project,
- vertex_credentials=vertex_credentials,
- logging_obj=logging,
- acompletion=acompletion,
- )
- else:
-> model_response = vertex_ai.completion(
- model=model,
- messages=messages,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=new_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding,
- vertex_location=vertex_ai_location,
- vertex_project=vertex_ai_project,
- vertex_credentials=vertex_credentials,
- logging_obj=logging,
- acompletion=acompletion,
- )
-
-../main.py:1824:
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
-model = 'gemini-1.5-flash-preview-0514'
-messages = [{'content': [{'text': 'Whats in this image?', 'type': 'text'}, {'image_url': {'url': 'gs://cloud-samples-data/generative-ai/image/boats.jpeg'}, 'type': 'image_url'}], 'role': 'user'}]
-model_response = ModelResponse(id='chatcmpl-722df0e7-4e2d-44e6-9e2c-49823faa0189', choices=[Choices(finish_reason='stop', index=0, mess... role='assistant'))], created=1716145725, model=None, object='chat.completion', system_fingerprint=None, usage=Usage())
-print_verbose = <...>
-encoding = <...>
-logging_obj = <...>
-vertex_project = None, vertex_location = None, vertex_credentials = None
-optional_params = {}
-litellm_params = {'acompletion': False, 'api_base': '', 'api_key': None, 'completion_call_id': None, ...}
-logger_fn = None, acompletion = False
-
- def completion(
- model: str,
- messages: list,
- model_response: ModelResponse,
- print_verbose: Callable,
- encoding,
- logging_obj,
- vertex_project=None,
- vertex_location=None,
- vertex_credentials=None,
- optional_params=None,
- litellm_params=None,
- logger_fn=None,
- acompletion: bool = False,
- ):
- try:
- import vertexai
- except:
- raise VertexAIError(
- status_code=400,
- message="vertexai import failed please run `pip install google-cloud-aiplatform`",
- )
-
- if not (
- hasattr(vertexai, "preview") or hasattr(vertexai.preview, "language_models")
- ):
- raise VertexAIError(
- status_code=400,
- message="""Upgrade vertex ai. Run `pip install "google-cloud-aiplatform>=1.38"`""",
- )
- try:
- from vertexai.preview.language_models import (
- ChatModel,
- CodeChatModel,
- InputOutputTextPair,
- )
- from vertexai.language_models import TextGenerationModel, CodeGenerationModel
- from vertexai.preview.generative_models import (
- GenerativeModel,
- Part,
- GenerationConfig,
- )
- from google.cloud import aiplatform # type: ignore
- from google.protobuf import json_format # type: ignore
- from google.protobuf.struct_pb2 import Value # type: ignore
- from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types # type: ignore
- import google.auth # type: ignore
- import proto # type: ignore
-
- ## Load credentials with the correct quota project ref: https://github.com/googleapis/python-aiplatform/issues/2557#issuecomment-1709284744
- print_verbose(
- f"VERTEX AI: vertex_project={vertex_project}; vertex_location={vertex_location}"
- )
- if vertex_credentials is not None and isinstance(vertex_credentials, str):
- import google.oauth2.service_account
-
- json_obj = json.loads(vertex_credentials)
-
- creds = google.oauth2.service_account.Credentials.from_service_account_info(
- json_obj,
- scopes=["https://www.googleapis.com/auth/cloud-platform"],
- )
- else:
- creds, _ = google.auth.default(quota_project_id=vertex_project)
- print_verbose(
- f"VERTEX AI: creds={creds}; google application credentials: {os.getenv('GOOGLE_APPLICATION_CREDENTIALS')}"
- )
- vertexai.init(
- project=vertex_project, location=vertex_location, credentials=creds
- )
-
- ## Load Config
- config = litellm.VertexAIConfig.get_config()
- for k, v in config.items():
- if k not in optional_params:
- optional_params[k] = v
-
- ## Process safety settings into format expected by vertex AI
- safety_settings = None
- if "safety_settings" in optional_params:
- safety_settings = optional_params.pop("safety_settings")
- if not isinstance(safety_settings, list):
- raise ValueError("safety_settings must be a list")
- if len(safety_settings) > 0 and not isinstance(safety_settings[0], dict):
- raise ValueError("safety_settings must be a list of dicts")
- safety_settings = [
- gapic_content_types.SafetySetting(x) for x in safety_settings
- ]
-
- # vertexai does not use an API key, it looks for credentials.json in the environment
-
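- # flatten plain-string message contents into a single prompt; list-style (multimodal) contents are handled in "vision" mode below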
- prompt = " ".join(
- [
- message["content"]
- for message in messages
- if isinstance(message["content"], str)
- ]
- )
-
- mode = ""
-
- request_str = ""
- response_obj = None
- async_client = None
- instances = None
- client_options = {
- "api_endpoint": f"{vertex_location}-aiplatform.googleapis.com"
- }
- if (
- model in litellm.vertex_language_models
- or model in litellm.vertex_vision_models
- ):
- llm_model = GenerativeModel(model)
- mode = "vision"
- request_str += f"llm_model = GenerativeModel({model})\n"
- elif model in litellm.vertex_chat_models:
- llm_model = ChatModel.from_pretrained(model)
- mode = "chat"
- request_str += f"llm_model = ChatModel.from_pretrained({model})\n"
- elif model in litellm.vertex_text_models:
- llm_model = TextGenerationModel.from_pretrained(model)
- mode = "text"
- request_str += f"llm_model = TextGenerationModel.from_pretrained({model})\n"
- elif model in litellm.vertex_code_text_models:
- llm_model = CodeGenerationModel.from_pretrained(model)
- mode = "text"
- request_str += f"llm_model = CodeGenerationModel.from_pretrained({model})\n"
- elif model in litellm.vertex_code_chat_models: # vertex_code_llm_models
- llm_model = CodeChatModel.from_pretrained(model)
- mode = "chat"
- request_str += f"llm_model = CodeChatModel.from_pretrained({model})\n"
- elif model == "private":
- mode = "private"
- model = optional_params.pop("model_id", None)
- # private endpoint requires a dict instead of JSON
- instances = [optional_params.copy()]
- instances[0]["prompt"] = prompt
- llm_model = aiplatform.PrivateEndpoint(
- endpoint_name=model,
- project=vertex_project,
- location=vertex_location,
- )
- request_str += f"llm_model = aiplatform.PrivateEndpoint(endpoint_name={model}, project={vertex_project}, location={vertex_location})\n"
- else: # assume vertex model garden on public endpoint
- mode = "custom"
-
- instances = [optional_params.copy()]
- instances[0]["prompt"] = prompt
- instances = [
- json_format.ParseDict(instance_dict, Value())
- for instance_dict in instances
- ]
- # Will determine the API used based on async parameter
- llm_model = None
-
- # NOTE: async prediction and streaming under "private" mode isn't supported by aiplatform right now
- if acompletion == True:
- data = {
- "llm_model": llm_model,
- "mode": mode,
- "prompt": prompt,
- "logging_obj": logging_obj,
- "request_str": request_str,
- "model": model,
- "model_response": model_response,
- "encoding": encoding,
- "messages": messages,
- "print_verbose": print_verbose,
- "client_options": client_options,
- "instances": instances,
- "vertex_location": vertex_location,
- "vertex_project": vertex_project,
- "safety_settings": safety_settings,
- **optional_params,
- }
- if optional_params.get("stream", False) is True:
- # async streaming
- return async_streaming(**data)
-
- return async_completion(**data)
-
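- ## SYNC CALL PATHS - "vision" / "chat" / "text" / "custom" / "private"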
- if mode == "vision":
- print_verbose("\nMaking VertexAI Gemini Pro / Pro Vision Call")
- print_verbose(f"\nProcessing input messages = {messages}")
- tools = optional_params.pop("tools", None)
- content = _gemini_convert_messages_text(messages=messages)
- stream = optional_params.pop("stream", False)
- if stream == True:
- request_str += f"response = llm_model.generate_content({content}, generation_config=GenerationConfig(**{optional_params}), safety_settings={safety_settings}, stream={stream})\n"
- logging_obj.pre_call(
- input=prompt,
- api_key=None,
- additional_args={
- "complete_input_dict": optional_params,
- "request_str": request_str,
- },
- )
-
- model_response = llm_model.generate_content(
- contents={"content": content},
- generation_config=optional_params,
- safety_settings=safety_settings,
- stream=True,
- tools=tools,
- )
-
- return model_response
-
- request_str += f"response = llm_model.generate_content({content})\n"
- ## LOGGING
- logging_obj.pre_call(
- input=prompt,
- api_key=None,
- additional_args={
- "complete_input_dict": optional_params,
- "request_str": request_str,
- },
- )
-
- ## LLM Call
- response = llm_model.generate_content(
- contents=content,
- generation_config=optional_params,
- safety_settings=safety_settings,
- tools=tools,
- )
-
- if tools is not None and bool(
- getattr(response.candidates[0].content.parts[0], "function_call", None)
- ):
- function_call = response.candidates[0].content.parts[0].function_call
- args_dict = {}
-
- # Check if it's a RepeatedComposite instance
- for key, val in function_call.args.items():
- if isinstance(
- val, proto.marshal.collections.repeated.RepeatedComposite
- ):
- # If so, convert to list
- args_dict[key] = [v for v in val]
- else:
- args_dict[key] = val
-
- try:
- args_str = json.dumps(args_dict)
- except Exception as e:
- raise VertexAIError(status_code=422, message=str(e))
- message = litellm.Message(
- content=None,
- tool_calls=[
- {
- "id": f"call_{str(uuid.uuid4())}",
- "function": {
- "arguments": args_str,
- "name": function_call.name,
- },
- "type": "function",
- }
- ],
- )
- completion_response = message
- else:
- completion_response = response.text
- response_obj = response._raw_response
- optional_params["tools"] = tools
- elif mode == "chat":
- chat = llm_model.start_chat()
- request_str += f"chat = llm_model.start_chat()\n"
-
- if "stream" in optional_params and optional_params["stream"] == True:
- # NOTE: VertexAI does not accept stream=True as a param and raises an error,
- # we handle this by removing 'stream' from optional params and sending the request
- # after we get the response we add optional_params["stream"] = True, since main.py needs to know it's a streaming response to then transform it for the OpenAI format
- optional_params.pop(
- "stream", None
- ) # vertex ai raises an error when passing stream in optional params
- request_str += (
- f"chat.send_message_streaming({prompt}, **{optional_params})\n"
- )
- ## LOGGING
- logging_obj.pre_call(
- input=prompt,
- api_key=None,
- additional_args={
- "complete_input_dict": optional_params,
- "request_str": request_str,
- },
- )
- model_response = chat.send_message_streaming(prompt, **optional_params)
-
- return model_response
-
- request_str += f"chat.send_message({prompt}, **{optional_params}).text\n"
- ## LOGGING
- logging_obj.pre_call(
- input=prompt,
- api_key=None,
- additional_args={
- "complete_input_dict": optional_params,
- "request_str": request_str,
- },
- )
- completion_response = chat.send_message(prompt, **optional_params).text
- elif mode == "text":
- if "stream" in optional_params and optional_params["stream"] == True:
- optional_params.pop(
- "stream", None
- ) # See note above on handling streaming for vertex ai
- request_str += (
- f"llm_model.predict_streaming({prompt}, **{optional_params})\n"
- )
- ## LOGGING
- logging_obj.pre_call(
- input=prompt,
- api_key=None,
- additional_args={
- "complete_input_dict": optional_params,
- "request_str": request_str,
- },
- )
- model_response = llm_model.predict_streaming(prompt, **optional_params)
-
- return model_response
-
- request_str += f"llm_model.predict({prompt}, **{optional_params}).text\n"
- ## LOGGING
- logging_obj.pre_call(
- input=prompt,
- api_key=None,
- additional_args={
- "complete_input_dict": optional_params,
- "request_str": request_str,
- },
- )
- completion_response = llm_model.predict(prompt, **optional_params).text
- elif mode == "custom":
- """
- Vertex AI Model Garden
- """
- ## LOGGING
- logging_obj.pre_call(
- input=prompt,
- api_key=None,
- additional_args={
- "complete_input_dict": optional_params,
- "request_str": request_str,
- },
- )
- llm_model = aiplatform.gapic.PredictionServiceClient(
- client_options=client_options
- )
- request_str += f"llm_model = aiplatform.gapic.PredictionServiceClient(client_options={client_options})\n"
- endpoint_path = llm_model.endpoint_path(
- project=vertex_project, location=vertex_location, endpoint=model
- )
- request_str += (
- f"llm_model.predict(endpoint={endpoint_path}, instances={instances})\n"
- )
- response = llm_model.predict(
- endpoint=endpoint_path, instances=instances
- ).predictions
-
- completion_response = response[0]
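- # if the prediction contains a "\nOutput:\n" marker (e.g. an echoed prompt), keep only the text after it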
- if (
- isinstance(completion_response, str)
- and "\nOutput:\n" in completion_response
- ):
- completion_response = completion_response.split("\nOutput:\n", 1)[1]
- if "stream" in optional_params and optional_params["stream"] == True:
- response = TextStreamer(completion_response)
- return response
- elif mode == "private":
- """
- Vertex AI Model Garden deployed on private endpoint
- """
- ## LOGGING
- logging_obj.pre_call(
- input=prompt,
- api_key=None,
- additional_args={
- "complete_input_dict": optional_params,
- "request_str": request_str,
- },
- )
- request_str += f"llm_model.predict(instances={instances})\n"
- response = llm_model.predict(instances=instances).predictions
-
- completion_response = response[0]
- if (
- isinstance(completion_response, str)
- and "\nOutput:\n" in completion_response
- ):
- completion_response = completion_response.split("\nOutput:\n", 1)[1]
- if "stream" in optional_params and optional_params["stream"] == True:
- response = TextStreamer(completion_response)
- return response
-
- ## LOGGING
- logging_obj.post_call(
- input=prompt, api_key=None, original_response=completion_response
- )
-
- ## RESPONSE OBJECT
- if isinstance(completion_response, litellm.Message):
- model_response["choices"][0]["message"] = completion_response
- elif len(str(completion_response)) > 0:
- model_response["choices"][0]["message"]["content"] = str(
- completion_response
- )
- model_response["created"] = int(time.time())
- model_response["model"] = model
- ## CALCULATING USAGE
- if model in litellm.vertex_language_models and response_obj is not None:
- model_response["choices"][0].finish_reason = map_finish_reason(
- response_obj.candidates[0].finish_reason.name
- )
- usage = Usage(
- prompt_tokens=response_obj.usage_metadata.prompt_token_count,
- completion_tokens=response_obj.usage_metadata.candidates_token_count,
- total_tokens=response_obj.usage_metadata.total_token_count,
- )
- else:
- # init prompt tokens
- # this block attempts to get usage from response_obj if it exists, if not it uses the litellm token counter
- prompt_tokens, completion_tokens, total_tokens = 0, 0, 0
- if response_obj is not None:
- if hasattr(response_obj, "usage_metadata") and hasattr(
- response_obj.usage_metadata, "prompt_token_count"
- ):
- prompt_tokens = response_obj.usage_metadata.prompt_token_count
- completion_tokens = (
- response_obj.usage_metadata.candidates_token_count
- )
- else:
- prompt_tokens = len(encoding.encode(prompt))
- completion_tokens = len(
- encoding.encode(
- model_response["choices"][0]["message"].get("content", "")
- )
- )
-
- usage = Usage(
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
- total_tokens=prompt_tokens + completion_tokens,
- )
- setattr(model_response, "usage", usage)
- return model_response
- except Exception as e:
- if isinstance(e, VertexAIError):
- raise e
-> raise VertexAIError(status_code=500, message=str(e))
-E litellm.llms.vertex_ai.VertexAIError: Parameter to MergeFrom() must be instance of same class: expected <class '...'> got <class '...'>.
-
-../llms/vertex_ai.py:971: VertexAIError
-
-During handling of the above exception, another exception occurred:
-
-args = ()
-kwargs = {'litellm_call_id': '7f48b7ab-47b3-4beb-b2b5-fa298be49d3f', 'litellm_logging_obj':
-call_type = 'completion', model = 'vertex_ai/gemini-1.5-flash-preview-0514'
-k = 'litellm_logging_obj'
-
- @wraps(original_function)
- def wrapper(*args, **kwargs):
- # DO NOT MOVE THIS. It always needs to run first
- # Check if this is an async function. If so only execute the async function
- if (
- kwargs.get("acompletion", False) == True
- or kwargs.get("aembedding", False) == True
- or kwargs.get("aimg_generation", False) == True
- or kwargs.get("amoderation", False) == True
- or kwargs.get("atext_completion", False) == True
- or kwargs.get("atranscription", False) == True
- ):
- # [OPTIONAL] CHECK MAX RETRIES / REQUEST
- if litellm.num_retries_per_request is not None:
- # check if previous_models passed in as ['litellm_params']['metadata']['previous_models']
- previous_models = kwargs.get("metadata", {}).get(
- "previous_models", None
- )
- if previous_models is not None:
- if litellm.num_retries_per_request <= len(previous_models):
- raise Exception(f"Max retries per request hit!")
-
- # MODEL CALL
- result = original_function(*args, **kwargs)
- if "stream" in kwargs and kwargs["stream"] == True:
- if (
- "complete_response" in kwargs
- and kwargs["complete_response"] == True
- ):
- chunks = []
- for idx, chunk in enumerate(result):
- chunks.append(chunk)
- return litellm.stream_chunk_builder(
- chunks, messages=kwargs.get("messages", None)
- )
- else:
- return result
-
- return result
-
- # Prints Exactly what was passed to litellm function - don't execute any logic here - it should just print
- print_args_passed_to_litellm(original_function, args, kwargs)
- start_time = datetime.datetime.now()
- result = None
- logging_obj = kwargs.get("litellm_logging_obj", None)
-
- # only set litellm_call_id if its not in kwargs
- call_type = original_function.__name__
- if "litellm_call_id" not in kwargs:
- kwargs["litellm_call_id"] = str(uuid.uuid4())
- try:
- model = args[0] if len(args) > 0 else kwargs["model"]
- except:
- model = None
- if (
- call_type != CallTypes.image_generation.value
- and call_type != CallTypes.text_completion.value
- ):
- raise ValueError("model param not passed in.")
-
- try:
- if logging_obj is None:
- logging_obj, kwargs = function_setup(
- original_function.__name__, rules_obj, start_time, *args, **kwargs
- )
- kwargs["litellm_logging_obj"] = logging_obj
-
- # CHECK FOR 'os.environ/' in kwargs
- for k, v in kwargs.items():
- if v is not None and isinstance(v, str) and v.startswith("os.environ/"):
- kwargs[k] = litellm.get_secret(v)
- # [OPTIONAL] CHECK BUDGET
- if litellm.max_budget:
- if litellm._current_cost > litellm.max_budget:
- raise BudgetExceededError(
- current_cost=litellm._current_cost,
- max_budget=litellm.max_budget,
- )
-
- # [OPTIONAL] CHECK MAX RETRIES / REQUEST
- if litellm.num_retries_per_request is not None:
- # check if previous_models passed in as ['litellm_params']['metadata']['previous_models']
- previous_models = kwargs.get("metadata", {}).get(
- "previous_models", None
- )
- if previous_models is not None:
- if litellm.num_retries_per_request <= len(previous_models):
- raise Exception(f"Max retries per request hit!")
-
- # [OPTIONAL] CHECK CACHE
- print_verbose(
- f"SYNC kwargs[caching]: {kwargs.get('caching', False)}; litellm.cache: {litellm.cache}; kwargs.get('cache')['no-cache']: {kwargs.get('cache', {}).get('no-cache', False)}"
- )
- # if caching is false or cache["no-cache"]==True, don't run this
- if (
- (
- (
- (
- kwargs.get("caching", None) is None
- and litellm.cache is not None
- )
- or kwargs.get("caching", False) == True
- )
- and kwargs.get("cache", {}).get("no-cache", False) != True
- )
- and kwargs.get("aembedding", False) != True
- and kwargs.get("atext_completion", False) != True
- and kwargs.get("acompletion", False) != True
- and kwargs.get("aimg_generation", False) != True
- and kwargs.get("atranscription", False) != True
- ): # allow users to control returning cached responses from the completion function
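- # i.e. only consult the sync cache when caching is enabled, "no-cache" isn't set, and this isn't one of the async call variants (those were handled at the top of the wrapper)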
- # checking cache
- print_verbose(f"INSIDE CHECKING CACHE")
- if (
- litellm.cache is not None
- and str(original_function.__name__)
- in litellm.cache.supported_call_types
- ):
- print_verbose(f"Checking Cache")
- preset_cache_key = litellm.cache.get_cache_key(*args, **kwargs)
- kwargs["preset_cache_key"] = (
- preset_cache_key # for streaming calls, we need to pass the preset_cache_key
- )
- cached_result = litellm.cache.get_cache(*args, **kwargs)
- if cached_result != None:
- if "detail" in cached_result:
- # implies an error occurred
- pass
- else:
- call_type = original_function.__name__
- print_verbose(
- f"Cache Response Object routing: call_type - {call_type}; cached_result instace: {type(cached_result)}"
- )
- if call_type == CallTypes.completion.value and isinstance(
- cached_result, dict
- ):
- cached_result = convert_to_model_response_object(
- response_object=cached_result,
- model_response_object=ModelResponse(),
- stream=kwargs.get("stream", False),
- )
-
- if kwargs.get("stream", False) == True:
- cached_result = CustomStreamWrapper(
- completion_stream=cached_result,
- model=model,
- custom_llm_provider="cached_response",
- logging_obj=logging_obj,
- )
- elif call_type == CallTypes.embedding.value and isinstance(
- cached_result, dict
- ):
- cached_result = convert_to_model_response_object(
- response_object=cached_result,
- response_type="embedding",
- )
-
- # LOG SUCCESS
- cache_hit = True
- end_time = datetime.datetime.now()
- (
- model,
- custom_llm_provider,
- dynamic_api_key,
- api_base,
- ) = litellm.get_llm_provider(
- model=model,
- custom_llm_provider=kwargs.get(
- "custom_llm_provider", None
- ),
- api_base=kwargs.get("api_base", None),
- api_key=kwargs.get("api_key", None),
- )
- print_verbose(
- f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}"
- )
- logging_obj.update_environment_variables(
- model=model,
- user=kwargs.get("user", None),
- optional_params={},
- litellm_params={
- "logger_fn": kwargs.get("logger_fn", None),
- "acompletion": False,
- "metadata": kwargs.get("metadata", {}),
- "model_info": kwargs.get("model_info", {}),
- "proxy_server_request": kwargs.get(
- "proxy_server_request", None
- ),
- "preset_cache_key": kwargs.get(
- "preset_cache_key", None
- ),
- "stream_response": kwargs.get(
- "stream_response", {}
- ),
- },
- input=kwargs.get("messages", ""),
- api_key=kwargs.get("api_key", None),
- original_response=str(cached_result),
- additional_args=None,
- stream=kwargs.get("stream", False),
- )
- threading.Thread(
- target=logging_obj.success_handler,
- args=(cached_result, start_time, end_time, cache_hit),
- ).start()
- return cached_result
-
- # CHECK MAX TOKENS
- if (
- kwargs.get("max_tokens", None) is not None
- and model is not None
- and litellm.modify_params
- == True # user is okay with params being modified
- and (
- call_type == CallTypes.acompletion.value
- or call_type == CallTypes.completion.value
- )
- ):
- try:
- base_model = model
- if kwargs.get("hf_model_name", None) is not None:
- base_model = f"huggingface/{kwargs.get('hf_model_name')}"
- max_output_tokens = (
- get_max_tokens(model=base_model) or 4096
- ) # assume min context window is 4k tokens
- user_max_tokens = kwargs.get("max_tokens")
- ## Scenario 1: User limit + prompt > model limit
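- # e.g. (illustrative numbers) 4096-token window, 3000-token prompt (~3300 after the 10% buffer below), max_tokens=2000 -> max_tokens is trimmed to 796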
- messages = None
- if len(args) > 1:
- messages = args[1]
- elif kwargs.get("messages", None):
- messages = kwargs["messages"]
- input_tokens = token_counter(model=base_model, messages=messages)
- input_tokens += max(
- 0.1 * input_tokens, 10
- ) # give at least a 10 token buffer. token counting can be imprecise.
- if input_tokens > max_output_tokens:
- pass # allow call to fail normally
- elif user_max_tokens + input_tokens > max_output_tokens:
- user_max_tokens = max_output_tokens - input_tokens
- print_verbose(f"user_max_tokens: {user_max_tokens}")
- kwargs["max_tokens"] = int(
- round(user_max_tokens)
- ) # make sure max tokens is always an int
- except Exception as e:
- print_verbose(f"Error while checking max token limit: {str(e)}")
- # MODEL CALL
-> result = original_function(*args, **kwargs)
-
-../utils.py:3211:
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-../main.py:2368: in completion
- raise exception_type(
-../utils.py:9709: in exception_type
- raise e
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
-model = 'gemini-1.5-flash-preview-0514'
-original_exception = VertexAIError("Parameter to MergeFrom() must be instance of same class: expected got .")
-custom_llm_provider = 'vertex_ai'
-completion_kwargs = {'acompletion': False, 'api_base': None, 'api_key': None, 'api_version': None, ...}
-extra_kwargs = {'litellm_call_id': '7f48b7ab-47b3-4beb-b2b5-fa298be49d3f', 'litellm_logging_obj': <...>}
-
- def exception_type(
- model,
- original_exception,
- custom_llm_provider,
- completion_kwargs={},
- extra_kwargs={},
- ):
- global user_logger_fn, liteDebuggerClient
- exception_mapping_worked = False
- if litellm.suppress_debug_info is False:
- print() # noqa
- print( # noqa
- "\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m" # noqa
- ) # noqa
- print( # noqa
- "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'." # noqa
- ) # noqa
- print() # noqa
- try:
- if model:
- error_str = str(original_exception)
- if isinstance(original_exception, BaseException):
- exception_type = type(original_exception).__name__
- else:
- exception_type = ""
-
- ################################################################################
- # Common Extra information needed for all providers
- # We pass num retries, api_base, vertex_deployment etc to the exception here
- ################################################################################
- extra_information = ""
- try:
- _api_base = litellm.get_api_base(
- model=model, optional_params=extra_kwargs
- )
- messages = litellm.get_first_chars_messages(kwargs=completion_kwargs)
- _vertex_project = extra_kwargs.get("vertex_project")
- _vertex_location = extra_kwargs.get("vertex_location")
- _metadata = extra_kwargs.get("metadata", {}) or {}
- _model_group = _metadata.get("model_group")
- _deployment = _metadata.get("deployment")
- extra_information = f"\nModel: {model}"
- if _api_base:
- extra_information += f"\nAPI Base: {_api_base}"
- if messages and len(messages) > 0:
- extra_information += f"\nMessages: {messages}"
-
- if _model_group is not None:
- extra_information += f"\nmodel_group: {_model_group}\n"
- if _deployment is not None:
- extra_information += f"\ndeployment: {_deployment}\n"
- if _vertex_project is not None:
- extra_information += f"\nvertex_project: {_vertex_project}\n"
- if _vertex_location is not None:
- extra_information += f"\nvertex_location: {_vertex_location}\n"
-
- # on litellm proxy add key name + team to exceptions
- extra_information = _add_key_name_and_team_to_alert(
- request_info=extra_information, metadata=_metadata
- )
- except:
- # DO NOT LET this Block raising the original exception
- pass
-
- ################################################################################
- # End of Common Extra information Needed for all providers
- ################################################################################
-
- ################################################################################
- #################### Start of Provider Exception mapping ####################
- ################################################################################
-
- if "Request Timeout Error" in error_str or "Request timed out" in error_str:
- exception_mapping_worked = True
- raise Timeout(
- message=f"APITimeoutError - Request timed out. \nerror_str: {error_str}",
- model=model,
- llm_provider=custom_llm_provider,
- litellm_debug_info=extra_information,
- )
-
- if (
- custom_llm_provider == "openai"
- or custom_llm_provider == "text-completion-openai"
- or custom_llm_provider == "custom_openai"
- or custom_llm_provider in litellm.openai_compatible_providers
- ):
- # custom_llm_provider is openai, make it OpenAI
- if hasattr(original_exception, "message"):
- message = original_exception.message
- else:
- message = str(original_exception)
- if message is not None and isinstance(message, str):
- message = message.replace("OPENAI", custom_llm_provider.upper())
- message = message.replace("openai", custom_llm_provider)
- message = message.replace("OpenAI", custom_llm_provider)
- if custom_llm_provider == "openai":
- exception_provider = "OpenAI" + "Exception"
- else:
- exception_provider = (
- custom_llm_provider[0].upper()
- + custom_llm_provider[1:]
- + "Exception"
- )
-
- if "This model's maximum context length is" in error_str:
- exception_mapping_worked = True
- raise ContextWindowExceededError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif (
- "invalid_request_error" in error_str
- and "model_not_found" in error_str
- ):
- exception_mapping_worked = True
- raise NotFoundError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif (
- "invalid_request_error" in error_str
- and "content_policy_violation" in error_str
- ):
- exception_mapping_worked = True
- raise ContentPolicyViolationError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif (
- "invalid_request_error" in error_str
- and "Incorrect API key provided" not in error_str
- ):
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif "Request too large" in error_str:
- raise RateLimitError(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif (
- "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable"
- in error_str
- ):
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif "Mistral API raised a streaming error" in error_str:
- exception_mapping_worked = True
- _request = httpx.Request(
- method="POST", url="https://api.openai.com/v1"
- )
- raise APIError(
- status_code=500,
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- request=_request,
- litellm_debug_info=extra_information,
- )
- elif hasattr(original_exception, "status_code"):
- exception_mapping_worked = True
- if original_exception.status_code == 401:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 404:
- exception_mapping_worked = True
- raise NotFoundError(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 408:
- exception_mapping_worked = True
- raise Timeout(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 422:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 429:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 503:
- exception_mapping_worked = True
- raise ServiceUnavailableError(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 504: # gateway timeout error
- exception_mapping_worked = True
- raise Timeout(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- litellm_debug_info=extra_information,
- )
- else:
- exception_mapping_worked = True
- raise APIError(
- status_code=original_exception.status_code,
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- request=original_exception.request,
- litellm_debug_info=extra_information,
- )
- else:
- # if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors
- raise APIConnectionError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- litellm_debug_info=extra_information,
- request=httpx.Request(
- method="POST", url="https://api.openai.com/v1/"
- ),
- )
- elif custom_llm_provider == "anthropic": # one of the anthropics
- if hasattr(original_exception, "message"):
- if (
- "prompt is too long" in original_exception.message
- or "prompt: length" in original_exception.message
- ):
- exception_mapping_worked = True
- raise ContextWindowExceededError(
- message=original_exception.message,
- model=model,
- llm_provider="anthropic",
- response=original_exception.response,
- )
- if "Invalid API Key" in original_exception.message:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=original_exception.message,
- model=model,
- llm_provider="anthropic",
- response=original_exception.response,
- )
- if hasattr(original_exception, "status_code"):
- print_verbose(f"status_code: {original_exception.status_code}")
- if original_exception.status_code == 401:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"AnthropicException - {original_exception.message}",
- llm_provider="anthropic",
- model=model,
- response=original_exception.response,
- )
- elif (
- original_exception.status_code == 400
- or original_exception.status_code == 413
- ):
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"AnthropicException - {original_exception.message}",
- model=model,
- llm_provider="anthropic",
- response=original_exception.response,
- )
- elif original_exception.status_code == 408:
- exception_mapping_worked = True
- raise Timeout(
- message=f"AnthropicException - {original_exception.message}",
- model=model,
- llm_provider="anthropic",
- )
- elif original_exception.status_code == 429:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"AnthropicException - {original_exception.message}",
- llm_provider="anthropic",
- model=model,
- response=original_exception.response,
- )
- elif original_exception.status_code == 500:
- exception_mapping_worked = True
- raise APIError(
- status_code=500,
- message=f"AnthropicException - {original_exception.message}. Handle with `litellm.APIError`.",
- llm_provider="anthropic",
- model=model,
- request=original_exception.request,
- )
- elif custom_llm_provider == "replicate":
- if "Incorrect authentication token" in error_str:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"ReplicateException - {error_str}",
- llm_provider="replicate",
- model=model,
- response=original_exception.response,
- )
- elif "input is too long" in error_str:
- exception_mapping_worked = True
- raise ContextWindowExceededError(
- message=f"ReplicateException - {error_str}",
- model=model,
- llm_provider="replicate",
- response=original_exception.response,
- )
- elif exception_type == "ModelError":
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"ReplicateException - {error_str}",
- model=model,
- llm_provider="replicate",
- response=original_exception.response,
- )
- elif "Request was throttled" in error_str:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"ReplicateException - {error_str}",
- llm_provider="replicate",
- model=model,
- response=original_exception.response,
- )
- elif hasattr(original_exception, "status_code"):
- if original_exception.status_code == 401:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"ReplicateException - {original_exception.message}",
- llm_provider="replicate",
- model=model,
- response=original_exception.response,
- )
- elif (
- original_exception.status_code == 400
- or original_exception.status_code == 422
- or original_exception.status_code == 413
- ):
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"ReplicateException - {original_exception.message}",
- model=model,
- llm_provider="replicate",
- response=original_exception.response,
- )
- elif original_exception.status_code == 408:
- exception_mapping_worked = True
- raise Timeout(
- message=f"ReplicateException - {original_exception.message}",
- model=model,
- llm_provider="replicate",
- )
- elif original_exception.status_code == 429:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"ReplicateException - {original_exception.message}",
- llm_provider="replicate",
- model=model,
- response=original_exception.response,
- )
- elif original_exception.status_code == 500:
- exception_mapping_worked = True
- raise ServiceUnavailableError(
- message=f"ReplicateException - {original_exception.message}",
- llm_provider="replicate",
- model=model,
- response=original_exception.response,
- )
- exception_mapping_worked = True
- raise APIError(
- status_code=500,
- message=f"ReplicateException - {str(original_exception)}",
- llm_provider="replicate",
- model=model,
- request=httpx.Request(
- method="POST",
- url="https://api.replicate.com/v1/deployments",
- ),
- )
- elif custom_llm_provider == "watsonx":
- if "token_quota_reached" in error_str:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"WatsonxException: Rate Limit Errror - {error_str}",
- llm_provider="watsonx",
- model=model,
- response=original_exception.response,
- )
- elif custom_llm_provider == "predibase":
- if "authorization denied for" in error_str:
- exception_mapping_worked = True
-
- # Predibase returns the raw API Key in the response - this block ensures it's not returned in the exception
- if (
- error_str is not None
- and isinstance(error_str, str)
- and "bearer" in error_str.lower()
- ):
- # only keep the first 10 chars after the occurnence of "bearer"
- _bearer_token_start_index = error_str.lower().find("bearer")
- error_str = error_str[: _bearer_token_start_index + 14]
- error_str += "XXXXXXX" + '"'
-
- raise AuthenticationError(
- message=f"PredibaseException: Authentication Error - {error_str}",
- llm_provider="predibase",
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif custom_llm_provider == "bedrock":
- if (
- "too many tokens" in error_str
- or "expected maxLength:" in error_str
- or "Input is too long" in error_str
- or "prompt: length: 1.." in error_str
- or "Too many input tokens" in error_str
- ):
- exception_mapping_worked = True
- raise ContextWindowExceededError(
- message=f"BedrockException: Context Window Error - {error_str}",
- model=model,
- llm_provider="bedrock",
- response=original_exception.response,
- )
- elif "Malformed input request" in error_str:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"BedrockException - {error_str}",
- model=model,
- llm_provider="bedrock",
- response=original_exception.response,
- )
- elif (
- "Unable to locate credentials" in error_str
- or "The security token included in the request is invalid"
- in error_str
- ):
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"BedrockException Invalid Authentication - {error_str}",
- model=model,
- llm_provider="bedrock",
- response=original_exception.response,
- )
- elif "AccessDeniedException" in error_str:
- exception_mapping_worked = True
- raise PermissionDeniedError(
- message=f"BedrockException PermissionDeniedError - {error_str}",
- model=model,
- llm_provider="bedrock",
- response=original_exception.response,
- )
- elif (
- "throttlingException" in error_str
- or "ThrottlingException" in error_str
- ):
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"BedrockException: Rate Limit Error - {error_str}",
- model=model,
- llm_provider="bedrock",
- response=original_exception.response,
- )
- elif (
- "Connect timeout on endpoint URL" in error_str
- or "timed out" in error_str
- ):
- exception_mapping_worked = True
- raise Timeout(
- message=f"BedrockException: Timeout Error - {error_str}",
- model=model,
- llm_provider="bedrock",
- )
- elif hasattr(original_exception, "status_code"):
- if original_exception.status_code == 500:
- exception_mapping_worked = True
- raise ServiceUnavailableError(
- message=f"BedrockException - {original_exception.message}",
- llm_provider="bedrock",
- model=model,
- response=httpx.Response(
- status_code=500,
- request=httpx.Request(
- method="POST", url="https://api.openai.com/v1/"
- ),
- ),
- )
- elif original_exception.status_code == 401:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"BedrockException - {original_exception.message}",
- llm_provider="bedrock",
- model=model,
- response=original_exception.response,
- )
- elif original_exception.status_code == 400:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"BedrockException - {original_exception.message}",
- llm_provider="bedrock",
- model=model,
- response=original_exception.response,
- )
- elif original_exception.status_code == 404:
- exception_mapping_worked = True
- raise NotFoundError(
- message=f"BedrockException - {original_exception.message}",
- llm_provider="bedrock",
- model=model,
- response=original_exception.response,
- )
- elif original_exception.status_code == 408:
- exception_mapping_worked = True
- raise Timeout(
- message=f"BedrockException - {original_exception.message}",
- model=model,
- llm_provider=custom_llm_provider,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 422:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"BedrockException - {original_exception.message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 429:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"BedrockException - {original_exception.message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 503:
- exception_mapping_worked = True
- raise ServiceUnavailableError(
- message=f"BedrockException - {original_exception.message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 504: # gateway timeout error
- exception_mapping_worked = True
- raise Timeout(
- message=f"BedrockException - {original_exception.message}",
- model=model,
- llm_provider=custom_llm_provider,
- litellm_debug_info=extra_information,
- )
- elif custom_llm_provider == "sagemaker":
- if "Unable to locate credentials" in error_str:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"SagemakerException - {error_str}",
- model=model,
- llm_provider="sagemaker",
- response=original_exception.response,
- )
- elif (
- "Input validation error: `best_of` must be > 0 and <= 2"
- in error_str
- ):
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"SagemakerException - the value of 'n' must be > 0 and <= 2 for sagemaker endpoints",
- model=model,
- llm_provider="sagemaker",
- response=original_exception.response,
- )
- elif (
- "`inputs` tokens + `max_new_tokens` must be <=" in error_str
- or "instance type with more CPU capacity or memory" in error_str
- ):
- exception_mapping_worked = True
- raise ContextWindowExceededError(
- message=f"SagemakerException - {error_str}",
- model=model,
- llm_provider="sagemaker",
- response=original_exception.response,
- )
- elif custom_llm_provider == "vertex_ai":
- if (
- "Vertex AI API has not been used in project" in error_str
- or "Unable to find your project" in error_str
- ):
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"VertexAIException - {error_str}",
- model=model,
- llm_provider="vertex_ai",
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif (
- "None Unknown Error." in error_str
- or "Content has no parts." in error_str
- ):
- exception_mapping_worked = True
- raise APIError(
- message=f"VertexAIException - {error_str}",
- status_code=500,
- model=model,
- llm_provider="vertex_ai",
- request=original_exception.request,
- litellm_debug_info=extra_information,
- )
- elif "403" in error_str:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"VertexAIException - {error_str}",
- model=model,
- llm_provider="vertex_ai",
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif "The response was blocked." in error_str:
- exception_mapping_worked = True
- raise UnprocessableEntityError(
- message=f"VertexAIException - {error_str}",
- model=model,
- llm_provider="vertex_ai",
- litellm_debug_info=extra_information,
- response=httpx.Response(
- status_code=429,
- request=httpx.Request(
- method="POST",
- url=" https://cloud.google.com/vertex-ai/",
- ),
- ),
- )
- elif (
- "429 Quota exceeded" in error_str
- or "IndexError: list index out of range" in error_str
- or "429 Unable to submit request because the service is temporarily out of capacity."
- in error_str
- ):
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"VertexAIException - {error_str}",
- model=model,
- llm_provider="vertex_ai",
- litellm_debug_info=extra_information,
- response=httpx.Response(
- status_code=429,
- request=httpx.Request(
- method="POST",
- url=" https://cloud.google.com/vertex-ai/",
- ),
- ),
- )
- if hasattr(original_exception, "status_code"):
- if original_exception.status_code == 400:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"VertexAIException - {error_str}",
- model=model,
- llm_provider="vertex_ai",
- litellm_debug_info=extra_information,
- response=original_exception.response,
- )
- if original_exception.status_code == 500:
- exception_mapping_worked = True
-> raise APIError(
- message=f"VertexAIException - {error_str}",
- status_code=500,
- model=model,
- llm_provider="vertex_ai",
- litellm_debug_info=extra_information,
- request=original_exception.request,
-E litellm.exceptions.APIError: VertexAIException - Parameter to MergeFrom() must be instance of same class: expected got .
-
-../utils.py:8922: APIError
-
-During handling of the above exception, another exception occurred:
-
- def test_gemini_pro_vision():
- try:
- load_vertex_ai_credentials()
- litellm.set_verbose = True
- litellm.num_retries = 3
-> resp = litellm.completion(
- model="vertex_ai/gemini-1.5-flash-preview-0514",
- messages=[
- {
- "role": "user",
- "content": [
- {"type": "text", "text": "Whats in this image?"},
- {
- "type": "image_url",
- "image_url": {
- "url": "gs://cloud-samples-data/generative-ai/image/boats.jpeg"
- },
- },
- ],
- }
- ],
- )
-
-test_amazing_vertex_completion.py:510:
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-../utils.py:3289: in wrapper
- return litellm.completion_with_retries(*args, **kwargs)
-../main.py:2401: in completion_with_retries
- return retryer(original_function, *args, **kwargs)
-../proxy/myenv/lib/python3.11/site-packages/tenacity/__init__.py:379: in __call__
- do = self.iter(retry_state=retry_state)
-../proxy/myenv/lib/python3.11/site-packages/tenacity/__init__.py:325: in iter
- raise retry_exc.reraise()
-../proxy/myenv/lib/python3.11/site-packages/tenacity/__init__.py:158: in reraise
- raise self.last_attempt.result()
-/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py:449: in result
- return self.__get_result()
-/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py:401: in __get_result
- raise self._exception
-../proxy/myenv/lib/python3.11/site-packages/tenacity/__init__.py:382: in __call__
- result = fn(*args, **kwargs)
-../utils.py:3317: in wrapper
- raise e
-../utils.py:3211: in wrapper
- result = original_function(*args, **kwargs)
-../main.py:2368: in completion
- raise exception_type(
-../utils.py:9709: in exception_type
- raise e
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
-model = 'gemini-1.5-flash-preview-0514'
-original_exception = VertexAIError("Parameter to MergeFrom() must be instance of same class: expected got .")
-custom_llm_provider = 'vertex_ai'
-completion_kwargs = {'acompletion': False, 'api_base': None, 'api_key': None, 'api_version': None, ...}
-extra_kwargs = {'litellm_call_id': '7f48b7ab-47b3-4beb-b2b5-fa298be49d3f', 'litellm_logging_obj': }
-
- def exception_type(
- model,
- original_exception,
- custom_llm_provider,
- completion_kwargs={},
- extra_kwargs={},
- ):
- global user_logger_fn, liteDebuggerClient
- exception_mapping_worked = False
- if litellm.suppress_debug_info is False:
- print() # noqa
- print( # noqa
- "\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m" # noqa
- ) # noqa
- print( # noqa
- "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'." # noqa
- ) # noqa
- print() # noqa
- try:
- if model:
- error_str = str(original_exception)
- if isinstance(original_exception, BaseException):
- exception_type = type(original_exception).__name__
- else:
- exception_type = ""
-
- ################################################################################
- # Common Extra information needed for all providers
- # We pass num retries, api_base, vertex_deployment etc to the exception here
- ################################################################################
- extra_information = ""
- try:
- _api_base = litellm.get_api_base(
- model=model, optional_params=extra_kwargs
- )
- messages = litellm.get_first_chars_messages(kwargs=completion_kwargs)
- _vertex_project = extra_kwargs.get("vertex_project")
- _vertex_location = extra_kwargs.get("vertex_location")
- _metadata = extra_kwargs.get("metadata", {}) or {}
- _model_group = _metadata.get("model_group")
- _deployment = _metadata.get("deployment")
- extra_information = f"\nModel: {model}"
- if _api_base:
- extra_information += f"\nAPI Base: {_api_base}"
- if messages and len(messages) > 0:
- extra_information += f"\nMessages: {messages}"
-
- if _model_group is not None:
- extra_information += f"\nmodel_group: {_model_group}\n"
- if _deployment is not None:
- extra_information += f"\ndeployment: {_deployment}\n"
- if _vertex_project is not None:
- extra_information += f"\nvertex_project: {_vertex_project}\n"
- if _vertex_location is not None:
- extra_information += f"\nvertex_location: {_vertex_location}\n"
-
- # on litellm proxy add key name + team to exceptions
- extra_information = _add_key_name_and_team_to_alert(
- request_info=extra_information, metadata=_metadata
- )
- except:
- # DO NOT LET this Block raising the original exception
- pass
-
- ################################################################################
- # End of Common Extra information Needed for all providers
- ################################################################################
-
- ################################################################################
- #################### Start of Provider Exception mapping ####################
- ################################################################################
-
- if "Request Timeout Error" in error_str or "Request timed out" in error_str:
- exception_mapping_worked = True
- raise Timeout(
- message=f"APITimeoutError - Request timed out. \nerror_str: {error_str}",
- model=model,
- llm_provider=custom_llm_provider,
- litellm_debug_info=extra_information,
- )
-
- if (
- custom_llm_provider == "openai"
- or custom_llm_provider == "text-completion-openai"
- or custom_llm_provider == "custom_openai"
- or custom_llm_provider in litellm.openai_compatible_providers
- ):
- # custom_llm_provider is openai, make it OpenAI
- if hasattr(original_exception, "message"):
- message = original_exception.message
- else:
- message = str(original_exception)
- if message is not None and isinstance(message, str):
- message = message.replace("OPENAI", custom_llm_provider.upper())
- message = message.replace("openai", custom_llm_provider)
- message = message.replace("OpenAI", custom_llm_provider)
- if custom_llm_provider == "openai":
- exception_provider = "OpenAI" + "Exception"
- else:
- exception_provider = (
- custom_llm_provider[0].upper()
- + custom_llm_provider[1:]
- + "Exception"
- )
-
- if "This model's maximum context length is" in error_str:
- exception_mapping_worked = True
- raise ContextWindowExceededError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif (
- "invalid_request_error" in error_str
- and "model_not_found" in error_str
- ):
- exception_mapping_worked = True
- raise NotFoundError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif (
- "invalid_request_error" in error_str
- and "content_policy_violation" in error_str
- ):
- exception_mapping_worked = True
- raise ContentPolicyViolationError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif (
- "invalid_request_error" in error_str
- and "Incorrect API key provided" not in error_str
- ):
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif "Request too large" in error_str:
- raise RateLimitError(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif (
- "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable"
- in error_str
- ):
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif "Mistral API raised a streaming error" in error_str:
- exception_mapping_worked = True
- _request = httpx.Request(
- method="POST", url="https://api.openai.com/v1"
- )
- raise APIError(
- status_code=500,
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- request=_request,
- litellm_debug_info=extra_information,
- )
- elif hasattr(original_exception, "status_code"):
- exception_mapping_worked = True
- if original_exception.status_code == 401:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 404:
- exception_mapping_worked = True
- raise NotFoundError(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 408:
- exception_mapping_worked = True
- raise Timeout(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 422:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 429:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 503:
- exception_mapping_worked = True
- raise ServiceUnavailableError(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 504: # gateway timeout error
- exception_mapping_worked = True
- raise Timeout(
- message=f"{exception_provider} - {message}",
- model=model,
- llm_provider=custom_llm_provider,
- litellm_debug_info=extra_information,
- )
- else:
- exception_mapping_worked = True
- raise APIError(
- status_code=original_exception.status_code,
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- request=original_exception.request,
- litellm_debug_info=extra_information,
- )
- else:
- # if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors
- raise APIConnectionError(
- message=f"{exception_provider} - {message}",
- llm_provider=custom_llm_provider,
- model=model,
- litellm_debug_info=extra_information,
- request=httpx.Request(
- method="POST", url="https://api.openai.com/v1/"
- ),
- )
- elif custom_llm_provider == "anthropic": # one of the anthropics
- if hasattr(original_exception, "message"):
- if (
- "prompt is too long" in original_exception.message
- or "prompt: length" in original_exception.message
- ):
- exception_mapping_worked = True
- raise ContextWindowExceededError(
- message=original_exception.message,
- model=model,
- llm_provider="anthropic",
- response=original_exception.response,
- )
- if "Invalid API Key" in original_exception.message:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=original_exception.message,
- model=model,
- llm_provider="anthropic",
- response=original_exception.response,
- )
- if hasattr(original_exception, "status_code"):
- print_verbose(f"status_code: {original_exception.status_code}")
- if original_exception.status_code == 401:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"AnthropicException - {original_exception.message}",
- llm_provider="anthropic",
- model=model,
- response=original_exception.response,
- )
- elif (
- original_exception.status_code == 400
- or original_exception.status_code == 413
- ):
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"AnthropicException - {original_exception.message}",
- model=model,
- llm_provider="anthropic",
- response=original_exception.response,
- )
- elif original_exception.status_code == 408:
- exception_mapping_worked = True
- raise Timeout(
- message=f"AnthropicException - {original_exception.message}",
- model=model,
- llm_provider="anthropic",
- )
- elif original_exception.status_code == 429:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"AnthropicException - {original_exception.message}",
- llm_provider="anthropic",
- model=model,
- response=original_exception.response,
- )
- elif original_exception.status_code == 500:
- exception_mapping_worked = True
- raise APIError(
- status_code=500,
- message=f"AnthropicException - {original_exception.message}. Handle with `litellm.APIError`.",
- llm_provider="anthropic",
- model=model,
- request=original_exception.request,
- )
- elif custom_llm_provider == "replicate":
- if "Incorrect authentication token" in error_str:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"ReplicateException - {error_str}",
- llm_provider="replicate",
- model=model,
- response=original_exception.response,
- )
- elif "input is too long" in error_str:
- exception_mapping_worked = True
- raise ContextWindowExceededError(
- message=f"ReplicateException - {error_str}",
- model=model,
- llm_provider="replicate",
- response=original_exception.response,
- )
- elif exception_type == "ModelError":
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"ReplicateException - {error_str}",
- model=model,
- llm_provider="replicate",
- response=original_exception.response,
- )
- elif "Request was throttled" in error_str:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"ReplicateException - {error_str}",
- llm_provider="replicate",
- model=model,
- response=original_exception.response,
- )
- elif hasattr(original_exception, "status_code"):
- if original_exception.status_code == 401:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"ReplicateException - {original_exception.message}",
- llm_provider="replicate",
- model=model,
- response=original_exception.response,
- )
- elif (
- original_exception.status_code == 400
- or original_exception.status_code == 422
- or original_exception.status_code == 413
- ):
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"ReplicateException - {original_exception.message}",
- model=model,
- llm_provider="replicate",
- response=original_exception.response,
- )
- elif original_exception.status_code == 408:
- exception_mapping_worked = True
- raise Timeout(
- message=f"ReplicateException - {original_exception.message}",
- model=model,
- llm_provider="replicate",
- )
- elif original_exception.status_code == 429:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"ReplicateException - {original_exception.message}",
- llm_provider="replicate",
- model=model,
- response=original_exception.response,
- )
- elif original_exception.status_code == 500:
- exception_mapping_worked = True
- raise ServiceUnavailableError(
- message=f"ReplicateException - {original_exception.message}",
- llm_provider="replicate",
- model=model,
- response=original_exception.response,
- )
- exception_mapping_worked = True
- raise APIError(
- status_code=500,
- message=f"ReplicateException - {str(original_exception)}",
- llm_provider="replicate",
- model=model,
- request=httpx.Request(
- method="POST",
- url="https://api.replicate.com/v1/deployments",
- ),
- )
- elif custom_llm_provider == "watsonx":
- if "token_quota_reached" in error_str:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"WatsonxException: Rate Limit Errror - {error_str}",
- llm_provider="watsonx",
- model=model,
- response=original_exception.response,
- )
- elif custom_llm_provider == "predibase":
- if "authorization denied for" in error_str:
- exception_mapping_worked = True
-
- # Predibase returns the raw API Key in the response - this block ensures it's not returned in the exception
- if (
- error_str is not None
- and isinstance(error_str, str)
- and "bearer" in error_str.lower()
- ):
- # only keep the first 10 chars after the occurnence of "bearer"
- _bearer_token_start_index = error_str.lower().find("bearer")
- error_str = error_str[: _bearer_token_start_index + 14]
- error_str += "XXXXXXX" + '"'
-
- raise AuthenticationError(
- message=f"PredibaseException: Authentication Error - {error_str}",
- llm_provider="predibase",
- model=model,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif custom_llm_provider == "bedrock":
- if (
- "too many tokens" in error_str
- or "expected maxLength:" in error_str
- or "Input is too long" in error_str
- or "prompt: length: 1.." in error_str
- or "Too many input tokens" in error_str
- ):
- exception_mapping_worked = True
- raise ContextWindowExceededError(
- message=f"BedrockException: Context Window Error - {error_str}",
- model=model,
- llm_provider="bedrock",
- response=original_exception.response,
- )
- elif "Malformed input request" in error_str:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"BedrockException - {error_str}",
- model=model,
- llm_provider="bedrock",
- response=original_exception.response,
- )
- elif (
- "Unable to locate credentials" in error_str
- or "The security token included in the request is invalid"
- in error_str
- ):
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"BedrockException Invalid Authentication - {error_str}",
- model=model,
- llm_provider="bedrock",
- response=original_exception.response,
- )
- elif "AccessDeniedException" in error_str:
- exception_mapping_worked = True
- raise PermissionDeniedError(
- message=f"BedrockException PermissionDeniedError - {error_str}",
- model=model,
- llm_provider="bedrock",
- response=original_exception.response,
- )
- elif (
- "throttlingException" in error_str
- or "ThrottlingException" in error_str
- ):
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"BedrockException: Rate Limit Error - {error_str}",
- model=model,
- llm_provider="bedrock",
- response=original_exception.response,
- )
- elif (
- "Connect timeout on endpoint URL" in error_str
- or "timed out" in error_str
- ):
- exception_mapping_worked = True
- raise Timeout(
- message=f"BedrockException: Timeout Error - {error_str}",
- model=model,
- llm_provider="bedrock",
- )
- elif hasattr(original_exception, "status_code"):
- if original_exception.status_code == 500:
- exception_mapping_worked = True
- raise ServiceUnavailableError(
- message=f"BedrockException - {original_exception.message}",
- llm_provider="bedrock",
- model=model,
- response=httpx.Response(
- status_code=500,
- request=httpx.Request(
- method="POST", url="https://api.openai.com/v1/"
- ),
- ),
- )
- elif original_exception.status_code == 401:
- exception_mapping_worked = True
- raise AuthenticationError(
- message=f"BedrockException - {original_exception.message}",
- llm_provider="bedrock",
- model=model,
- response=original_exception.response,
- )
- elif original_exception.status_code == 400:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"BedrockException - {original_exception.message}",
- llm_provider="bedrock",
- model=model,
- response=original_exception.response,
- )
- elif original_exception.status_code == 404:
- exception_mapping_worked = True
- raise NotFoundError(
- message=f"BedrockException - {original_exception.message}",
- llm_provider="bedrock",
- model=model,
- response=original_exception.response,
- )
- elif original_exception.status_code == 408:
- exception_mapping_worked = True
- raise Timeout(
- message=f"BedrockException - {original_exception.message}",
- model=model,
- llm_provider=custom_llm_provider,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 422:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"BedrockException - {original_exception.message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 429:
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"BedrockException - {original_exception.message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 503:
- exception_mapping_worked = True
- raise ServiceUnavailableError(
- message=f"BedrockException - {original_exception.message}",
- model=model,
- llm_provider=custom_llm_provider,
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif original_exception.status_code == 504: # gateway timeout error
- exception_mapping_worked = True
- raise Timeout(
- message=f"BedrockException - {original_exception.message}",
- model=model,
- llm_provider=custom_llm_provider,
- litellm_debug_info=extra_information,
- )
- elif custom_llm_provider == "sagemaker":
- if "Unable to locate credentials" in error_str:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"SagemakerException - {error_str}",
- model=model,
- llm_provider="sagemaker",
- response=original_exception.response,
- )
- elif (
- "Input validation error: `best_of` must be > 0 and <= 2"
- in error_str
- ):
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"SagemakerException - the value of 'n' must be > 0 and <= 2 for sagemaker endpoints",
- model=model,
- llm_provider="sagemaker",
- response=original_exception.response,
- )
- elif (
- "`inputs` tokens + `max_new_tokens` must be <=" in error_str
- or "instance type with more CPU capacity or memory" in error_str
- ):
- exception_mapping_worked = True
- raise ContextWindowExceededError(
- message=f"SagemakerException - {error_str}",
- model=model,
- llm_provider="sagemaker",
- response=original_exception.response,
- )
- elif custom_llm_provider == "vertex_ai":
- if (
- "Vertex AI API has not been used in project" in error_str
- or "Unable to find your project" in error_str
- ):
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"VertexAIException - {error_str}",
- model=model,
- llm_provider="vertex_ai",
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif (
- "None Unknown Error." in error_str
- or "Content has no parts." in error_str
- ):
- exception_mapping_worked = True
- raise APIError(
- message=f"VertexAIException - {error_str}",
- status_code=500,
- model=model,
- llm_provider="vertex_ai",
- request=original_exception.request,
- litellm_debug_info=extra_information,
- )
- elif "403" in error_str:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"VertexAIException - {error_str}",
- model=model,
- llm_provider="vertex_ai",
- response=original_exception.response,
- litellm_debug_info=extra_information,
- )
- elif "The response was blocked." in error_str:
- exception_mapping_worked = True
- raise UnprocessableEntityError(
- message=f"VertexAIException - {error_str}",
- model=model,
- llm_provider="vertex_ai",
- litellm_debug_info=extra_information,
- response=httpx.Response(
- status_code=429,
- request=httpx.Request(
- method="POST",
- url=" https://cloud.google.com/vertex-ai/",
- ),
- ),
- )
- elif (
- "429 Quota exceeded" in error_str
- or "IndexError: list index out of range" in error_str
- or "429 Unable to submit request because the service is temporarily out of capacity."
- in error_str
- ):
- exception_mapping_worked = True
- raise RateLimitError(
- message=f"VertexAIException - {error_str}",
- model=model,
- llm_provider="vertex_ai",
- litellm_debug_info=extra_information,
- response=httpx.Response(
- status_code=429,
- request=httpx.Request(
- method="POST",
- url=" https://cloud.google.com/vertex-ai/",
- ),
- ),
- )
- if hasattr(original_exception, "status_code"):
- if original_exception.status_code == 400:
- exception_mapping_worked = True
- raise BadRequestError(
- message=f"VertexAIException - {error_str}",
- model=model,
- llm_provider="vertex_ai",
- litellm_debug_info=extra_information,
- response=original_exception.response,
- )
- if original_exception.status_code == 500:
- exception_mapping_worked = True
-> raise APIError(
- message=f"VertexAIException - {error_str}",
- status_code=500,
- model=model,
- llm_provider="vertex_ai",
- litellm_debug_info=extra_information,
- request=original_exception.request,
-E litellm.exceptions.APIError: VertexAIException - Parameter to MergeFrom() must be instance of same class: expected got .
-
-../utils.py:8922: APIError
-
-During handling of the above exception, another exception occurred:
-
- def test_gemini_pro_vision():
- try:
- load_vertex_ai_credentials()
- litellm.set_verbose = True
- litellm.num_retries = 3
- resp = litellm.completion(
- model="vertex_ai/gemini-1.5-flash-preview-0514",
- messages=[
- {
- "role": "user",
- "content": [
- {"type": "text", "text": "Whats in this image?"},
- {
- "type": "image_url",
- "image_url": {
- "url": "gs://cloud-samples-data/generative-ai/image/boats.jpeg"
- },
- },
- ],
- }
- ],
- )
- print(resp)
-
- prompt_tokens = resp.usage.prompt_tokens
-
- # DO Not DELETE this ASSERT
- # Google counts the prompt tokens for us, we should ensure we use the tokens from the orignal response
- assert prompt_tokens == 263 # the gemini api returns 263 to us
- except litellm.RateLimitError as e:
- pass
- except Exception as e:
- if "500 Internal error encountered.'" in str(e):
- pass
- else:
-> pytest.fail(f"An exception occurred - {str(e)}")
-E Failed: An exception occurred - VertexAIException - Parameter to MergeFrom() must be instance of same class: expected got .
-
-test_amazing_vertex_completion.py:540: Failed
----------------------------- Captured stdout setup -----------------------------
-
------------------------------ Captured stdout call -----------------------------
-loading vertex ai credentials
-Read vertexai file path
-
-
-[92mRequest to litellm:[0m
-[92mlitellm.completion(model='vertex_ai/gemini-1.5-flash-preview-0514', messages=[{'role': 'user', 'content': [{'type': 'text', 'text': 'Whats in this image?'}, {'type': 'image_url', 'image_url': {'url': 'gs://cloud-samples-data/generative-ai/image/boats.jpeg'}}]}])[0m
-
-
-self.optional_params: {}
-SYNC kwargs[caching]: False; litellm.cache: None; kwargs.get('cache')['no-cache']: False
-(start) INSIDE THE VERTEX AI OPTIONAL PARAM BLOCK
-(end) INSIDE THE VERTEX AI OPTIONAL PARAM BLOCK - optional_params: {}
-Final returned optional params: {}
-self.optional_params: {}
-VERTEX AI: vertex_project=None; vertex_location=None
-VERTEX AI: creds=; google application credentials: /var/folders/gf/5h3fnlwx40sdrycs4y5qzqx40000gn/T/tmpolsest5s
-
-Making VertexAI Gemini Pro / Pro Vision Call
-
-Processing input messages = [{'role': 'user', 'content': [{'type': 'text', 'text': 'Whats in this image?'}, {'type': 'image_url', 'image_url': {'url': 'gs://cloud-samples-data/generative-ai/image/boats.jpeg'}}]}]
-[92m
-Request Sent from LiteLLM:
-llm_model = GenerativeModel(gemini-1.5-flash-preview-0514)
-response = llm_model.generate_content([{'role': 'user', 'parts': [{'text': 'Whats in this image?'}, file_data {
- mime_type: "image/jpeg"
- file_uri: "gs://cloud-samples-data/generative-ai/image/boats.jpeg"
-}
-]}])
-[0m
-
-
-[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new[0m
-LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'.
-
-Logging Details: logger_fn - None | callable(logger_fn) - False
-
-
-[92mRequest to litellm:[0m
-[92mlitellm.completion(model='vertex_ai/gemini-1.5-flash-preview-0514', messages=[{'role': 'user', 'content': [{'type': 'text', 'text': 'Whats in this image?'}, {'type': 'image_url', 'image_url': {'url': 'gs://cloud-samples-data/generative-ai/image/boats.jpeg'}}]}], litellm_call_id='7f48b7ab-47b3-4beb-b2b5-fa298be49d3f', litellm_logging_obj=)[0m
-
-
-SYNC kwargs[caching]: False; litellm.cache: None; kwargs.get('cache')['no-cache']: False
-(start) INSIDE THE VERTEX AI OPTIONAL PARAM BLOCK
-(end) INSIDE THE VERTEX AI OPTIONAL PARAM BLOCK - optional_params: {}
-Final returned optional params: {}
-self.optional_params: {}
-VERTEX AI: vertex_project=None; vertex_location=None
-VERTEX AI: creds=; google application credentials: /var/folders/gf/5h3fnlwx40sdrycs4y5qzqx40000gn/T/tmpolsest5s
-
-Making VertexAI Gemini Pro / Pro Vision Call
-
-Processing input messages = [{'role': 'user', 'content': [{'type': 'text', 'text': 'Whats in this image?'}, {'type': 'image_url', 'image_url': {'url': 'gs://cloud-samples-data/generative-ai/image/boats.jpeg'}}]}]
-[92m
-Request Sent from LiteLLM:
-llm_model = GenerativeModel(gemini-1.5-flash-preview-0514)
-response = llm_model.generate_content([{'role': 'user', 'parts': [{'text': 'Whats in this image?'}, file_data {
- mime_type: "image/jpeg"
- file_uri: "gs://cloud-samples-data/generative-ai/image/boats.jpeg"
-}
-]}])
-[0m
-
-
-[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new[0m
-LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'.
-
-Logging Details: logger_fn - None | callable(logger_fn) - False
-Logging Details LiteLLM-Failure Call
-self.failure_callback: []
-
-
-[92mRequest to litellm:[0m
-[92mlitellm.completion(model='vertex_ai/gemini-1.5-flash-preview-0514', messages=[{'role': 'user', 'content': [{'type': 'text', 'text': 'Whats in this image?'}, {'type': 'image_url', 'image_url': {'url': 'gs://cloud-samples-data/generative-ai/image/boats.jpeg'}}]}], litellm_call_id='7f48b7ab-47b3-4beb-b2b5-fa298be49d3f', litellm_logging_obj=)[0m
-
-
-SYNC kwargs[caching]: False; litellm.cache: None; kwargs.get('cache')['no-cache']: False
-(start) INSIDE THE VERTEX AI OPTIONAL PARAM BLOCK
-(end) INSIDE THE VERTEX AI OPTIONAL PARAM BLOCK - optional_params: {}
-Final returned optional params: {}
-self.optional_params: {}
-VERTEX AI: vertex_project=None; vertex_location=None
-VERTEX AI: creds=; google application credentials: /var/folders/gf/5h3fnlwx40sdrycs4y5qzqx40000gn/T/tmpolsest5s
-
-Making VertexAI Gemini Pro / Pro Vision Call
-
-Processing input messages = [{'role': 'user', 'content': [{'type': 'text', 'text': 'Whats in this image?'}, {'type': 'image_url', 'image_url': {'url': 'gs://cloud-samples-data/generative-ai/image/boats.jpeg'}}]}]
-[92m
-Request Sent from LiteLLM:
-llm_model = GenerativeModel(gemini-1.5-flash-preview-0514)
-response = llm_model.generate_content([{'role': 'user', 'parts': [{'text': 'Whats in this image?'}, file_data {
- mime_type: "image/jpeg"
- file_uri: "gs://cloud-samples-data/generative-ai/image/boats.jpeg"
-}
-]}])
-[0m
-
-
-[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new[0m
-LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'.
-
-Logging Details: logger_fn - None | callable(logger_fn) - False
-Logging Details LiteLLM-Failure Call
-self.failure_callback: []
-
-
-[92mRequest to litellm:[0m
-[92mlitellm.completion(model='vertex_ai/gemini-1.5-flash-preview-0514', messages=[{'role': 'user', 'content': [{'type': 'text', 'text': 'Whats in this image?'}, {'type': 'image_url', 'image_url': {'url': 'gs://cloud-samples-data/generative-ai/image/boats.jpeg'}}]}], litellm_call_id='7f48b7ab-47b3-4beb-b2b5-fa298be49d3f', litellm_logging_obj=)[0m
-
-
-SYNC kwargs[caching]: False; litellm.cache: None; kwargs.get('cache')['no-cache']: False
-(start) INSIDE THE VERTEX AI OPTIONAL PARAM BLOCK
-(end) INSIDE THE VERTEX AI OPTIONAL PARAM BLOCK - optional_params: {}
-Final returned optional params: {}
-self.optional_params: {}
-VERTEX AI: vertex_project=None; vertex_location=None
-VERTEX AI: creds=; google application credentials: /var/folders/gf/5h3fnlwx40sdrycs4y5qzqx40000gn/T/tmpolsest5s
-
-Making VertexAI Gemini Pro / Pro Vision Call
-
-Processing input messages = [{'role': 'user', 'content': [{'type': 'text', 'text': 'Whats in this image?'}, {'type': 'image_url', 'image_url': {'url': 'gs://cloud-samples-data/generative-ai/image/boats.jpeg'}}]}]
-[92m
-Request Sent from LiteLLM:
-llm_model = GenerativeModel(gemini-1.5-flash-preview-0514)
-response = llm_model.generate_content([{'role': 'user', 'parts': [{'text': 'Whats in this image?'}, file_data {
- mime_type: "image/jpeg"
- file_uri: "gs://cloud-samples-data/generative-ai/image/boats.jpeg"
-}
-]}])
-[0m
-
-
-[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new[0m
-LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'.
-
-Logging Details: logger_fn - None | callable(logger_fn) - False
-Logging Details LiteLLM-Failure Call
-self.failure_callback: []
-=============================== warnings summary ===============================
-../proxy/myenv/lib/python3.11/site-packages/pydantic/_internal/_config.py:284: 25 warnings
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/myenv/lib/python3.11/site-packages/pydantic/_internal/_config.py:284: PydanticDeprecatedSince20: Support for class-based `config` is deprecated, use ConfigDict instead. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- warnings.warn(DEPRECATION_MESSAGE, DeprecationWarning)
-
-../proxy/_types.py:255
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:255: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../proxy/_types.py:342
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:342: PydanticDeprecatedSince20: `pydantic.config.Extra` is deprecated, use literal values instead (e.g. `extra='allow'`). Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- extra = Extra.allow # Allow extra fields
-
-../proxy/_types.py:345
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:345: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../proxy/_types.py:374
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:374: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../proxy/_types.py:421
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:421: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../proxy/_types.py:490
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:490: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../proxy/_types.py:510
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:510: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../proxy/_types.py:523
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:523: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../proxy/_types.py:568
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:568: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../proxy/_types.py:605
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:605: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../proxy/_types.py:923
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:923: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../proxy/_types.py:950
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:950: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../proxy/_types.py:971
- /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:971: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/
- @root_validator(pre=True)
-
-../utils.py:60
- /Users/krrishdholakia/Documents/litellm/litellm/utils.py:60: DeprecationWarning: open_text is deprecated. Use files() instead. Refer to https://importlib-resources.readthedocs.io/en/latest/using.html#migrating-from-legacy for migration advice.
- with resources.open_text("litellm.llms.tokenizers", "anthropic_tokenizer.json") as f:
-
--- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
-=========================== short test summary info ============================
-FAILED test_amazing_vertex_completion.py::test_gemini_pro_vision - Failed: An...
-======================== 1 failed, 39 warnings in 2.09s ========================
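For context, the log file deleted above captures a failing `test_gemini_pro_vision` run (a `VertexAIException` raised from a protobuf `MergeFrom()` mismatch during retries). A minimal sketch of the call that log exercises, with the model name and image URI copied verbatim from the log; this assumes Vertex AI credentials are already configured (e.g. via `GOOGLE_APPLICATION_CREDENTIALS`) and is an editorial illustration, not part of the diff:

```python
# Editorial sketch: roughly the request captured in the deleted log above.
# Assumes GOOGLE_APPLICATION_CREDENTIALS points at a valid Vertex AI service account.
import litellm

resp = litellm.completion(
    model="vertex_ai/gemini-1.5-flash-preview-0514",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Whats in this image?"},
                {
                    "type": "image_url",
                    "image_url": {
                        # GCS URI taken verbatim from the log
                        "url": "gs://cloud-samples-data/generative-ai/image/boats.jpeg"
                    },
                },
            ],
        }
    ],
)
print(resp)
```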
diff --git a/litellm/tests/test_assistants.py b/litellm/tests/test_assistants.py
index 5f565f67cc..d5f047a092 100644
--- a/litellm/tests/test_assistants.py
+++ b/litellm/tests/test_assistants.py
@@ -198,7 +198,11 @@ async def test_aarun_thread_litellm(sync_mode, provider, is_streaming):
)
assert isinstance(messages.data[0], Message)
else:
- pytest.fail("An unexpected error occurred when running the thread")
+ pytest.fail(
+ "An unexpected error occurred when running the thread, {}".format(
+ run
+ )
+ )
else:
added_message = await litellm.a_add_message(**data)
@@ -226,4 +230,8 @@ async def test_aarun_thread_litellm(sync_mode, provider, is_streaming):
)
assert isinstance(messages.data[0], Message)
else:
- pytest.fail("An unexpected error occurred when running the thread")
+ pytest.fail(
+ "An unexpected error occurred when running the thread, {}".format(
+ run
+ )
+ )
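The `test_bedrock_completion.py` change below converts `test_bedrock_claude_3` into a parametrized test so it runs once per `image_url` value (a base64 data URL and an https URL). As a quick illustration of the pattern only, here is a minimal sketch; the test name, URL values, and assertion are hypothetical and not taken from the diff:

```python
# Editorial sketch of the pytest.mark.parametrize pattern used in the diff below.
import pytest

@pytest.mark.parametrize(
    "image_url",
    [
        "data:image/png;base64,iVBORw0KGgo...",  # truncated placeholder, not real image data
        "https://example.com/some-image.png",    # hypothetical URL
    ],
)
def test_accepts_both_image_url_forms(image_url):
    # pytest invokes this test once per listed value, binding it to image_url.
    assert image_url.startswith(("data:image/", "https://"))
```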
diff --git a/litellm/tests/test_bedrock_completion.py b/litellm/tests/test_bedrock_completion.py
index 047f0cb2e2..64e7741e2a 100644
--- a/litellm/tests/test_bedrock_completion.py
+++ b/litellm/tests/test_bedrock_completion.py
@@ -243,6 +243,7 @@ def test_completion_bedrock_claude_sts_oidc_auth():
except Exception as e:
pytest.fail(f"Error occurred: {e}")
+
@pytest.mark.skipif(
os.environ.get("CIRCLE_OIDC_TOKEN_V2") is None,
reason="Cannot run without being in CircleCI Runner",
@@ -277,7 +278,15 @@ def test_completion_bedrock_httpx_command_r_sts_oidc_auth():
except Exception as e:
pytest.fail(f"Error occurred: {e}")
-def test_bedrock_claude_3():
+
+@pytest.mark.parametrize(
+ "image_url",
+ [
+ "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAL0AAAC9CAMAAADRCYwCAAAAh1BMVEX///8AAAD8/Pz5+fkEBAT39/cJCQn09PRNTU3y8vIMDAwzMzPe3t7v7+8QEBCOjo7FxcXR0dHn5+elpaWGhoYYGBivr686OjocHBy0tLQtLS1TU1PY2Ni6urpaWlpERER3d3ecnJxoaGiUlJRiYmIlJSU4ODhBQUFycnKAgIDBwcFnZ2chISE7EjuwAAAI/UlEQVR4nO1caXfiOgz1bhJIyAJhX1JoSzv8/9/3LNlpYd4rhX6o4/N8Z2lKM2cURZau5JsQEhERERERERERERERERERERHx/wBjhDPC3OGN8+Cc5JeMuheaETSdO8vZFyCScHtmz2CsktoeMn7rLM1u3h0PMAEhyYX7v/Q9wQvoGdB0hlbzm45lEq/wd6y6G9aezvBk9AXwp1r3LHJIRsh6s2maxaJpmvqgvkC7WFS3loUnaFJtKRVUCEoV/RpCnHRvAsesVQ1hw+vd7Mpo+424tLs72NplkvQgcdrsvXkW/zJWqH/fA0FT84M/xnQJt4to3+ZLuanbM6X5lfXKHosO9COgREqpCR5i86pf2zPS7j9tTj+9nO7bQz3+xGEyGW9zqgQ1tyQ/VsxEDvce/4dcUPNb5OD9yXvR4Z2QisuP0xiGWPnemgugU5q/troHhGEjIF5sTOyW648aC0TssuaaCEsYEIkGzjWXOp3A0vVsf6kgRyqaDk+T7DIVWrb58b2tT5xpUucKwodOD/5LbrZC1ws6YSaBZJ/8xlh+XZSYXaMJ2ezNqjB3IPXuehPcx2U6b4t1dS/xNdFzguUt8ie7arnPeyCZroxLHzGgGdqVcspwafizPWEXBee+9G1OaufGdvNng/9C+gwgZ3PH3r87G6zXTZ5D5De2G2DeFoANXfbACkT+fxBQ22YFsTTJF9hjFVO6VbqxZXko4WJ8s52P4PnuxO5KRzu0/hlix1ySt8iXjgaQ+4IHPA9nVzNkdduM9LFT/Aacj4FtKrHA7iAw602Vnht6R8Vq1IOS+wNMKLYqayAYfRuufQPGeGb7sZogQQoLZrGPgZ6KoYn70Iw30O92BNEDpvwouCFn6wH2uS+EhRb3WF/HObZk3HuxfRQM3Y/Of/VH0n4MKNHZDiZvO9+m/ABALfkOcuar/7nOo7B95ACGVAFaz4jMiJwJhdaHBkySmzlGTu82gr6FSTik2kJvLnY9nOd/D90qcH268m3I/cgI1xg1maE5CuZYaWLH+UHANCIck0yt7Mx5zBm5vVHXHwChsZ35kKqUpmo5Svq5/fzfAI5g2vDtFPYo1HiEA85QrDeGm9g//LG7K0scO3sdpj2CBDgCa+0OFs0bkvVgnnM/QBDwllOMm+cN7vMSHlB7Uu4haHKaTwgGkv8tlK+hP8fzmFuK/RQTpaLPWvbd58yWIo66HHM0OsPoPhVqmtaEVL7N+wYcTLTbb0DLdgp23Eyy2VYJ2N7bkLFAAibtoLPe5sLt6Oa2bvU+zyeMa8wrixO0gRTn9tO9NCSThTLGqcqtsDvphlfmx/cPBZVvw24jg1LE2lPuEo35Mhi58U0I/Ga8n5w+NS8i34MAQLos5B1u0xL1ZvCVYVRw/Fs2q53KLaXJMWwOZZ/4MPYV19bAHmgGDKB6f01xoeJKFbl63q9J34KdaVNPJWztQyRkzA3KNs1AdAEDowMxh10emXTCx75CkurtbY/ZpdNDGdsn2UcHKHsQ8Ai3WZi48IfkvtjOhsLpuIRSKZTX9FA4o+0d6o/zOWqQzVJMynL9NsxhSJOaourq6nBVQBueMSyubsX2xHrmuABZN2Ns9jr5nwLFlLF/2R6atjW/67Yd11YQ1Z+kA9Zk9dPTM/o6dVo6HHVgC0JR8oUfmI93T9u3gvTG94bAH02Y5xeqRcjuwnKCK6Q2+ajl8KXJ3GSh22P3Zfx6S+n008ROhJn+JRIUVu6o7OXl8w1SeyhuqNDwNI7SjbK08QrqPxS95jy4G7nCXVq6G3HNu0LtK5J0e226CfC005WKK9sVvfxI0eUbcnzutfhWe3rpZHM0nZ/ny/N8tanKYlQ6VEW5Xuym8yV1zZX58vwGhZp/5tFfhybZabdbrQYOs8F+xEhmPsb0/nki6kIyVvzZzUASiOrTfF+Sj9bXC7DoJxeiV8tjQL6loSd0yCx7YyB6rPdLx31U2qCG3F/oXIuDuqd6LFO+4DNIJuxFZqSsU0ea88avovFnWKRYFYRQDfCfcGaBCLn4M4A1ntJ5E57vicwqq2enaZEF5nokCYu9TbKqCC5yCDfL+GhLxT4w4xEJs+anqgou8DOY2q8FMryjb2MehC1dRJ9s4g9NXeTwPkWON4RH+FhIe0AWR/S9ekvQ+t70XHeimGF78LzuU7d7PwrswdIG2VpgF8C53qVQsTDtBJc4CdnkQPbnZY9mbPdDFra3PCXBBQ5QBn2aQqtyhvlyYM4Hb2/mdhsxCUen04GZVvIJZw5PAamMOmjzq8Q+dzAKLXDQ3RUZItWsg4t7W2DP+JDrJDymoMH7E5zQtuEpG03GTIjGCW3LQqOYEsXgFc78x76NeRwY6SNM+IfQoh6myJKRBIcLYxZcwscJ/gI2isTBty2Po9IkYzP0/SS4hGlxRjFAG5z1Jt1LckiB57yWvo35EaolbvA+6fBa24xodL2YjsPpTnj3JgJOqhcgOeLVsYYwoK0wjY+m1D3rGc40CukkaHnkEjarlXrF1B9M6ECQ6Ow0V7R7N4G3LfOHAXtymoyXOb4QhaYHJ/gNBJUkxclpSs7DNcgWWDDmM7Ke5MJpGuioe7w5EOvfTunUKRzOh7G2ylL+6ynHrD54oQO3//cN3yVO+5qMVsPZq0CZIOx4TlcJ8+Vz7V5waL+7WekzUpRFMTnnTlSCq3X5usi8qmIleW/rit1+oQZn1WGSU/sKBYEqMNh1mBOc6PhK8yCfKHdUNQk8o/G19ZPTs5MYfai+DLs5vmee37zEyyH48WW3XA6Xw6+Az8lMhci7N/KleToo7PtTKm+RA887Kqc6E9dyqL/QPTugzMHLbLZtJKqKLFfzVWRNJ63c+95uWT/F7R0U5dDVvuS409AJXhJvD0EwWaWdW8UN11u/7+umaYjT8mJtzZwP/MD4r57fihiHlC5fylHfaqnJdro+Dr7DajvO+vi2EwyD70s8nCH71nzIO1l5Zl+v1DMCb5ebvCMkGHvobXy/hPumGLyX0218/3RyD1GRLOuf9u/OGQyDmto32yMiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIv7GP8YjWPR/czH2AAAAAElFTkSuQmCC",
+ "https://avatars.githubusercontent.com/u/29436595?v=",
+ ],
+)
+def test_bedrock_claude_3(image_url):
try:
litellm.set_verbose = True
data = {
@@ -294,7 +303,7 @@ def test_bedrock_claude_3():
{
"image_url": {
"detail": "high",
- "url": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAL0AAAC9CAMAAADRCYwCAAAAh1BMVEX///8AAAD8/Pz5+fkEBAT39/cJCQn09PRNTU3y8vIMDAwzMzPe3t7v7+8QEBCOjo7FxcXR0dHn5+elpaWGhoYYGBivr686OjocHBy0tLQtLS1TU1PY2Ni6urpaWlpERER3d3ecnJxoaGiUlJRiYmIlJSU4ODhBQUFycnKAgIDBwcFnZ2chISE7EjuwAAAI/UlEQVR4nO1caXfiOgz1bhJIyAJhX1JoSzv8/9/3LNlpYd4rhX6o4/N8Z2lKM2cURZau5JsQEhERERERERERERERERERERHx/wBjhDPC3OGN8+Cc5JeMuheaETSdO8vZFyCScHtmz2CsktoeMn7rLM1u3h0PMAEhyYX7v/Q9wQvoGdB0hlbzm45lEq/wd6y6G9aezvBk9AXwp1r3LHJIRsh6s2maxaJpmvqgvkC7WFS3loUnaFJtKRVUCEoV/RpCnHRvAsesVQ1hw+vd7Mpo+424tLs72NplkvQgcdrsvXkW/zJWqH/fA0FT84M/xnQJt4to3+ZLuanbM6X5lfXKHosO9COgREqpCR5i86pf2zPS7j9tTj+9nO7bQz3+xGEyGW9zqgQ1tyQ/VsxEDvce/4dcUPNb5OD9yXvR4Z2QisuP0xiGWPnemgugU5q/troHhGEjIF5sTOyW648aC0TssuaaCEsYEIkGzjWXOp3A0vVsf6kgRyqaDk+T7DIVWrb58b2tT5xpUucKwodOD/5LbrZC1ws6YSaBZJ/8xlh+XZSYXaMJ2ezNqjB3IPXuehPcx2U6b4t1dS/xNdFzguUt8ie7arnPeyCZroxLHzGgGdqVcspwafizPWEXBee+9G1OaufGdvNng/9C+gwgZ3PH3r87G6zXTZ5D5De2G2DeFoANXfbACkT+fxBQ22YFsTTJF9hjFVO6VbqxZXko4WJ8s52P4PnuxO5KRzu0/hlix1ySt8iXjgaQ+4IHPA9nVzNkdduM9LFT/Aacj4FtKrHA7iAw602Vnht6R8Vq1IOS+wNMKLYqayAYfRuufQPGeGb7sZogQQoLZrGPgZ6KoYn70Iw30O92BNEDpvwouCFn6wH2uS+EhRb3WF/HObZk3HuxfRQM3Y/Of/VH0n4MKNHZDiZvO9+m/ABALfkOcuar/7nOo7B95ACGVAFaz4jMiJwJhdaHBkySmzlGTu82gr6FSTik2kJvLnY9nOd/D90qcH268m3I/cgI1xg1maE5CuZYaWLH+UHANCIck0yt7Mx5zBm5vVHXHwChsZ35kKqUpmo5Svq5/fzfAI5g2vDtFPYo1HiEA85QrDeGm9g//LG7K0scO3sdpj2CBDgCa+0OFs0bkvVgnnM/QBDwllOMm+cN7vMSHlB7Uu4haHKaTwgGkv8tlK+hP8fzmFuK/RQTpaLPWvbd58yWIo66HHM0OsPoPhVqmtaEVL7N+wYcTLTbb0DLdgp23Eyy2VYJ2N7bkLFAAibtoLPe5sLt6Oa2bvU+zyeMa8wrixO0gRTn9tO9NCSThTLGqcqtsDvphlfmx/cPBZVvw24jg1LE2lPuEo35Mhi58U0I/Ga8n5w+NS8i34MAQLos5B1u0xL1ZvCVYVRw/Fs2q53KLaXJMWwOZZ/4MPYV19bAHmgGDKB6f01xoeJKFbl63q9J34KdaVNPJWztQyRkzA3KNs1AdAEDowMxh10emXTCx75CkurtbY/ZpdNDGdsn2UcHKHsQ8Ai3WZi48IfkvtjOhsLpuIRSKZTX9FA4o+0d6o/zOWqQzVJMynL9NsxhSJOaourq6nBVQBueMSyubsX2xHrmuABZN2Ns9jr5nwLFlLF/2R6atjW/67Yd11YQ1Z+kA9Zk9dPTM/o6dVo6HHVgC0JR8oUfmI93T9u3gvTG94bAH02Y5xeqRcjuwnKCK6Q2+ajl8KXJ3GSh22P3Zfx6S+n008ROhJn+JRIUVu6o7OXl8w1SeyhuqNDwNI7SjbK08QrqPxS95jy4G7nCXVq6G3HNu0LtK5J0e226CfC005WKK9sVvfxI0eUbcnzutfhWe3rpZHM0nZ/ny/N8tanKYlQ6VEW5Xuym8yV1zZX58vwGhZp/5tFfhybZabdbrQYOs8F+xEhmPsb0/nki6kIyVvzZzUASiOrTfF+Sj9bXC7DoJxeiV8tjQL6loSd0yCx7YyB6rPdLx31U2qCG3F/oXIuDuqd6LFO+4DNIJuxFZqSsU0ea88avovFnWKRYFYRQDfCfcGaBCLn4M4A1ntJ5E57vicwqq2enaZEF5nokCYu9TbKqCC5yCDfL+GhLxT4w4xEJs+anqgou8DOY2q8FMryjb2MehC1dRJ9s4g9NXeTwPkWON4RH+FhIe0AWR/S9ekvQ+t70XHeimGF78LzuU7d7PwrswdIG2VpgF8C53qVQsTDtBJc4CdnkQPbnZY9mbPdDFra3PCXBBQ5QBn2aQqtyhvlyYM4Hb2/mdhsxCUen04GZVvIJZw5PAamMOmjzq8Q+dzAKLXDQ3RUZItWsg4t7W2DP+JDrJDymoMH7E5zQtuEpG03GTIjGCW3LQqOYEsXgFc78x76NeRwY6SNM+IfQoh6myJKRBIcLYxZcwscJ/gI2isTBty2Po9IkYzP0/SS4hGlxRjFAG5z1Jt1LckiB57yWvo35EaolbvA+6fBa24xodL2YjsPpTnj3JgJOqhcgOeLVsYYwoK0wjY+m1D3rGc40CukkaHnkEjarlXrF1B9M6ECQ6Ow0V7R7N4G3LfOHAXtymoyXOb4QhaYHJ/gNBJUkxclpSs7DNcgWWDDmM7Ke5MJpGuioe7w5EOvfTunUKRzOh7G2ylL+6ynHrD54oQO3//cN3yVO+5qMVsPZq0CZIOx4TlcJ8+Vz7V5waL+7WekzUpRFMTnnTlSCq3X5usi8qmIleW/rit1+oQZn1WGSU/sKBYEqMNh1mBOc6PhK8yCfKHdUNQk8o/G19ZPTs5MYfai+DLs5vmee37zEyyH48WW3XA6Xw6+Az8lMhci7N/KleToo7PtTKm+RA887Kqc6E9dyqL/QPTugzMHLbLZtJKqKLFfzVWRNJ63c+95uWT/F7R0U5dDVvuS409AJXhJvD0EwWaWdW8UN11u/7+umaYjT8mJtzZwP/MD4r57fihiHlC5fylHfaqnJdro+Dr7DajvO+vi2EwyD70s8nCH71nzIO1l5Zl+v1DMCb5ebvCMkGHvobXy/hPumGLyX0218/3RyD1GRLOuf9u/OGQyDmto32yMiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIv7GP8YjWPR/czH2AAAAAElFTkSuQmCC",
+ "url": image_url,
},
"type": "image_url",
},
@@ -313,7 +322,6 @@ def test_bedrock_claude_3():
# Add any assertions here to check the response
assert len(response.choices) > 0
assert len(response.choices[0].message.content) > 0
-
except RateLimitError:
pass
except Exception as e:
@@ -552,7 +560,7 @@ def test_bedrock_ptu():
assert "url" in mock_client_post.call_args.kwargs
assert (
mock_client_post.call_args.kwargs["url"]
- == "https://bedrock-runtime.us-west-2.amazonaws.com/model/arn%3Aaws%3Abedrock%3Aus-west-2%3A888602223428%3Aprovisioned-model%2F8fxff74qyhs3/invoke"
+ == "https://bedrock-runtime.us-west-2.amazonaws.com/model/arn%3Aaws%3Abedrock%3Aus-west-2%3A888602223428%3Aprovisioned-model%2F8fxff74qyhs3/converse"
)
mock_client_post.assert_called_once()
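
The assertion above now targets the `/converse` route rather than `/invoke`. As a quick sanity check, a minimal sketch using only the standard library (the construction here is inferred from the asserted string, not lifted from litellm's internals) reproduces the expected URL by percent-encoding the provisioned-model ARN:

```python
from urllib.parse import quote

# Reproduce the URL asserted in test_bedrock_ptu: percent-encode the ARN
# (':' -> %3A, '/' -> %2F) and append it to the converse endpoint.
arn = "arn:aws:bedrock:us-west-2:888602223428:provisioned-model/8fxff74qyhs3"
url = "https://bedrock-runtime.us-west-2.amazonaws.com/model/{}/converse".format(
    quote(arn, safe="")
)
print(url)
```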
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index d143d1ab80..98898052b1 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -300,7 +300,11 @@ def test_completion_claude_3():
pytest.fail(f"Error occurred: {e}")
-def test_completion_claude_3_function_call():
+@pytest.mark.parametrize(
+ "model",
+ ["anthropic/claude-3-opus-20240229", "anthropic.claude-3-sonnet-20240229-v1:0"],
+)
+def test_completion_claude_3_function_call(model):
litellm.set_verbose = True
tools = [
{
@@ -331,13 +335,14 @@ def test_completion_claude_3_function_call():
try:
# test without max tokens
response = completion(
- model="anthropic/claude-3-opus-20240229",
+ model=model,
messages=messages,
tools=tools,
tool_choice={
"type": "function",
"function": {"name": "get_current_weather"},
},
+ drop_params=True,
)
# Add any assertions, here to check response args
@@ -364,10 +369,11 @@ def test_completion_claude_3_function_call():
)
# In the second response, Claude should deduce answer from tool results
second_response = completion(
- model="anthropic/claude-3-opus-20240229",
+ model=model,
messages=messages,
tools=tools,
tool_choice="auto",
+ drop_params=True,
)
print(second_response)
except Exception as e:
@@ -2162,6 +2168,7 @@ def test_completion_azure_key_completion_arg():
logprobs=True,
max_tokens=10,
)
+
print(f"response: {response}")
print("Hidden Params", response._hidden_params)
@@ -2534,6 +2541,7 @@ def test_replicate_custom_prompt_dict():
"content": "what is yc write 1 paragraph",
}
],
+ mock_response="Hello world",
repetition_penalty=0.1,
num_retries=3,
)
diff --git a/litellm/tests/test_image_generation.py b/litellm/tests/test_image_generation.py
index fea014e67a..1d3d53f70a 100644
--- a/litellm/tests/test_image_generation.py
+++ b/litellm/tests/test_image_generation.py
@@ -76,7 +76,7 @@ def test_image_generation_azure_dall_e_3():
)
print(f"response: {response}")
assert len(response.data) > 0
- except litellm.RateLimitError as e:
+ except litellm.InternalServerError as e:
pass
except litellm.ContentPolicyViolationError:
pass # OpenAI randomly raises these errors - skip when they occur
diff --git a/litellm/tests/test_key_generate_prisma.py b/litellm/tests/test_key_generate_prisma.py
index 148d32cd9c..083d84c2b5 100644
--- a/litellm/tests/test_key_generate_prisma.py
+++ b/litellm/tests/test_key_generate_prisma.py
@@ -2248,3 +2248,55 @@ async def test_create_update_team(prisma_client):
assert _team_info["budget_reset_at"] is not None and isinstance(
_team_info["budget_reset_at"], datetime.datetime
)
+
+
+@pytest.mark.asyncio()
+async def test_enforced_params(prisma_client):
+ setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
+ setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
+ from litellm.proxy.proxy_server import general_settings
+
+ general_settings["enforced_params"] = [
+ "user",
+ "metadata",
+ "metadata.generation_name",
+ ]
+
+ await litellm.proxy.proxy_server.prisma_client.connect()
+ request = NewUserRequest()
+ key = await new_user(request)
+ print(key)
+
+ generated_key = key.key
+ bearer_token = "Bearer " + generated_key
+
+ request = Request(scope={"type": "http"})
+ request._url = URL(url="/chat/completions")
+
+ # Case 1: Missing user
+ async def return_body():
+ return b'{"model": "gemini-pro-vision"}'
+
+ request.body = return_body
+ try:
+ await user_api_key_auth(request=request, api_key=bearer_token)
+ pytest.fail("This should have failed! It's an invalid request")
+ except Exception as e:
+ assert (
+ "BadRequest please pass param=user in request body. This is a required param"
+ in e.message
+ )
+
+ # Case 2: Missing metadata["generation_name"]
+ async def return_body_2():
+ return b'{"model": "gemini-pro-vision", "user": "1234", "metadata": {}}'
+
+ request.body = return_body_2
+ try:
+ await user_api_key_auth(request=request, api_key=bearer_token)
+ pytest.fail("This should have failed! It's an invalid request")
+ except Exception as e:
+ assert (
+ "Authentication Error, BadRequest please pass param=[metadata][generation_name] in request body"
+ in e.message
+ )
diff --git a/litellm/tests/test_lowest_latency_routing.py b/litellm/tests/test_lowest_latency_routing.py
index f9f510673d..d83cdef9ad 100644
--- a/litellm/tests/test_lowest_latency_routing.py
+++ b/litellm/tests/test_lowest_latency_routing.py
@@ -275,7 +275,7 @@ async def _deploy(lowest_latency_logger, deployment_id, tokens_used, duration):
}
start_time = time.time()
response_obj = {"usage": {"total_tokens": tokens_used}}
- time.sleep(duration)
+ await asyncio.sleep(duration)
end_time = time.time()
lowest_latency_logger.log_success_event(
response_obj=response_obj,
@@ -325,6 +325,7 @@ def test_get_available_endpoints_tpm_rpm_check_async(ans_rpm):
d1 = [(lowest_latency_logger, "1234", 50, 0.01)] * non_ans_rpm
d2 = [(lowest_latency_logger, "5678", 50, 0.01)] * non_ans_rpm
asyncio.run(_gather_deploy([*d1, *d2]))
+ time.sleep(3)
## CHECK WHAT'S SELECTED ##
d_ans = lowest_latency_logger.get_available_deployments(
model_group=model_group, healthy_deployments=model_list
diff --git a/litellm/tests/test_prompt_factory.py b/litellm/tests/test_prompt_factory.py
index 2fc04ec528..b3aafab6e6 100644
--- a/litellm/tests/test_prompt_factory.py
+++ b/litellm/tests/test_prompt_factory.py
@@ -15,6 +15,7 @@ from litellm.llms.prompt_templates.factory import (
claude_2_1_pt,
llama_2_chat_pt,
prompt_factory,
+ _bedrock_tools_pt,
)
@@ -128,3 +129,27 @@ def test_anthropic_messages_pt():
# codellama_prompt_format()
+def test_bedrock_tool_calling_pt():
+ tools = [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA",
+ },
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+ },
+ "required": ["location"],
+ },
+ },
+ }
+ ]
+ converted_tools = _bedrock_tools_pt(tools=tools)
+
+ print(converted_tools)
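
test_bedrock_tool_calling_pt above only prints the converted tools. For readers unfamiliar with the target shape, here is a hedged sketch of the conversion it exercises; the helper below is hypothetical (it is not litellm's `_bedrock_tools_pt`), and the output shape is assumed from the `ToolBlock`/`ToolSpecBlock`/`ToolInputSchemaBlock` TypedDicts added to `litellm/types/llms/bedrock.py` later in this diff:

```python
from typing import List


def openai_tools_to_bedrock_toolspec(tools: List[dict]) -> List[dict]:
    # Illustrative conversion: wrap each OpenAI-style function tool in a
    # Bedrock Converse "toolSpec" block, nesting the JSON schema under inputSchema.json.
    converted = []
    for tool in tools:
        fn = tool["function"]
        converted.append(
            {
                "toolSpec": {
                    "name": fn["name"],
                    "description": fn.get("description", ""),
                    "inputSchema": {"json": fn.get("parameters", {})},
                }
            }
        )
    return converted
```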
diff --git a/litellm/tests/test_router.py b/litellm/tests/test_router.py
index d76dec25c7..02bf9a16b8 100644
--- a/litellm/tests/test_router.py
+++ b/litellm/tests/test_router.py
@@ -38,6 +38,48 @@ def test_router_sensitive_keys():
assert "special-key" not in str(e)
+def test_router_order():
+ """
+ Asserts for 2 models in a model group, model with order=1 always called first
+ """
+ router = Router(
+ model_list=[
+ {
+ "model_name": "gpt-3.5-turbo",
+ "litellm_params": {
+ "model": "gpt-4o",
+ "api_key": os.getenv("OPENAI_API_KEY"),
+ "mock_response": "Hello world",
+ "order": 1,
+ },
+ "model_info": {"id": "1"},
+ },
+ {
+ "model_name": "gpt-3.5-turbo",
+ "litellm_params": {
+ "model": "gpt-4o",
+ "api_key": "bad-key",
+ "mock_response": Exception("this is a bad key"),
+ "order": 2,
+ },
+ "model_info": {"id": "2"},
+ },
+ ],
+ num_retries=0,
+ allowed_fails=0,
+ enable_pre_call_checks=True,
+ )
+
+ for _ in range(100):
+ response = router.completion(
+ model="gpt-3.5-turbo",
+ messages=[{"role": "user", "content": "Hey, how's it going?"}],
+ )
+
+ assert isinstance(response, litellm.ModelResponse)
+ assert response._hidden_params["model_id"] == "1"
+
+
@pytest.mark.parametrize("num_retries", [None, 2])
@pytest.mark.parametrize("max_retries", [None, 4])
def test_router_num_retries_init(num_retries, max_retries):
diff --git a/litellm/tests/test_streaming.py b/litellm/tests/test_streaming.py
index c24de601f5..a5e098b027 100644
--- a/litellm/tests/test_streaming.py
+++ b/litellm/tests/test_streaming.py
@@ -1284,18 +1284,18 @@ async def test_completion_replicate_llama3_streaming(sync_mode):
# pytest.fail(f"Error occurred: {e}")
-@pytest.mark.parametrize("sync_mode", [True, False])
+@pytest.mark.parametrize("sync_mode", [True]) # False
@pytest.mark.parametrize(
"model",
[
- # "bedrock/cohere.command-r-plus-v1:0",
- # "anthropic.claude-3-sonnet-20240229-v1:0",
- # "anthropic.claude-instant-v1",
- # "bedrock/ai21.j2-mid",
- # "mistral.mistral-7b-instruct-v0:2",
- # "bedrock/amazon.titan-tg1-large",
- # "meta.llama3-8b-instruct-v1:0",
- "cohere.command-text-v14"
+ "bedrock/cohere.command-r-plus-v1:0",
+ "anthropic.claude-3-sonnet-20240229-v1:0",
+ "anthropic.claude-instant-v1",
+ "bedrock/ai21.j2-mid",
+ "mistral.mistral-7b-instruct-v0:2",
+ "bedrock/amazon.titan-tg1-large",
+ "meta.llama3-8b-instruct-v1:0",
+ "cohere.command-text-v14",
],
)
@pytest.mark.asyncio
diff --git a/litellm/tests/test_token_counter.py b/litellm/tests/test_token_counter.py
index 194dfb8af3..a6f7cd7616 100644
--- a/litellm/tests/test_token_counter.py
+++ b/litellm/tests/test_token_counter.py
@@ -186,3 +186,13 @@ def test_load_test_token_counter(model):
total_time = end_time - start_time
print("model={}, total test time={}".format(model, total_time))
assert total_time < 10, f"Total encoding time > 10s, {total_time}"
+
+def test_openai_token_with_image_and_text():
+ model = "gpt-4o"
+ full_request = {'model': 'gpt-4o', 'tools': [{'type': 'function', 'function': {'name': 'json', 'parameters': {'type': 'object', 'required': ['clause'], 'properties': {'clause': {'type': 'string'}}}, 'description': 'Respond with a JSON object.'}}], 'logprobs': False, 'messages': [{'role': 'user', 'content': [{'text': '\n Just some long text, long long text, and you know it will be longer than 7 tokens definitely.', 'type': 'text'}]}], 'tool_choice': {'type': 'function', 'function': {'name': 'json'}}, 'exclude_models': [], 'disable_fallback': False, 'exclude_providers': []}
+ messages = full_request.get("messages", [])
+
+ token_count = token_counter(model=model, messages=messages)
+ print(token_count)
+
+test_openai_token_with_image_and_text()
\ No newline at end of file
diff --git a/litellm/types/files.py b/litellm/types/files.py
new file mode 100644
index 0000000000..0545567ece
--- /dev/null
+++ b/litellm/types/files.py
@@ -0,0 +1,267 @@
+from enum import Enum
+from types import MappingProxyType
+from typing import List, Set
+
+"""
+Base Enums/Consts
+"""
+class FileType(Enum):
+ AAC = "AAC"
+ CSV = "CSV"
+ DOC = "DOC"
+ DOCX = "DOCX"
+ FLAC = "FLAC"
+ FLV = "FLV"
+ GIF = "GIF"
+ GOOGLE_DOC = "GOOGLE_DOC"
+ GOOGLE_DRAWINGS = "GOOGLE_DRAWINGS"
+ GOOGLE_SHEETS = "GOOGLE_SHEETS"
+ GOOGLE_SLIDES = "GOOGLE_SLIDES"
+ HEIC = "HEIC"
+ HEIF = "HEIF"
+ HTML = "HTML"
+ JPEG = "JPEG"
+ JSON = "JSON"
+ M4A = "M4A"
+ M4V = "M4V"
+ MOV = "MOV"
+ MP3 = "MP3"
+ MP4 = "MP4"
+ MPEG = "MPEG"
+ MPEGPS = "MPEGPS"
+ MPG = "MPG"
+ MPA = "MPA"
+ MPGA = "MPGA"
+ OGG = "OGG"
+ OPUS = "OPUS"
+ PDF = "PDF"
+ PCM = "PCM"
+ PNG = "PNG"
+ PPT = "PPT"
+ PPTX = "PPTX"
+ RTF = "RTF"
+ THREE_GPP = "3GPP"
+ TXT = "TXT"
+ WAV = "WAV"
+ WEBM = "WEBM"
+ WEBP = "WEBP"
+ WMV = "WMV"
+ XLS = "XLS"
+ XLSX = "XLSX"
+
+FILE_EXTENSIONS: MappingProxyType[FileType, List[str]] = MappingProxyType({
+ FileType.AAC: ["aac"],
+ FileType.CSV: ["csv"],
+ FileType.DOC: ["doc"],
+ FileType.DOCX: ["docx"],
+ FileType.FLAC: ["flac"],
+ FileType.FLV: ["flv"],
+ FileType.GIF: ["gif"],
+ FileType.GOOGLE_DOC: ["gdoc"],
+ FileType.GOOGLE_DRAWINGS: ["gdraw"],
+ FileType.GOOGLE_SHEETS: ["gsheet"],
+ FileType.GOOGLE_SLIDES: ["gslides"],
+ FileType.HEIC: ["heic"],
+ FileType.HEIF: ["heif"],
+ FileType.HTML: ["html", "htm"],
+ FileType.JPEG: ["jpeg", "jpg"],
+ FileType.JSON: ["json"],
+ FileType.M4A: ["m4a"],
+ FileType.M4V: ["m4v"],
+ FileType.MOV: ["mov"],
+ FileType.MP3: ["mp3"],
+ FileType.MP4: ["mp4"],
+ FileType.MPEG: ["mpeg"],
+ FileType.MPEGPS: ["mpegps"],
+ FileType.MPG: ["mpg"],
+ FileType.MPA: ["mpa"],
+ FileType.MPGA: ["mpga"],
+ FileType.OGG: ["ogg"],
+ FileType.OPUS: ["opus"],
+ FileType.PDF: ["pdf"],
+ FileType.PCM: ["pcm"],
+ FileType.PNG: ["png"],
+ FileType.PPT: ["ppt"],
+ FileType.PPTX: ["pptx"],
+ FileType.RTF: ["rtf"],
+ FileType.THREE_GPP: ["3gpp"],
+ FileType.TXT: ["txt"],
+ FileType.WAV: ["wav"],
+ FileType.WEBM: ["webm"],
+ FileType.WEBP: ["webp"],
+ FileType.WMV: ["wmv"],
+ FileType.XLS: ["xls"],
+ FileType.XLSX: ["xlsx"],
+})
+
+FILE_MIME_TYPES: MappingProxyType[FileType, str] = MappingProxyType({
+ FileType.AAC: "audio/aac",
+ FileType.CSV: "text/csv",
+ FileType.DOC: "application/msword",
+ FileType.DOCX: "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ FileType.FLAC: "audio/flac",
+ FileType.FLV: "video/x-flv",
+ FileType.GIF: "image/gif",
+ FileType.GOOGLE_DOC: "application/vnd.google-apps.document",
+ FileType.GOOGLE_DRAWINGS: "application/vnd.google-apps.drawing",
+ FileType.GOOGLE_SHEETS: "application/vnd.google-apps.spreadsheet",
+ FileType.GOOGLE_SLIDES: "application/vnd.google-apps.presentation",
+ FileType.HEIC: "image/heic",
+ FileType.HEIF: "image/heif",
+ FileType.HTML: "text/html",
+ FileType.JPEG: "image/jpeg",
+ FileType.JSON: "application/json",
+ FileType.M4A: "audio/x-m4a",
+ FileType.M4V: "video/x-m4v",
+ FileType.MOV: "video/quicktime",
+ FileType.MP3: "audio/mpeg",
+ FileType.MP4: "video/mp4",
+ FileType.MPEG: "video/mpeg",
+ FileType.MPEGPS: "video/mpegps",
+ FileType.MPG: "video/mpg",
+ FileType.MPA: "audio/m4a",
+ FileType.MPGA: "audio/mpga",
+ FileType.OGG: "audio/ogg",
+ FileType.OPUS: "audio/opus",
+ FileType.PDF: "application/pdf",
+ FileType.PCM: "audio/pcm",
+ FileType.PNG: "image/png",
+ FileType.PPT: "application/vnd.ms-powerpoint",
+ FileType.PPTX: "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+ FileType.RTF: "application/rtf",
+ FileType.THREE_GPP: "video/3gpp",
+ FileType.TXT: "text/plain",
+ FileType.WAV: "audio/wav",
+ FileType.WEBM: "video/webm",
+ FileType.WEBP: "image/webp",
+ FileType.WMV: "video/wmv",
+ FileType.XLS: "application/vnd.ms-excel",
+ FileType.XLSX: "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+})
+
+"""
+Util Functions
+"""
+def get_file_mime_type_from_extension(extension: str) -> str:
+ for file_type, extensions in FILE_EXTENSIONS.items():
+ if extension in extensions:
+ return FILE_MIME_TYPES[file_type]
+ raise ValueError(f"Unknown mime type for extension: {extension}")
+
+
+def get_file_extension_from_mime_type(mime_type: str) -> str:
+ for file_type, mime in FILE_MIME_TYPES.items():
+ if mime == mime_type:
+ return FILE_EXTENSIONS[file_type][0]
+ raise ValueError(f"Unknown extension for mime type: {mime_type}")
+
+
+def get_file_type_from_extension(extension: str) -> FileType:
+ for file_type, extensions in FILE_EXTENSIONS.items():
+ if extension in extensions:
+ return file_type
+
+ raise ValueError(f"Unknown file type for extension: {extension}")
+
+
+def get_file_extension_for_file_type(file_type: FileType) -> str:
+ return FILE_EXTENSIONS[file_type][0]
+
+def get_file_mime_type_for_file_type(file_type: FileType) -> str:
+ return FILE_MIME_TYPES[file_type]
+
+
+"""
+FileType Type Groupings (Videos, Images, etc)
+"""
+
+# Images
+IMAGE_FILE_TYPES = {
+ FileType.PNG,
+ FileType.JPEG,
+ FileType.GIF,
+ FileType.WEBP,
+ FileType.HEIC,
+ FileType.HEIF
+}
+
+def is_image_file_type(file_type):
+ return file_type in IMAGE_FILE_TYPES
+
+# Videos
+VIDEO_FILE_TYPES = {
+ FileType.MOV,
+ FileType.MP4,
+ FileType.MPEG,
+ FileType.M4V,
+ FileType.FLV,
+ FileType.MPEGPS,
+ FileType.MPG,
+ FileType.WEBM,
+ FileType.WMV,
+ FileType.THREE_GPP
+}
+
+def is_video_file_type(file_type):
+ return file_type in VIDEO_FILE_TYPES
+
+# Audio
+AUDIO_FILE_TYPES = {
+ FileType.AAC,
+ FileType.FLAC,
+ FileType.MP3,
+ FileType.MPA,
+ FileType.MPGA,
+ FileType.OPUS,
+ FileType.PCM,
+ FileType.WAV,
+}
+
+def is_audio_file_type(file_type):
+ return file_type in AUDIO_FILE_TYPES
+
+# Text
+TEXT_FILE_TYPES = {
+ FileType.CSV,
+ FileType.HTML,
+ FileType.RTF,
+ FileType.TXT
+}
+
+def is_text_file_type(file_type):
+ return file_type in TEXT_FILE_TYPES
+
+"""
+Other FileType Groupings
+"""
+# Accepted file types for GEMINI 1.5 through Vertex AI
+# https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/send-multimodal-prompts#gemini-send-multimodal-samples-images-nodejs
+GEMINI_1_5_ACCEPTED_FILE_TYPES: Set[FileType] = {
+ # Image
+ FileType.PNG,
+ FileType.JPEG,
+ # Audio
+ FileType.AAC,
+ FileType.FLAC,
+ FileType.MP3,
+ FileType.MPA,
+ FileType.MPGA,
+ FileType.OPUS,
+ FileType.PCM,
+ FileType.WAV,
+ # Video
+ FileType.FLV,
+ FileType.MOV,
+ FileType.MPEG,
+ FileType.MPEGPS,
+ FileType.MPG,
+ FileType.MP4,
+ FileType.WEBM,
+ FileType.WMV,
+ FileType.THREE_GPP,
+ # PDF
+ FileType.PDF,
+}
+
+def is_gemini_1_5_accepted_file_type(file_type: FileType) -> bool:
+ return file_type in GEMINI_1_5_ACCEPTED_FILE_TYPES
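
A short usage sketch for the helpers in the new `litellm/types/files.py` module above; the return values follow directly from the `FILE_EXTENSIONS` and `FILE_MIME_TYPES` tables:

```python
from litellm.types.files import (
    FileType,
    get_file_type_from_extension,
    get_file_mime_type_for_file_type,
    is_image_file_type,
    is_gemini_1_5_accepted_file_type,
)

file_type = get_file_type_from_extension("jpg")      # FileType.JPEG ("jpg" is a JPEG extension)
mime = get_file_mime_type_for_file_type(file_type)   # "image/jpeg"
assert is_image_file_type(file_type)                 # JPEG is in IMAGE_FILE_TYPES
assert is_gemini_1_5_accepted_file_type(file_type)   # JPEG is in GEMINI_1_5_ACCEPTED_FILE_TYPES
```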
diff --git a/litellm/types/llms/bedrock.py b/litellm/types/llms/bedrock.py
index 0c82596827..b06075092f 100644
--- a/litellm/types/llms/bedrock.py
+++ b/litellm/types/llms/bedrock.py
@@ -1,4 +1,4 @@
-from typing import TypedDict, Any, Union, Optional
+from typing import TypedDict, Any, Union, Optional, Literal, List
import json
from typing_extensions import (
Self,
@@ -11,10 +11,137 @@ from typing_extensions import (
)
+class SystemContentBlock(TypedDict):
+ text: str
+
+
+class ImageSourceBlock(TypedDict):
+ bytes: Optional[str] # base 64 encoded string
+
+
+class ImageBlock(TypedDict):
+ format: Literal["png", "jpeg", "gif", "webp"]
+ source: ImageSourceBlock
+
+
+class ToolResultContentBlock(TypedDict, total=False):
+ image: ImageBlock
+ json: dict
+ text: str
+
+
+class ToolResultBlock(TypedDict, total=False):
+ content: Required[List[ToolResultContentBlock]]
+ toolUseId: Required[str]
+ status: Literal["success", "error"]
+
+
+class ToolUseBlock(TypedDict):
+ input: dict
+ name: str
+ toolUseId: str
+
+
+class ContentBlock(TypedDict, total=False):
+ text: str
+ image: ImageBlock
+ toolResult: ToolResultBlock
+ toolUse: ToolUseBlock
+
+
+class MessageBlock(TypedDict):
+ content: List[ContentBlock]
+ role: Literal["user", "assistant"]
+
+
+class ConverseMetricsBlock(TypedDict):
+ latencyMs: float # time in ms
+
+
+class ConverseResponseOutputBlock(TypedDict):
+ message: Optional[MessageBlock]
+
+
+class ConverseTokenUsageBlock(TypedDict):
+ inputTokens: int
+ outputTokens: int
+ totalTokens: int
+
+
+class ConverseResponseBlock(TypedDict):
+ additionalModelResponseFields: dict
+ metrics: ConverseMetricsBlock
+ output: ConverseResponseOutputBlock
+ stopReason: (
+ str # end_turn | tool_use | max_tokens | stop_sequence | content_filtered
+ )
+ usage: ConverseTokenUsageBlock
+
+
+class ToolInputSchemaBlock(TypedDict):
+ json: Optional[dict]
+
+
+class ToolSpecBlock(TypedDict, total=False):
+ inputSchema: Required[ToolInputSchemaBlock]
+ name: Required[str]
+ description: str
+
+
+class ToolBlock(TypedDict):
+ toolSpec: Optional[ToolSpecBlock]
+
+
+class SpecificToolChoiceBlock(TypedDict):
+ name: str
+
+
+class ToolChoiceValuesBlock(TypedDict, total=False):
+ any: dict
+ auto: dict
+ tool: SpecificToolChoiceBlock
+
+
+class ToolConfigBlock(TypedDict, total=False):
+ tools: Required[List[ToolBlock]]
+ toolChoice: Union[str, ToolChoiceValuesBlock]
+
+
+class InferenceConfig(TypedDict, total=False):
+ maxTokens: int
+ stopSequences: List[str]
+ temperature: float
+ topP: float
+
+
+class ToolBlockDeltaEvent(TypedDict):
+ input: str
+
+
+class ContentBlockDeltaEvent(TypedDict, total=False):
+ """
+ Either 'text' or 'toolUse' will be specified for a Converse API streaming response.
+ """
+
+ text: str
+ toolUse: ToolBlockDeltaEvent
+
+
+class RequestObject(TypedDict, total=False):
+ additionalModelRequestFields: dict
+ additionalModelResponseFieldPaths: List[str]
+ inferenceConfig: InferenceConfig
+ messages: Required[List[MessageBlock]]
+ system: List[SystemContentBlock]
+ toolConfig: ToolConfigBlock
+
+
class GenericStreamingChunk(TypedDict):
text: Required[str]
+ tool_str: Required[str]
is_finished: Required[bool]
finish_reason: Required[str]
+ usage: Optional[ConverseTokenUsageBlock]
class Document(TypedDict):
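
For orientation, a minimal sketch of a Converse-style request built from the TypedDicts added above (plain dicts satisfy TypedDicts at runtime; the field names come from `RequestObject`, `MessageBlock`, `ContentBlock`, `SystemContentBlock`, and `InferenceConfig`, while the message text itself is just an example):

```python
from litellm.types.llms.bedrock import RequestObject

request: RequestObject = {
    "messages": [
        {"role": "user", "content": [{"text": "What's the weather in Boston today?"}]}
    ],
    "system": [{"text": "You are a helpful weather assistant."}],
    "inferenceConfig": {"maxTokens": 256, "temperature": 0.2, "topP": 0.9},
}
```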
diff --git a/litellm/types/llms/openai.py b/litellm/types/llms/openai.py
index bc0c82434f..7861e394cd 100644
--- a/litellm/types/llms/openai.py
+++ b/litellm/types/llms/openai.py
@@ -293,3 +293,20 @@ class ListBatchRequest(TypedDict, total=False):
extra_headers: Optional[Dict[str, str]]
extra_body: Optional[Dict[str, str]]
timeout: Optional[float]
+
+
+class ChatCompletionToolCallFunctionChunk(TypedDict):
+ name: str
+ arguments: str
+
+
+class ChatCompletionToolCallChunk(TypedDict):
+ id: str
+ type: Literal["function"]
+ function: ChatCompletionToolCallFunctionChunk
+
+
+class ChatCompletionResponseMessage(TypedDict, total=False):
+ content: Optional[str]
+ tool_calls: List[ChatCompletionToolCallChunk]
+ role: Literal["assistant"]
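
A matching sketch of the assistant-message shape described by `ChatCompletionResponseMessage` and `ChatCompletionToolCallChunk` above (the id and arguments are placeholders):

```python
from litellm.types.llms.openai import ChatCompletionResponseMessage

message: ChatCompletionResponseMessage = {
    "role": "assistant",
    "content": None,
    "tool_calls": [
        {
            "id": "call_abc123",  # placeholder id
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "arguments": '{"location": "Boston, MA", "unit": "fahrenheit"}',
            },
        }
    ],
}
```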
diff --git a/litellm/utils.py b/litellm/utils.py
index be7728dfef..ad9779f2d7 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -239,6 +239,8 @@ def map_finish_reason(
return "length"
elif finish_reason == "tool_use": # anthropic
return "tool_calls"
+ elif finish_reason == "content_filtered":
+ return "content_filter"
return finish_reason
@@ -1372,8 +1374,12 @@ class Logging:
callback_func=callback,
)
except Exception as e:
- traceback.print_exc()
- print_verbose(
+ verbose_logger.error(
+ "litellm.Logging.pre_call(): Exception occurred - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while input logging with integrations {traceback.format_exc()}"
)
print_verbose(
@@ -4060,6 +4066,9 @@ def openai_token_counter(
for c in value:
if c["type"] == "text":
text += c["text"]
+ num_tokens += len(
+ encoding.encode(c["text"], disallowed_special=())
+ )
elif c["type"] == "image_url":
if isinstance(c["image_url"], dict):
image_url_dict = c["image_url"]
@@ -5634,19 +5643,29 @@ def get_optional_params(
optional_params["stream"] = stream
elif "anthropic" in model:
_check_valid_arg(supported_params=supported_params)
- # anthropic params on bedrock
- # \"max_tokens_to_sample\":300,\"temperature\":0.5,\"top_p\":1,\"stop_sequences\":[\"\\\\n\\\\nHuman:\"]}"
- if model.startswith("anthropic.claude-3"):
- optional_params = (
- litellm.AmazonAnthropicClaude3Config().map_openai_params(
+ if "aws_bedrock_client" in passed_params: # deprecated boto3.invoke route.
+ if model.startswith("anthropic.claude-3"):
+ optional_params = (
+ litellm.AmazonAnthropicClaude3Config().map_openai_params(
+ non_default_params=non_default_params,
+ optional_params=optional_params,
+ )
+ )
+ else:
+ optional_params = litellm.AmazonAnthropicConfig().map_openai_params(
non_default_params=non_default_params,
optional_params=optional_params,
)
- )
- else:
- optional_params = litellm.AmazonAnthropicConfig().map_openai_params(
+ else: # bedrock httpx route
+ optional_params = litellm.AmazonConverseConfig().map_openai_params(
+ model=model,
non_default_params=non_default_params,
optional_params=optional_params,
+ drop_params=(
+ drop_params
+ if drop_params is not None and isinstance(drop_params, bool)
+ else False
+ ),
)
elif "amazon" in model: # amazon titan llms
_check_valid_arg(supported_params=supported_params)
@@ -6198,6 +6217,27 @@ def calculate_max_parallel_requests(
return None
+def _get_order_filtered_deployments(healthy_deployments: List[Dict]) -> List:
+ min_order = min(
+ (
+ deployment["litellm_params"]["order"]
+ for deployment in healthy_deployments
+ if "order" in deployment["litellm_params"]
+ ),
+ default=None,
+ )
+
+ if min_order is not None:
+ filtered_deployments = [
+ deployment
+ for deployment in healthy_deployments
+ if deployment["litellm_params"].get("order") == min_order
+ ]
+
+ return filtered_deployments
+ return healthy_deployments
+
+
def _get_model_region(
custom_llm_provider: str, litellm_params: LiteLLM_Params
) -> Optional[str]:
@@ -6403,20 +6443,7 @@ def get_supported_openai_params(
- None if unmapped
"""
if custom_llm_provider == "bedrock":
- if model.startswith("anthropic.claude-3"):
- return litellm.AmazonAnthropicClaude3Config().get_supported_openai_params()
- elif model.startswith("anthropic"):
- return litellm.AmazonAnthropicConfig().get_supported_openai_params()
- elif model.startswith("ai21"):
- return ["max_tokens", "temperature", "top_p", "stream"]
- elif model.startswith("amazon"):
- return ["max_tokens", "temperature", "stop", "top_p", "stream"]
- elif model.startswith("meta"):
- return ["max_tokens", "temperature", "top_p", "stream"]
- elif model.startswith("cohere"):
- return ["stream", "temperature", "max_tokens"]
- elif model.startswith("mistral"):
- return ["max_tokens", "temperature", "stop", "top_p", "stream"]
+ return litellm.AmazonConverseConfig().get_supported_openai_params(model=model)
elif custom_llm_provider == "ollama":
return litellm.OllamaConfig().get_supported_openai_params()
elif custom_llm_provider == "ollama_chat":
@@ -8516,7 +8543,11 @@ def exception_type(
extra_information = f"\nModel: {model}"
if _api_base:
extra_information += f"\nAPI Base: `{_api_base}`"
- if messages and len(messages) > 0:
+ if (
+ messages
+ and len(messages) > 0
+ and litellm.redact_messages_in_exceptions is False
+ ):
extra_information += f"\nMessages: `{messages}`"
if _model_group is not None:
@@ -9803,8 +9834,7 @@ def exception_type(
elif custom_llm_provider == "azure":
if "Internal server error" in error_str:
exception_mapping_worked = True
- raise APIError(
- status_code=500,
+ raise litellm.InternalServerError(
message=f"AzureException Internal server error - {original_exception.message}",
llm_provider="azure",
model=model,
@@ -10054,6 +10084,8 @@ def get_secret(
):
key_management_system = litellm._key_management_system
key_management_settings = litellm._key_management_settings
+ args = locals()
+
if secret_name.startswith("os.environ/"):
secret_name = secret_name.replace("os.environ/", "")
@@ -10141,13 +10173,13 @@ def get_secret(
key_manager = "local"
if (
- key_manager == KeyManagementSystem.AZURE_KEY_VAULT
+ key_manager == KeyManagementSystem.AZURE_KEY_VAULT.value
or type(client).__module__ + "." + type(client).__name__
== "azure.keyvault.secrets._client.SecretClient"
): # support Azure Secret Client - from azure.keyvault.secrets import SecretClient
secret = client.get_secret(secret_name).value
elif (
- key_manager == KeyManagementSystem.GOOGLE_KMS
+ key_manager == KeyManagementSystem.GOOGLE_KMS.value
or client.__class__.__name__ == "KeyManagementServiceClient"
):
encrypted_secret: Any = os.getenv(secret_name)
@@ -10175,6 +10207,25 @@ def get_secret(
secret = response.plaintext.decode(
"utf-8"
) # assumes the original value was encoded with utf-8
+ elif key_manager == KeyManagementSystem.AWS_KMS.value:
+ """
+ Only check the tokens which start with 'aws_kms/'. This prevents latency impact caused by checking all keys.
+ """
+ encrypted_value = os.getenv(secret_name, None)
+ if encrypted_value is None:
+ raise Exception("encrypted value for AWS KMS cannot be None.")
+ # Decode the base64 encoded ciphertext
+ ciphertext_blob = base64.b64decode(encrypted_value)
+
+ # Set up the parameters for the decrypt call
+ params = {"CiphertextBlob": ciphertext_blob}
+
+ # Perform the decryption
+ response = client.decrypt(**params)
+
+ # Extract and decode the plaintext
+ plaintext = response["Plaintext"]
+ secret = plaintext.decode("utf-8")
elif key_manager == KeyManagementSystem.AWS_SECRET_MANAGER.value:
try:
get_secret_value_response = client.get_secret_value(
@@ -10195,10 +10246,14 @@ def get_secret(
for k, v in secret_dict.items():
secret = v
print_verbose(f"secret: {secret}")
+ elif key_manager == "local":
+ secret = os.getenv(secret_name)
else: # assume the default is infisicial client
secret = client.get_secret(secret_name).secret_value
except Exception as e: # check if it's in os.environ
- print_verbose(f"An exception occurred - {str(e)}")
+ verbose_logger.error(
+ f"An exception occurred - {str(e)}\n\n{traceback.format_exc()}"
+ )
secret = os.getenv(secret_name)
try:
secret_value_as_bool = ast.literal_eval(secret)
@@ -10532,7 +10587,12 @@ class CustomStreamWrapper:
"finish_reason": finish_reason,
}
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.CustomStreamWrapper.handle_predibase_chunk(): Exception occurred - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
raise e
def handle_huggingface_chunk(self, chunk):
@@ -10576,7 +10636,12 @@ class CustomStreamWrapper:
"finish_reason": finish_reason,
}
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.CustomStreamWrapper.handle_huggingface_chunk(): Exception occurred - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
raise e
def handle_ai21_chunk(self, chunk): # fake streaming
@@ -10811,7 +10876,12 @@ class CustomStreamWrapper:
"usage": usage,
}
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.CustomStreamWrapper.handle_openai_chat_completion_chunk(): Exception occurred - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
raise e
def handle_azure_text_completion_chunk(self, chunk):
@@ -10892,7 +10962,12 @@ class CustomStreamWrapper:
else:
return ""
- except:
+ except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.CustomStreamWrapper.handle_baseten_chunk(): Exception occurred - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
return ""
def handle_cloudlfare_stream(self, chunk):
@@ -11091,7 +11166,12 @@ class CustomStreamWrapper:
"is_finished": True,
}
- except:
+ except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.CustomStreamWrapper.handle_clarifai_chunk(): Exception occurred - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
return ""
def model_response_creator(self):
@@ -11332,12 +11412,27 @@ class CustomStreamWrapper:
if response_obj["is_finished"]:
self.received_finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider == "bedrock":
+ from litellm.types.llms.bedrock import GenericStreamingChunk
+
if self.received_finish_reason is not None:
raise StopIteration
- response_obj = self.handle_bedrock_stream(chunk)
+ response_obj: GenericStreamingChunk = chunk
completion_obj["content"] = response_obj["text"]
+
if response_obj["is_finished"]:
self.received_finish_reason = response_obj["finish_reason"]
+
+ if (
+ self.stream_options
+ and self.stream_options.get("include_usage", False) is True
+ and response_obj["usage"] is not None
+ ):
+ self.sent_stream_usage = True
+ model_response.usage = litellm.Usage(
+ prompt_tokens=response_obj["usage"]["inputTokens"],
+ completion_tokens=response_obj["usage"]["outputTokens"],
+ total_tokens=response_obj["usage"]["totalTokens"],
+ )
elif self.custom_llm_provider == "sagemaker":
print_verbose(f"ENTERS SAGEMAKER STREAMING for chunk {chunk}")
response_obj = self.handle_sagemaker_stream(chunk)
@@ -11563,7 +11658,12 @@ class CustomStreamWrapper:
tool["type"] = "function"
model_response.choices[0].delta = Delta(**_json_delta)
except Exception as e:
- traceback.print_exc()
+ verbose_logger.error(
+ "litellm.CustomStreamWrapper.chunk_creator(): Exception occurred - {}".format(
+ str(e)
+ )
+ )
+ verbose_logger.debug(traceback.format_exc())
model_response.choices[0].delta = Delta()
else:
try:
@@ -11599,7 +11699,7 @@ class CustomStreamWrapper:
and hasattr(model_response, "usage")
and hasattr(model_response.usage, "prompt_tokens")
):
- if self.sent_first_chunk == False:
+ if self.sent_first_chunk is False:
completion_obj["role"] = "assistant"
self.sent_first_chunk = True
model_response.choices[0].delta = Delta(**completion_obj)
@@ -11767,6 +11867,8 @@ class CustomStreamWrapper:
def __next__(self):
try:
+ if self.completion_stream is None:
+ self.fetch_sync_stream()
while True:
if (
isinstance(self.completion_stream, str)
@@ -11841,6 +11943,14 @@ class CustomStreamWrapper:
custom_llm_provider=self.custom_llm_provider,
)
+ def fetch_sync_stream(self):
+ if self.completion_stream is None and self.make_call is not None:
+ # Call make_call to get the completion stream
+ self.completion_stream = self.make_call(client=litellm.module_level_client)
+ self._stream_iter = self.completion_stream.__iter__()
+
+ return self.completion_stream
+
async def fetch_stream(self):
if self.completion_stream is None and self.make_call is not None:
# Call make_call to get the completion stream
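
The bedrock branch of `chunk_creator` above now attaches token usage when `stream_options` asks for it. A hedged usage sketch (it assumes Bedrock credentials are configured and that usage arrives on a late chunk, per the `include_usage` handling above; this is not a verbatim litellm example):

```python
import litellm

response = litellm.completion(
    model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    stream=True,
    stream_options={"include_usage": True},
)

for chunk in response:
    # usage (mapped from inputTokens/outputTokens/totalTokens) is expected on the
    # chunk where the wrapper sets sent_stream_usage
    usage = getattr(chunk, "usage", None)
    if usage is not None:
        print(usage)
```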
diff --git a/pyproject.toml b/pyproject.toml
index 648a8b41ac..a472ae1956 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,10 +1,14 @@
[tool.poetry]
name = "litellm"
-version = "1.40.4"
+version = "1.40.5"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT"
readme = "README.md"
+packages = [
+ { include = "litellm" },
+ { include = "litellm/py.typed"},
+]
[tool.poetry.urls]
homepage = "https://litellm.ai"
@@ -80,7 +84,7 @@ requires = ["poetry-core", "wheel"]
build-backend = "poetry.core.masonry.api"
[tool.commitizen]
-version = "1.40.4"
+version = "1.40.5"
version_files = [
"pyproject.toml:^version"
]
diff --git a/ruff.toml b/ruff.toml
new file mode 100644
index 0000000000..dfb323c1b3
--- /dev/null
+++ b/ruff.toml
@@ -0,0 +1,3 @@
+ignore = ["F405"]
+extend-select = ["E501"]
+line-length = 120