diff --git a/.circleci/config.yml b/.circleci/config.yml index 6caed4810..1ab163653 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -61,7 +61,7 @@ jobs: command: | pwd ls - python -m pytest -vv litellm/tests/ -x --junitxml=test-results/junit.xml --durations=5 + python -m pytest -vv litellm/tests/ -x --junitxml=test-results/junit.xml --durations=5 no_output_timeout: 120m # Store test results @@ -78,6 +78,11 @@ jobs: steps: - checkout + + - run: + name: Copy model_prices_and_context_window File to model_prices_and_context_window_backup + command: | + cp model_prices_and_context_window.json litellm/model_prices_and_context_window_backup.json - run: name: Check if litellm dir was updated or if pyproject.toml was modified diff --git a/.gitignore b/.gitignore index 088996ddd..b31366a33 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,9 @@ litellm/proxy/_secret_config.yaml litellm/tests/aiologs.log litellm/tests/exception_data.txt litellm/tests/config_*.yaml +litellm/tests/langfuse.log +litellm/tests/test_custom_logger.py +litellm/tests/langfuse.log +litellm/tests/dynamo*.log +.vscode/settings.json +litellm/proxy/log.txt diff --git a/Dockerfile b/Dockerfile index 0647ee92a..1a59db16a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,11 @@ # Base image -ARG LITELLM_BASE_IMAGE=python:3.9-slim +ARG LITELLM_BUILD_IMAGE=python:3.9 -# allow users to specify, else use python 3.9-slim -FROM $LITELLM_BASE_IMAGE +# Runtime image +ARG LITELLM_RUNTIME_IMAGE=python:3.9-slim + +# allow users to specify, else use python 3.9 +FROM $LITELLM_BUILD_IMAGE as builder # Set the working directory to /app WORKDIR /app @@ -16,7 +19,7 @@ RUN pip install --upgrade pip && \ pip install build # Copy the current directory contents into the container at /app -COPY . /app +COPY requirements.txt . # Build the package RUN rm -rf dist/* && python -m build @@ -25,13 +28,27 @@ RUN rm -rf dist/* && python -m build RUN pip install dist/*.whl # Install any needed packages specified in requirements.txt -RUN pip wheel --no-cache-dir --wheel-dir=wheels -r requirements.txt -RUN pip install --no-cache-dir --find-links=wheels -r requirements.txt +RUN pip install wheel && \ + pip wheel --no-cache-dir --wheel-dir=/app/wheels -r requirements.txt + +############################################################################### +FROM $LITELLM_RUNTIME_IMAGE as runtime + +WORKDIR /app + +# Copy the current directory contents into the container at /app +COPY . . + +COPY --from=builder /app/wheels /app/wheels + +RUN pip install --no-index --find-links=/app/wheels -r requirements.txt + +# Trigger the Prisma CLI to be installed +RUN prisma -v EXPOSE 4000/tcp # Start the litellm proxy, using the `litellm` cli command https://docs.litellm.ai/docs/simple_proxy - # Start the litellm proxy with default options CMD ["--port", "4000"] diff --git a/README.md b/README.md index cc5c1a3a5..583a643f3 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,22 @@ response = completion(model="command-nightly", messages=messages) print(response) ``` +## Async ([Docs](https://docs.litellm.ai/docs/completion/stream#async-completion)) + +```python +from litellm import acompletion +import asyncio + +async def test_get_response(): + user_message = "Hello, how are you?" 
+ messages = [{"content": user_message, "role": "user"}] + response = await acompletion(model="gpt-3.5-turbo", messages=messages) + return response + +response = asyncio.run(test_get_response()) +print(response) +``` + ## Streaming ([Docs](https://docs.litellm.ai/docs/completion/stream)) liteLLM supports streaming the model response back, pass `stream=True` to get a streaming iterator in response. Streaming is supported for all models (Bedrock, Huggingface, TogetherAI, Azure, OpenAI, etc.) @@ -140,6 +156,7 @@ response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content | [openrouter](https://docs.litellm.ai/docs/providers/openrouter) | ✅ | ✅ | ✅ | ✅ | | [google - vertex_ai](https://docs.litellm.ai/docs/providers/vertex) | ✅ | ✅ | ✅ | ✅ | | [google - palm](https://docs.litellm.ai/docs/providers/palm) | ✅ | ✅ | ✅ | ✅ | +| [mistral ai api](https://docs.litellm.ai/docs/providers/mistral) | ✅ | ✅ | ✅ | ✅ | | [ai21](https://docs.litellm.ai/docs/providers/ai21) | ✅ | ✅ | ✅ | ✅ | | [baseten](https://docs.litellm.ai/docs/providers/baseten) | ✅ | ✅ | ✅ | ✅ | | [vllm](https://docs.litellm.ai/docs/providers/vllm) | ✅ | ✅ | ✅ | ✅ | diff --git a/dist/litellm-1.12.5.dev1-py3-none-any.whl b/dist/litellm-1.12.5.dev1-py3-none-any.whl new file mode 100644 index 000000000..395d5c567 Binary files /dev/null and b/dist/litellm-1.12.5.dev1-py3-none-any.whl differ diff --git a/dist/litellm-1.12.5.dev1.tar.gz b/dist/litellm-1.12.5.dev1.tar.gz new file mode 100644 index 000000000..8fdfd9d5e Binary files /dev/null and b/dist/litellm-1.12.5.dev1.tar.gz differ diff --git a/dist/litellm-1.12.6.dev1-py3-none-any.whl b/dist/litellm-1.12.6.dev1-py3-none-any.whl new file mode 100644 index 000000000..95ba50b4f Binary files /dev/null and b/dist/litellm-1.12.6.dev1-py3-none-any.whl differ diff --git a/dist/litellm-1.12.6.dev1.tar.gz b/dist/litellm-1.12.6.dev1.tar.gz new file mode 100644 index 000000000..c18e6d1ce Binary files /dev/null and b/dist/litellm-1.12.6.dev1.tar.gz differ diff --git a/dist/litellm-1.12.6.dev2-py3-none-any.whl b/dist/litellm-1.12.6.dev2-py3-none-any.whl new file mode 100644 index 000000000..bf6d294b9 Binary files /dev/null and b/dist/litellm-1.12.6.dev2-py3-none-any.whl differ diff --git a/dist/litellm-1.12.6.dev2.tar.gz b/dist/litellm-1.12.6.dev2.tar.gz new file mode 100644 index 000000000..6f032ba7b Binary files /dev/null and b/dist/litellm-1.12.6.dev2.tar.gz differ diff --git a/dist/litellm-1.12.6.dev3-py3-none-any.whl b/dist/litellm-1.12.6.dev3-py3-none-any.whl new file mode 100644 index 000000000..81290067d Binary files /dev/null and b/dist/litellm-1.12.6.dev3-py3-none-any.whl differ diff --git a/dist/litellm-1.12.6.dev3.tar.gz b/dist/litellm-1.12.6.dev3.tar.gz new file mode 100644 index 000000000..2dbe390eb Binary files /dev/null and b/dist/litellm-1.12.6.dev3.tar.gz differ diff --git a/dist/litellm-1.12.6.dev4-py3-none-any.whl b/dist/litellm-1.12.6.dev4-py3-none-any.whl new file mode 100644 index 000000000..db89f6f9d Binary files /dev/null and b/dist/litellm-1.12.6.dev4-py3-none-any.whl differ diff --git a/dist/litellm-1.12.6.dev4.tar.gz b/dist/litellm-1.12.6.dev4.tar.gz new file mode 100644 index 000000000..3daae3edd Binary files /dev/null and b/dist/litellm-1.12.6.dev4.tar.gz differ diff --git a/dist/litellm-1.12.6.dev5-py3-none-any.whl b/dist/litellm-1.12.6.dev5-py3-none-any.whl new file mode 100644 index 000000000..d4233ebfb Binary files /dev/null and b/dist/litellm-1.12.6.dev5-py3-none-any.whl differ diff --git a/dist/litellm-1.12.6.dev5.tar.gz 
b/dist/litellm-1.12.6.dev5.tar.gz new file mode 100644 index 000000000..d2a6280bf Binary files /dev/null and b/dist/litellm-1.12.6.dev5.tar.gz differ diff --git a/dist/litellm-1.14.0.dev1-py3-none-any.whl b/dist/litellm-1.14.0.dev1-py3-none-any.whl new file mode 100644 index 000000000..7428d252d Binary files /dev/null and b/dist/litellm-1.14.0.dev1-py3-none-any.whl differ diff --git a/dist/litellm-1.14.0.dev1.tar.gz b/dist/litellm-1.14.0.dev1.tar.gz new file mode 100644 index 000000000..cd80b0e71 Binary files /dev/null and b/dist/litellm-1.14.0.dev1.tar.gz differ diff --git a/dist/litellm-1.14.5.dev1-py3-none-any.whl b/dist/litellm-1.14.5.dev1-py3-none-any.whl new file mode 100644 index 000000000..1555e6f68 Binary files /dev/null and b/dist/litellm-1.14.5.dev1-py3-none-any.whl differ diff --git a/dist/litellm-1.14.5.dev1.tar.gz b/dist/litellm-1.14.5.dev1.tar.gz new file mode 100644 index 000000000..06f47b0eb Binary files /dev/null and b/dist/litellm-1.14.5.dev1.tar.gz differ diff --git a/docs/my-website/docs/caching/redis_cache.md b/docs/my-website/docs/caching/redis_cache.md index 521c4d00f..979119ad7 100644 --- a/docs/my-website/docs/caching/redis_cache.md +++ b/docs/my-website/docs/caching/redis_cache.md @@ -55,27 +55,76 @@ litellm.cache = cache # set litellm.cache to your cache ``` -### Detecting Cached Responses -For resposes that were returned as cache hit, the response includes a param `cache` = True +## Cache Initialization Parameters -:::info +#### `type` (str, optional) -Only valid for OpenAI <= 0.28.1 [Let us know if you still need this](https://github.com/BerriAI/litellm/issues/new?assignees=&labels=bug&projects=&template=bug_report.yml&title=%5BBug%5D%3A+) -::: +The type of cache to initialize. It can be either "local" or "redis". Defaults to "local". -Example response with cache hit -```python -{ - 'cache': True, - 'id': 'chatcmpl-7wggdzd6OXhgE2YhcLJHJNZsEWzZ2', - 'created': 1694221467, - 'model': 'gpt-3.5-turbo-0613', - 'choices': [ - { - 'index': 0, 'message': {'role': 'assistant', 'content': 'I\'m sorry, but I couldn\'t find any information about "litellm" or how many stars it has. It is possible that you may be referring to a specific product, service, or platform that I am not familiar with. Can you please provide more context or clarify your question?' - }, 'finish_reason': 'stop'} - ], - 'usage': {'prompt_tokens': 17, 'completion_tokens': 59, 'total_tokens': 76}, -} +#### `host` (str, optional) -``` \ No newline at end of file +The host address for the Redis cache. This parameter is required if the `type` is set to "redis". + +#### `port` (int, optional) + +The port number for the Redis cache. This parameter is required if the `type` is set to "redis". + +#### `password` (str, optional) + +The password for the Redis cache. This parameter is required if the `type` is set to "redis". + +#### `supported_call_types` (list, optional) + +A list of call types to cache for. Defaults to caching for all call types. The available call types are: + +- "completion" +- "acompletion" +- "embedding" +- "aembedding" + +#### `**kwargs` (additional keyword arguments) + +Additional keyword arguments are accepted for the initialization of the Redis cache using the `redis.Redis()` constructor. These arguments allow you to fine-tune the Redis cache configuration based on your specific needs. + + +## Logging + +Cache hits are logged in success events as `kwarg["cache_hit"]`. 
+
+Here's an example of accessing it:
+
+```python
+import litellm
+import os, time, asyncio
+from litellm.integrations.custom_logger import CustomLogger
+from litellm import completion, acompletion, Cache
+
+# create custom callback for success_events
+class MyCustomHandler(CustomLogger):
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Success")
+        print(f"Value of Cache hit: {kwargs['cache_hit']}")
+
+async def test_async_completion_azure_caching():
+    # set custom callback
+    customHandler_caching = MyCustomHandler()
+    litellm.callbacks = [customHandler_caching]
+
+    # init cache
+    litellm.cache = Cache(type="redis", host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD'])
+    unique_time = time.time()
+    response1 = await litellm.acompletion(model="azure/chatgpt-v-2",
+                            messages=[{
+                                "role": "user",
+                                "content": f"Hi 👋 - i'm async azure {unique_time}"
+                            }],
+                            caching=True)
+    await asyncio.sleep(1)
+    print(f"customHandler_caching.states pre-cache hit: {customHandler_caching.states}")
+    response2 = await litellm.acompletion(model="azure/chatgpt-v-2",
+                            messages=[{
+                                "role": "user",
+                                "content": f"Hi 👋 - i'm async azure {unique_time}"
+                            }],
+                            caching=True)
+    await asyncio.sleep(1) # success callbacks are done in parallel
+```
diff --git a/docs/my-website/docs/observability/custom_callback.md b/docs/my-website/docs/observability/custom_callback.md
index 78b7499a8..319a25e87 100644
--- a/docs/my-website/docs/observability/custom_callback.md
+++ b/docs/my-website/docs/observability/custom_callback.md
@@ -4,7 +4,9 @@
 You can create a custom callback class to precisely log events as they occur in litellm.
 
 ```python
+import litellm
 from litellm.integrations.custom_logger import CustomLogger
+from litellm import completion, acompletion
 
 class MyCustomHandler(CustomLogger):
     def log_pre_api_call(self, model, messages, kwargs):
@@ -21,14 +23,38 @@ class MyCustomHandler(CustomLogger):
 
     def log_failure_event(self, kwargs, response_obj, start_time, end_time):
         print(f"On Failure")
+
+    #### ASYNC #### - for acompletion/aembeddings
+
+    async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Streaming")
+
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Success")
+
+    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Failure")
 
 customHandler = MyCustomHandler()
 
 litellm.callbacks = [customHandler]
+
+## sync
 response = completion(model="gpt-3.5-turbo", messages=[{ "role": "user", "content": "Hi 👋 - i'm openai"}],
                               stream=True)
 for chunk in response:
     continue
+
+
+## async
+import asyncio
+
+async def completion():
+    response = await acompletion(model="gpt-3.5-turbo", messages=[{ "role": "user", "content": "Hi 👋 - i'm openai"}],
+                              stream=True)
+    async for chunk in response:
+        continue
+asyncio.run(completion())
 ```
 
 ## Callback Functions
@@ -87,6 +113,41 @@ print(response)
 
 ## Async Callback Functions
 
+We recommend using the Custom Logger class for async.
+
+```python
+import litellm
+from litellm.integrations.custom_logger import CustomLogger
+from litellm import acompletion
+
+class MyCustomHandler(CustomLogger):
+    #### ASYNC ####
+
+    async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Streaming")
+
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Success")
+
+    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Failure")
+
+import asyncio
+customHandler = MyCustomHandler()
+
+litellm.callbacks = [customHandler]
+
+async def completion():
+    response = await acompletion(model="gpt-3.5-turbo", messages=[{ "role": "user", "content": "Hi 👋 - i'm openai"}],
+                              stream=True)
+    async for chunk in response:
+        continue
+asyncio.run(completion())
+```
+
+**Functions**
+
+If you just want to pass in an async function for logging:
+
 LiteLLM currently supports just async success callback functions for async completion/embedding calls.
 
 ```python
@@ -117,9 +178,6 @@ asyncio.run(test_chat_openai())
 :::info
 
 We're actively trying to expand this to other event types. [Tell us if you need this!](https://github.com/BerriAI/litellm/issues/1007)
-
-
-
 :::
 
 ## What's in kwargs?
@@ -170,6 +228,48 @@ Here's exactly what you can expect in the kwargs dictionary:
       "end_time" = end_time # datetime object of when call was completed
 ```
 
+
+### Cache hits
+
+Cache hits are logged in success events as `kwarg["cache_hit"]`.
+
+Here's an example of accessing it:
+
+```python
+import litellm
+import os, time, asyncio
+from litellm.integrations.custom_logger import CustomLogger
+from litellm import completion, acompletion, Cache
+
+class MyCustomHandler(CustomLogger):
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Success")
+        print(f"Value of Cache hit: {kwargs['cache_hit']}")
+
+async def test_async_completion_azure_caching():
+    customHandler_caching = MyCustomHandler()
+    litellm.cache = Cache(type="redis", host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD'])
+    litellm.callbacks = [customHandler_caching]
+    unique_time = time.time()
+    response1 = await litellm.acompletion(model="azure/chatgpt-v-2",
+                            messages=[{
+                                "role": "user",
+                                "content": f"Hi 👋 - i'm async azure {unique_time}"
+                            }],
+                            caching=True)
+    await asyncio.sleep(1)
+    print(f"customHandler_caching.states pre-cache hit: {customHandler_caching.states}")
+    response2 = await litellm.acompletion(model="azure/chatgpt-v-2",
+                            messages=[{
+                                "role": "user",
+                                "content": f"Hi 👋 - i'm async azure {unique_time}"
+                            }],
+                            caching=True)
+    await asyncio.sleep(1) # success callbacks are done in parallel
+    print(f"customHandler_caching.states post-cache hit: {customHandler_caching.states}")
+    assert len(customHandler_caching.errors) == 0
+    assert len(customHandler_caching.states) == 4 # pre, post, success, success
+```
+
 ### Get complete streaming response
 
 LiteLLM will pass you the complete streaming response in the final streaming chunk as part of the kwargs for your custom callback function.
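
For reference, here's a minimal sketch of reading it inside a success callback. The kwarg key name (`complete_streaming_response`) is an assumption not spelled out in this section; check the kwargs listing above against your litellm version.

```python
import litellm
from litellm import completion
from litellm.integrations.custom_logger import CustomLogger

class StreamingHandler(CustomLogger):
    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        # assumed key: only populated once the final chunk has been streamed
        full_response = kwargs.get("complete_streaming_response")
        if full_response is not None:
            print(f"Complete streaming response: {full_response}")

litellm.callbacks = [StreamingHandler()]

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
    stream=True,
)
for chunk in response:
    continue
```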
diff --git a/docs/my-website/docs/observability/traceloop_integration.md b/docs/my-website/docs/observability/traceloop_integration.md index 9902e58ba..8052420d1 100644 --- a/docs/my-website/docs/observability/traceloop_integration.md +++ b/docs/my-website/docs/observability/traceloop_integration.md @@ -27,8 +27,8 @@ To get better visualizations on how your code behaves, you may want to annotate ## Exporting traces to other systems (e.g. Datadog, New Relic, and others) -Since Traceloop SDK uses OpenTelemetry to send data, you can easily export your traces to other systems, such as Datadog, New Relic, and others. See [Traceloop docs on exporters](https://traceloop.com/docs/python-sdk/exporters) for more information. +Since OpenLLMetry uses OpenTelemetry to send data, you can easily export your traces to other systems, such as Datadog, New Relic, and others. See [OpenLLMetry docs on exporters](https://www.traceloop.com/docs/openllmetry/integrations/introduction) for more information. ## Support -For any question or issue with integration you can reach out to the Traceloop team on [Slack](https://join.slack.com/t/traceloopcommunity/shared_invite/zt-1plpfpm6r-zOHKI028VkpcWdobX65C~g) or via [email](mailto:dev@traceloop.com). +For any question or issue with integration you can reach out to the Traceloop team on [Slack](https://traceloop.com/slack) or via [email](mailto:dev@traceloop.com). diff --git a/docs/my-website/docs/projects/Docq.AI.md b/docs/my-website/docs/projects/Docq.AI.md new file mode 100644 index 000000000..492ce4490 --- /dev/null +++ b/docs/my-website/docs/projects/Docq.AI.md @@ -0,0 +1,21 @@ +**A private and secure ChatGPT alternative that knows your business.** + +Upload docs, ask questions --> get answers. + +Leverage GenAI with your confidential documents to increase efficiency and collaboration. + +OSS core, everything can run in your environment. An extensible platform you can build your GenAI strategy on. Support a variety of popular LLMs including embedded for air gap use cases. 
+
+[![Static Badge][docs-shield]][docs-url]
+[![Static Badge][github-shield]][github-url]
+[![X (formerly Twitter) Follow][twitter-shield]][twitter-url]
+
+
+
+
+[docs-shield]: https://img.shields.io/badge/docs-site-black?logo=materialformkdocs
+[docs-url]: https://docqai.github.io/docq/
+[github-shield]: https://img.shields.io/badge/Github-repo-black?logo=github
+[github-url]: https://github.com/docqai/docq/
+[twitter-shield]: https://img.shields.io/twitter/follow/docqai?logo=x&style=flat
+[twitter-url]: https://twitter.com/docqai
diff --git a/docs/my-website/docs/providers/mistral.md b/docs/my-website/docs/providers/mistral.md
new file mode 100644
index 000000000..a7869415b
--- /dev/null
+++ b/docs/my-website/docs/providers/mistral.md
@@ -0,0 +1,56 @@
+# Mistral AI API
+https://docs.mistral.ai/api/
+
+## API Key
+```python
+# env variable
+os.environ['MISTRAL_API_KEY']
+```
+
+## Sample Usage
+```python
+from litellm import completion
+import os
+
+os.environ['MISTRAL_API_KEY'] = ""
+response = completion(
+    model="mistral/mistral-tiny",
+    messages=[
+        {"role": "user", "content": "hello from litellm"}
+    ],
+)
+print(response)
+```
+
+## Sample Usage - Streaming
+```python
+from litellm import completion
+import os
+
+os.environ['MISTRAL_API_KEY'] = ""
+response = completion(
+    model="mistral/mistral-tiny",
+    messages=[
+        {"role": "user", "content": "hello from litellm"}
+    ],
+    stream=True
+)
+
+for chunk in response:
+    print(chunk)
+```
+
+
+## Supported Models
+All models listed here https://docs.mistral.ai/platform/endpoints are supported. We actively maintain the list of models, pricing, token window, etc. [here](https://github.com/BerriAI/litellm/blob/c1b25538277206b9f00de5254d80d6a83bb19a29/model_prices_and_context_window.json).
+
+| Model Name     | Function Call                                           |
+|----------------|---------------------------------------------------------|
+| mistral-tiny   | `completion(model="mistral/mistral-tiny", messages)`    |
+| mistral-small  | `completion(model="mistral/mistral-small", messages)`   |
+| mistral-medium | `completion(model="mistral/mistral-medium", messages)`  |
+
+
+
+
diff --git a/docs/my-website/docs/providers/openai_compatible.md b/docs/my-website/docs/providers/openai_compatible.md
new file mode 100644
index 000000000..ff68d7a00
--- /dev/null
+++ b/docs/my-website/docs/providers/openai_compatible.md
@@ -0,0 +1,47 @@
+# OpenAI-Compatible Endpoints
+
+To call models hosted behind an OpenAI-compatible proxy, make 2 changes:
+
+1. Put `openai/` in front of your model name, so litellm knows you're trying to call an openai-compatible endpoint.
+
+2. **Do NOT** add anything additional to the base url e.g. `/v1/embedding`. LiteLLM uses the openai-client to make these calls, and that automatically adds the relevant endpoints (see the sketch below).
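
For example, a `/chat/completions` call against such an endpoint could look like the sketch below; the endpoint URL, API key, and model name are placeholders for your own deployment.

```python
import litellm
from litellm import completion

# placeholders - substitute your OpenAI-compatible endpoint details
api_base = "http://0.0.0.0:8000"      # base url only, no /v1/... suffix
api_key = "sk-1234"
model = "openai/my-hosted-model"      # `openai/` prefix routes the call via the OpenAI client

response = completion(
    model=model,
    api_key=api_key,
    api_base=api_base,
    messages=[{"role": "user", "content": "good morning from litellm"}],
)
print(response.choices[0].message.content)
```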
+ +## Usage + +```python +import litellm +from litellm import embedding +litellm.set_verbose = True +import os + + +litellm_proxy_endpoint = "http://0.0.0.0:8000" +bearer_token = "sk-1234" + +CHOSEN_LITE_LLM_EMBEDDING_MODEL = "openai/GPT-J 6B - Sagemaker Text Embedding (Internal)" + +litellm.set_verbose = False + +print(litellm_proxy_endpoint) + + + +response = embedding( + + model = CHOSEN_LITE_LLM_EMBEDDING_MODEL, # add `openai/` prefix to model so litellm knows to route to OpenAI + + api_key=bearer_token, + + api_base=litellm_proxy_endpoint, # set API Base of your Custom OpenAI Endpoint + + input=["good morning from litellm"], + + api_version='2023-07-01-preview' + +) + +print('================================================') + +print(len(response.data[0]['embedding'])) + +``` \ No newline at end of file diff --git a/docs/my-website/docs/providers/vertex.md b/docs/my-website/docs/providers/vertex.md index 1dedd1450..f71aa0ada 100644 --- a/docs/my-website/docs/providers/vertex.md +++ b/docs/my-website/docs/providers/vertex.md @@ -1,4 +1,4 @@ -# VertexAI - Google +# VertexAI - Google [Gemini] Open In Colab @@ -10,6 +10,16 @@ * run `gcloud auth application-default login` See [Google Cloud Docs](https://cloud.google.com/docs/authentication/external/set-up-adc) * Alternatively you can set `application_default_credentials.json` + +## Sample Usage +```python +import litellm +litellm.vertex_project = "hardy-device-38811" # Your Project ID +litellm.vertex_location = "us-central1" # proj location + +response = completion(model="gemini-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}]) +``` + ## Set Vertex Project & Vertex Location All calls using Vertex AI require the following parameters: * Your Project ID @@ -37,13 +47,50 @@ os.environ["VERTEXAI_LOCATION"] = "us-central1 # Your Location litellm.vertex_location = "us-central1 # Your Location ``` -## Sample Usage +## Gemini Pro +| Model Name | Function Call | +|------------------|--------------------------------------| +| gemini-pro | `completion('gemini-pro', messages)` | + +## Gemini Pro Vision +| Model Name | Function Call | +|------------------|--------------------------------------| +| gemini-pro-vision | `completion('gemini-pro-vision', messages)` | + +#### Using Gemini Pro Vision + +Call `gemini-pro-vision` in the same input/output format as OpenAI [`gpt-4-vision`](https://docs.litellm.ai/docs/providers/openai#openai-vision-models) + +LiteLLM Supports the following image types passed in `url` +- Images with Cloud Storage URIs - gs://cloud-samples-data/generative-ai/image/boats.jpeg +- Images with direct links - https://storage.googleapis.com/github-repo/img/gemini/intro/landmark3.jpg +- Videos with Cloud Storage URIs - https://storage.googleapis.com/github-repo/img/gemini/multimodality_usecases_overview/pixel8.mp4 + +**Example Request** ```python import litellm -litellm.vertex_project = "hardy-device-38811" # Your Project ID -litellm.vertex_location = "us-central1" # proj location -response = completion(model="chat-bison", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}]) +response = litellm.completion( + model = "vertex_ai/gemini-pro-vision", + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "Whats in this image?" 
+ }, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + } + } + ] + } + ], +) +print(response) ``` ## Chat Models diff --git a/docs/my-website/docs/proxy/caching.md b/docs/my-website/docs/proxy/caching.md index 37d3658e7..9eb6a1f55 100644 --- a/docs/my-website/docs/proxy/caching.md +++ b/docs/my-website/docs/proxy/caching.md @@ -1,20 +1,24 @@ # Caching Cache LLM Responses +## Quick Start Caching can be enabled by adding the `cache` key in the `config.yaml` -#### Step 1: Add `cache` to the config.yaml +### Step 1: Add `cache` to the config.yaml ```yaml model_list: - model_name: gpt-3.5-turbo litellm_params: model: gpt-3.5-turbo + - model_name: text-embedding-ada-002 + litellm_params: + model: text-embedding-ada-002 litellm_settings: set_verbose: True cache: True # set cache responses to True, litellm defaults to using a redis cache ``` -#### Step 2: Add Redis Credentials to .env +### Step 2: Add Redis Credentials to .env Set either `REDIS_URL` or the `REDIS_HOST` in your os environment, to enable caching. ```shell @@ -32,12 +36,12 @@ REDIS_ = "" ``` [**See how it's read from the environment**](https://github.com/BerriAI/litellm/blob/4d7ff1b33b9991dcf38d821266290631d9bcd2dd/litellm/_redis.py#L40) -#### Step 3: Run proxy with config +### Step 3: Run proxy with config ```shell $ litellm --config /path/to/config.yaml ``` -#### Using Caching +## Using Caching - /chat/completions Send the same request twice: ```shell curl http://0.0.0.0:8000/v1/chat/completions \ @@ -57,9 +61,51 @@ curl http://0.0.0.0:8000/v1/chat/completions \ }' ``` -#### Control caching per completion request +## Using Caching - /embeddings +Send the same request twice: +```shell +curl --location 'http://0.0.0.0:8000/embeddings' \ + --header 'Content-Type: application/json' \ + --data ' { + "model": "text-embedding-ada-002", + "input": ["write a litellm poem"] + }' + +curl --location 'http://0.0.0.0:8000/embeddings' \ + --header 'Content-Type: application/json' \ + --data ' { + "model": "text-embedding-ada-002", + "input": ["write a litellm poem"] + }' +``` + +## Advanced +### Set Cache Params on config.yaml +```yaml +model_list: + - model_name: gpt-3.5-turbo + litellm_params: + model: gpt-3.5-turbo + - model_name: text-embedding-ada-002 + litellm_params: + model: text-embedding-ada-002 + +litellm_settings: + set_verbose: True + cache: True # set cache responses to True, litellm defaults to using a redis cache + cache_params: # cache_params are optional + type: "redis" # The type of cache to initialize. Can be "local" or "redis". Defaults to "local". + host: "localhost" # The host address for the Redis cache. Required if type is "redis". + port: 6379 # The port number for the Redis cache. Required if type is "redis". + password: "your_password" # The password for the Redis cache. Required if type is "redis". 
+ + # Optional configurations + supported_call_types: ["acompletion", "completion", "embedding", "aembedding"] # defaults to all litellm call types +``` + +### Override caching per `chat/completions` request Caching can be switched on/off per `/chat/completions` request -- Caching **on** for completion - pass `caching=True`: +- Caching **on** for individual completion - pass `caching=True`: ```shell curl http://0.0.0.0:8000/v1/chat/completions \ -H "Content-Type: application/json" \ @@ -70,7 +116,7 @@ Caching can be switched on/off per `/chat/completions` request "caching": true }' ``` -- Caching **off** for completion - pass `caching=False`: +- Caching **off** for individual completion - pass `caching=False`: ```shell curl http://0.0.0.0:8000/v1/chat/completions \ -H "Content-Type: application/json" \ @@ -80,4 +126,29 @@ Caching can be switched on/off per `/chat/completions` request "temperature": 0.7, "caching": false }' + ``` + + +### Override caching per `/embeddings` request + +Caching can be switched on/off per `/embeddings` request +- Caching **on** for embedding - pass `caching=True`: + ```shell + curl --location 'http://0.0.0.0:8000/embeddings' \ + --header 'Content-Type: application/json' \ + --data ' { + "model": "text-embedding-ada-002", + "input": ["write a litellm poem"], + "caching": true + }' + ``` +- Caching **off** for completion - pass `caching=False`: + ```shell + curl --location 'http://0.0.0.0:8000/embeddings' \ + --header 'Content-Type: application/json' \ + --data ' { + "model": "text-embedding-ada-002", + "input": ["write a litellm poem"], + "caching": false + }' ``` \ No newline at end of file diff --git a/docs/my-website/docs/proxy/call_hooks.md b/docs/my-website/docs/proxy/call_hooks.md new file mode 100644 index 000000000..a92b94a86 --- /dev/null +++ b/docs/my-website/docs/proxy/call_hooks.md @@ -0,0 +1,78 @@ +# Modify Incoming Data + +Modify data just before making litellm completion calls call on proxy + +See a complete example with our [parallel request rate limiter](https://github.com/BerriAI/litellm/blob/main/litellm/proxy/hooks/parallel_request_limiter.py) + +## Quick Start + +1. In your Custom Handler add a new `async_pre_call_hook` function + +This function is called just before a litellm completion call is made, and allows you to modify the data going into the litellm call [**See Code**](https://github.com/BerriAI/litellm/blob/589a6ca863000ba8e92c897ba0f776796e7a5904/litellm/proxy/proxy_server.py#L1000) + +```python +from litellm.integrations.custom_logger import CustomLogger +import litellm + +# This file includes the custom callbacks for LiteLLM Proxy +# Once defined, these can be passed in proxy_config.yaml +class MyCustomHandler(CustomLogger): # https://docs.litellm.ai/docs/observability/custom_callback#callback-class + # Class variables or attributes + def __init__(self): + pass + + #### ASYNC #### + + async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time): + pass + + async def async_log_pre_api_call(self, model, messages, kwargs): + pass + + async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): + pass + + async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time): + pass + + #### CALL HOOKS - proxy only #### + + async def async_pre_call_hook(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: Literal["completion", "embeddings"]): + data["model"] = "my-new-model" + return data + +proxy_handler_instance = MyCustomHandler() +``` + +2. 
Add this file to your proxy config + +```yaml +model_list: + - model_name: gpt-3.5-turbo + litellm_params: + model: gpt-3.5-turbo + +litellm_settings: + callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance] +``` + +3. Start the server + test the request + +```shell +$ litellm /path/to/config.yaml +``` +```shell +curl --location 'http://0.0.0.0:8000/chat/completions' \ + --data ' { + "model": "gpt-3.5-turbo", + "messages": [ + { + "role": "user", + "content": "good morning good sir" + } + ], + "user": "ishaan-app", + "temperature": 0.2 + }' +``` + diff --git a/docs/my-website/docs/proxy/deploy.md b/docs/my-website/docs/proxy/deploy.md index 65ba90eee..b25bfaa53 100644 --- a/docs/my-website/docs/proxy/deploy.md +++ b/docs/my-website/docs/proxy/deploy.md @@ -1,4 +1,11 @@ -# Deploying LiteLLM Proxy +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 🐳 Docker, Deploying LiteLLM Proxy + +## Dockerfile + +You can find the Dockerfile to build litellm proxy [here](https://github.com/BerriAI/litellm/blob/main/Dockerfile) ## Quick Start Docker Image: Github Container Registry @@ -7,12 +14,12 @@ See the latest available ghcr docker image here: https://github.com/berriai/litellm/pkgs/container/litellm ```shell -docker pull ghcr.io/berriai/litellm:main-v1.10.1 +docker pull ghcr.io/berriai/litellm:main-v1.12.3 ``` ### Run the Docker Image ```shell -docker run ghcr.io/berriai/litellm:main-v1.10.0 +docker run ghcr.io/berriai/litellm:main-v1.12.3 ``` #### Run the Docker Image with LiteLLM CLI args @@ -21,12 +28,12 @@ See all supported CLI args [here](https://docs.litellm.ai/docs/proxy/cli): Here's how you can run the docker image and pass your config to `litellm` ```shell -docker run ghcr.io/berriai/litellm:main-v1.10.0 --config your_config.yaml +docker run ghcr.io/berriai/litellm:main-v1.12.3 --config your_config.yaml ``` Here's how you can run the docker image and start litellm on port 8002 with `num_workers=8` ```shell -docker run ghcr.io/berriai/litellm:main-v1.10.0 --port 8002 --num_workers 8 +docker run ghcr.io/berriai/litellm:main-v1.12.3 --port 8002 --num_workers 8 ``` #### Run the Docker Image using docker compose @@ -42,6 +49,10 @@ Here's an example `docker-compose.yml` file version: "3.9" services: litellm: + build: + context: . + args: + target: runtime image: ghcr.io/berriai/litellm:main ports: - "8000:8000" # Map the container port to the host, change the host port if necessary @@ -74,6 +85,26 @@ Your LiteLLM container should be running now on the defined port e.g. `8000`. +## Deploy on Google Cloud Run +**Click the button** to deploy to Google Cloud Run + +[![Deploy](https://deploy.cloud.run/button.svg)](https://deploy.cloud.run/?git_repo=https://github.com/BerriAI/litellm) + +#### Testing your deployed proxy +**Assuming the required keys are set as Environment Variables** + +https://litellm-7yjrj3ha2q-uc.a.run.app is our example proxy, substitute it with your deployed cloud run app + +```shell +curl https://litellm-7yjrj3ha2q-uc.a.run.app/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "gpt-3.5-turbo", + "messages": [{"role": "user", "content": "Say this is a test!"}], + "temperature": 0.7 + }' +``` + ## LiteLLM Proxy Performance LiteLLM proxy has been load tested to handle 1500 req/s. 
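
Since the proxy exposes OpenAI-compatible routes, the deployed endpoint can also be exercised with the OpenAI Python SDK. A minimal sketch, reusing the example Cloud Run URL above (the API key and model name depend on your own config):

```python
import openai

# point the OpenAI client at the deployed proxy
client = openai.OpenAI(
    api_key="anything",  # or your proxy master key, if one is configured
    base_url="https://litellm-7yjrj3ha2q-uc.a.run.app",
)

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say this is a test!"}],
    temperature=0.7,
)
print(response.choices[0].message.content)
```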
diff --git a/docs/my-website/docs/proxy/embedding.md b/docs/my-website/docs/proxy/embedding.md new file mode 100644 index 000000000..e1a7677f9 --- /dev/null +++ b/docs/my-website/docs/proxy/embedding.md @@ -0,0 +1,244 @@ +import Image from '@theme/IdealImage'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Embeddings - `/embeddings` + +See supported Embedding Providers & Models [here](https://docs.litellm.ai/docs/embedding/supported_embedding) + + +## Quick start +Here's how to route between GPT-J embedding (sagemaker endpoint), Amazon Titan embedding (Bedrock) and Azure OpenAI embedding on the proxy server: + +1. Set models in your config.yaml +```yaml +model_list: + - model_name: sagemaker-embeddings + litellm_params: + model: "sagemaker/berri-benchmarking-gpt-j-6b-fp16" + - model_name: amazon-embeddings + litellm_params: + model: "bedrock/amazon.titan-embed-text-v1" + - model_name: azure-embeddings + litellm_params: + model: "azure/azure-embedding-model" + api_base: "os.environ/AZURE_API_BASE" # os.getenv("AZURE_API_BASE") + api_key: "os.environ/AZURE_API_KEY" # os.getenv("AZURE_API_KEY") + api_version: "2023-07-01-preview" + +general_settings: + master_key: sk-1234 # [OPTIONAL] if set all calls to proxy will require either this key or a valid generated token +``` + +2. Start the proxy +```shell +$ litellm --config /path/to/config.yaml +``` + +3. Test the embedding call + +```shell +curl --location 'http://0.0.0.0:8000/v1/embeddings' \ +--header 'Authorization: Bearer sk-1234' \ +--header 'Content-Type: application/json' \ +--data '{ + "input": "The food was delicious and the waiter..", + "model": "sagemaker-embeddings", +}' +``` + +## `/embeddings` Request Format +Input, Output and Exceptions are mapped to the OpenAI format for all supported models + + + + +```shell +curl --location 'http://0.0.0.0:8000/embeddings' \ + --header 'Content-Type: application/json' \ + --data ' { + "model": "text-embedding-ada-002", + "input": ["write a litellm poem"] + }' +``` + + + +```python +import openai +from openai import OpenAI + +# set base_url to your proxy server +# set api_key to send to proxy server +client = OpenAI(api_key="", base_url="http://0.0.0.0:8000") + +response = openai.embeddings.create( + input=["hello from litellm"], + model="text-embedding-ada-002" +) + +print(response) + +``` + + + + +```python +from langchain.embeddings import OpenAIEmbeddings + +embeddings = OpenAIEmbeddings(model="sagemaker-embeddings", openai_api_base="http://0.0.0.0:8000", openai_api_key="temp-key") + + +text = "This is a test document." + +query_result = embeddings.embed_query(text) + +print(f"SAGEMAKER EMBEDDINGS") +print(query_result[:5]) + +embeddings = OpenAIEmbeddings(model="bedrock-embeddings", openai_api_base="http://0.0.0.0:8000", openai_api_key="temp-key") + +text = "This is a test document." + +query_result = embeddings.embed_query(text) + +print(f"BEDROCK EMBEDDINGS") +print(query_result[:5]) + +embeddings = OpenAIEmbeddings(model="bedrock-titan-embeddings", openai_api_base="http://0.0.0.0:8000", openai_api_key="temp-key") + +text = "This is a test document." + +query_result = embeddings.embed_query(text) + +print(f"TITAN EMBEDDINGS") +print(query_result[:5]) +``` + + + + + +## `/embeddings` Response Format + +```json +{ + "object": "list", + "data": [ + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... 
+ -0.0028842222, + ], + "index": 0 + } + ], + "model": "text-embedding-ada-002", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } +} + +``` + +## Supported Models + +See supported Embedding Providers & Models [here](https://docs.litellm.ai/docs/embedding/supported_embedding) + +#### Create Config.yaml + + + +LiteLLM Proxy supports all Feature-Extraction Embedding models. + +```yaml +model_list: + - model_name: deployed-codebert-base + litellm_params: + # send request to deployed hugging face inference endpoint + model: huggingface/microsoft/codebert-base # add huggingface prefix so it routes to hugging face + api_key: hf_LdS # api key for hugging face inference endpoint + api_base: https://uysneno1wv2wd4lw.us-east-1.aws.endpoints.huggingface.cloud # your hf inference endpoint + - model_name: codebert-base + litellm_params: + # no api_base set, sends request to hugging face free inference api https://api-inference.huggingface.co/models/ + model: huggingface/microsoft/codebert-base # add huggingface prefix so it routes to hugging face + api_key: hf_LdS # api key for hugging face + +``` + + + + + +```yaml +model_list: + - model_name: azure-embedding-model # model group + litellm_params: + model: azure/azure-embedding-model # model name for litellm.embedding(model=azure/azure-embedding-model) call + api_base: your-azure-api-base + api_key: your-api-key + api_version: 2023-07-01-preview +``` + + + + + +```yaml +model_list: +- model_name: text-embedding-ada-002 # model group + litellm_params: + model: text-embedding-ada-002 # model name for litellm.embedding(model=text-embedding-ada-002) + api_key: your-api-key-1 +- model_name: text-embedding-ada-002 + litellm_params: + model: text-embedding-ada-002 + api_key: your-api-key-2 +``` + + + + + +

Use this for calling `/embeddings` endpoints on OpenAI-compatible servers.

+ +**Note add `openai/` prefix to `litellm_params`: `model` so litellm knows to route to OpenAI** + +```yaml +model_list: +- model_name: text-embedding-ada-002 # model group + litellm_params: + model: openai/ # model name for litellm.embedding(model=text-embedding-ada-002) + api_base: +``` + +
+ + +#### Start Proxy +```shell +litellm --config config.yaml +``` + +#### Make Request +Sends Request to `deployed-codebert-base` + +```shell +curl --location 'http://0.0.0.0:8000/embeddings' \ + --header 'Content-Type: application/json' \ + --data ' { + "model": "deployed-codebert-base", + "input": ["write a litellm poem"] + }' +``` + + + + + diff --git a/docs/my-website/docs/proxy/health.md b/docs/my-website/docs/proxy/health.md new file mode 100644 index 000000000..5dffd7100 --- /dev/null +++ b/docs/my-website/docs/proxy/health.md @@ -0,0 +1,62 @@ +# Health Checks +Use this to health check all LLMs defined in your config.yaml + +## Summary + +The proxy exposes: +* a /health endpoint which returns the health of the LLM APIs +* a /test endpoint which makes a ping to the litellm server + +#### Request +Make a GET Request to `/health` on the proxy +```shell +curl --location 'http://0.0.0.0:8000/health' +``` + +You can also run `litellm -health` it makes a `get` request to `http://0.0.0.0:8000/health` for you +``` +litellm --health +``` +#### Response +```shell +{ + "healthy_endpoints": [ + { + "model": "azure/gpt-35-turbo", + "api_base": "https://my-endpoint-canada-berri992.openai.azure.com/" + }, + { + "model": "azure/gpt-35-turbo", + "api_base": "https://my-endpoint-europe-berri-992.openai.azure.com/" + } + ], + "unhealthy_endpoints": [ + { + "model": "azure/gpt-35-turbo", + "api_base": "https://openai-france-1234.openai.azure.com/" + } + ] +} +``` + +## Background Health Checks + +You can enable model health checks being run in the background, to prevent each model from being queried too frequently via `/health`. + +Here's how to use it: +1. in the config.yaml add: +``` +general_settings: + background_health_checks: True # enable background health checks + health_check_interval: 300 # frequency of background health checks +``` + +2. Start server +``` +$ litellm /path/to/config.yaml +``` + +3. Query health endpoint: +``` +curl --location 'http://0.0.0.0:8000/health' +``` \ No newline at end of file diff --git a/docs/my-website/docs/proxy/load_balancing.md b/docs/my-website/docs/proxy/load_balancing.md index e2e3a7ee6..e223c2d5a 100644 --- a/docs/my-website/docs/proxy/load_balancing.md +++ b/docs/my-website/docs/proxy/load_balancing.md @@ -72,128 +72,28 @@ curl --location 'http://0.0.0.0:8000/chat/completions' \ ' ``` +## Router settings on config - routing_strategy, model_group_alias -## Fallbacks + Cooldowns + Retries + Timeouts +litellm.Router() settings can be set under `router_settings`. You can set `model_group_alias`, `routing_strategy`, `num_retries`,`timeout` . See all Router supported params [here](https://github.com/BerriAI/litellm/blob/1b942568897a48f014fa44618ec3ce54d7570a46/litellm/router.py#L64) -If a call fails after num_retries, fall back to another model group. - -If the error is a context window exceeded error, fall back to a larger model group (if given). 
- -[**See Code**](https://github.com/BerriAI/litellm/blob/main/litellm/router.py) - -**Set via config** -```yaml -model_list: - - model_name: zephyr-beta - litellm_params: - model: huggingface/HuggingFaceH4/zephyr-7b-beta - api_base: http://0.0.0.0:8001 - - model_name: zephyr-beta - litellm_params: - model: huggingface/HuggingFaceH4/zephyr-7b-beta - api_base: http://0.0.0.0:8002 - - model_name: zephyr-beta - litellm_params: - model: huggingface/HuggingFaceH4/zephyr-7b-beta - api_base: http://0.0.0.0:8003 - - model_name: gpt-3.5-turbo - litellm_params: - model: gpt-3.5-turbo - api_key: - - model_name: gpt-3.5-turbo-16k - litellm_params: - model: gpt-3.5-turbo-16k - api_key: - -litellm_settings: - num_retries: 3 # retry call 3 times on each model_name (e.g. zephyr-beta) - request_timeout: 10 # raise Timeout error if call takes longer than 10s. Sets litellm.request_timeout - fallbacks: [{"zephyr-beta": ["gpt-3.5-turbo"]}] # fallback to gpt-3.5-turbo if call fails num_retries - context_window_fallbacks: [{"zephyr-beta": ["gpt-3.5-turbo-16k"]}, {"gpt-3.5-turbo": ["gpt-3.5-turbo-16k"]}] # fallback to gpt-3.5-turbo-16k if context window error - allowed_fails: 3 # cooldown model if it fails > 1 call in a minute. -``` - -**Set dynamically** - -```bash -curl --location 'http://0.0.0.0:8000/chat/completions' \ ---header 'Content-Type: application/json' \ ---data ' { - "model": "zephyr-beta", - "messages": [ - { - "role": "user", - "content": "what llm are you" - } - ], - "fallbacks": [{"zephyr-beta": ["gpt-3.5-turbo"]}], - "context_window_fallbacks": [{"zephyr-beta": ["gpt-3.5-turbo"]}], - "num_retries": 2, - "timeout": 10 - } -' -``` - -## Custom Timeouts, Stream Timeouts - Per Model -For each model you can set `timeout` & `stream_timeout` under `litellm_params` +Example config with `router_settings` ```yaml model_list: - model_name: gpt-3.5-turbo litellm_params: - model: azure/gpt-turbo-small-eu - api_base: https://my-endpoint-europe-berri-992.openai.azure.com/ - api_key: - timeout: 0.1 # timeout in (seconds) - stream_timeout: 0.01 # timeout for stream requests (seconds) - max_retries: 5 + model: azure/ + api_base: + api_key: + rpm: 6 # Rate limit for this deployment: in requests per minute (rpm) - model_name: gpt-3.5-turbo litellm_params: model: azure/gpt-turbo-small-ca api_base: https://my-endpoint-canada-berri992.openai.azure.com/ - api_key: - timeout: 0.1 # timeout in (seconds) - stream_timeout: 0.01 # timeout for stream requests (seconds) - max_retries: 5 - -``` - -#### Start Proxy -```shell -$ litellm --config /path/to/config.yaml -``` - - - -## Health Check LLMs on Proxy -Use this to health check all LLMs defined in your config.yaml -#### Request -Make a GET Request to `/health` on the proxy -```shell -curl --location 'http://0.0.0.0:8000/health' -``` - -You can also run `litellm -health` it makes a `get` request to `http://0.0.0.0:8000/health` for you -``` -litellm --health -``` -#### Response -```shell -{ - "healthy_endpoints": [ - { - "model": "azure/gpt-35-turbo", - "api_base": "https://my-endpoint-canada-berri992.openai.azure.com/" - }, - { - "model": "azure/gpt-35-turbo", - "api_base": "https://my-endpoint-europe-berri-992.openai.azure.com/" - } - ], - "unhealthy_endpoints": [ - { - "model": "azure/gpt-35-turbo", - "api_base": "https://openai-france-1234.openai.azure.com/" - } - ] -} + api_key: + rpm: 6 +router_settings: + model_group_alias: {"gpt-4": "gpt-3.5-turbo"} # all requests with `gpt-4` will be routed to models with `gpt-3.5-turbo` + routing_strategy: least-busy # 
Literal["simple-shuffle", "least-busy", "usage-based-routing", "latency-based-routing"] + num_retries: 2 + timeout: 30 # 30 seconds ``` \ No newline at end of file diff --git a/docs/my-website/docs/proxy/logging.md b/docs/my-website/docs/proxy/logging.md index c70c8680f..cf14abff3 100644 --- a/docs/my-website/docs/proxy/logging.md +++ b/docs/my-website/docs/proxy/logging.md @@ -1,5 +1,8 @@ -# Logging - Custom Callbacks, OpenTelemetry, Langfuse -Log Proxy Input, Output, Exceptions using Custom Callbacks, Langfuse, OpenTelemetry +import Image from '@theme/IdealImage'; + +# Logging - Custom Callbacks, OpenTelemetry, Langfuse, Sentry + +Log Proxy Input, Output, Exceptions using Custom Callbacks, Langfuse, OpenTelemetry, LangFuse, DynamoDB ## Custom Callback Class [Async] Use this when you want to run custom callbacks in `python` @@ -486,3 +489,166 @@ litellm --test Expected output on Langfuse + +## Logging Proxy Input/Output - DynamoDB + +We will use the `--config` to set +- `litellm.success_callback = ["dynamodb"]` +- `litellm.dynamodb_table_name = "your-table-name"` + +This will log all successfull LLM calls to DynamoDB + +**Step 1** Set AWS Credentials in .env + +```shell +AWS_ACCESS_KEY_ID = "" +AWS_SECRET_ACCESS_KEY = "" +AWS_REGION_NAME = "" +``` + +**Step 2**: Create a `config.yaml` file and set `litellm_settings`: `success_callback` +```yaml +model_list: + - model_name: gpt-3.5-turbo + litellm_params: + model: gpt-3.5-turbo +litellm_settings: + success_callback: ["dynamodb"] + dynamodb_table_name: your-table-name +``` + +**Step 3**: Start the proxy, make a test request + +Start proxy +```shell +litellm --config config.yaml --debug +``` + +Test Request +```shell +curl --location 'http://0.0.0.0:8000/chat/completions' \ + --header 'Content-Type: application/json' \ + --data ' { + "model": "Azure OpenAI GPT-4 East", + "messages": [ + { + "role": "user", + "content": "what llm are you" + } + ] + }' +``` + +Your logs should be available on DynamoDB + +#### Data Logged to DynamoDB /chat/completions + +```json +{ + "id": { + "S": "chatcmpl-8W15J4480a3fAQ1yQaMgtsKJAicen" + }, + "call_type": { + "S": "acompletion" + }, + "endTime": { + "S": "2023-12-15 17:25:58.424118" + }, + "messages": { + "S": "[{'role': 'user', 'content': 'This is a test'}]" + }, + "metadata": { + "S": "{}" + }, + "model": { + "S": "gpt-3.5-turbo" + }, + "modelParameters": { + "S": "{'temperature': 0.7, 'max_tokens': 100, 'user': 'ishaan-2'}" + }, + "response": { + "S": "ModelResponse(id='chatcmpl-8W15J4480a3fAQ1yQaMgtsKJAicen', choices=[Choices(finish_reason='stop', index=0, message=Message(content='Great! 
What can I assist you with?', role='assistant'))], created=1702641357, model='gpt-3.5-turbo-0613', object='chat.completion', system_fingerprint=None, usage=Usage(completion_tokens=9, prompt_tokens=11, total_tokens=20))" + }, + "startTime": { + "S": "2023-12-15 17:25:56.047035" + }, + "usage": { + "S": "Usage(completion_tokens=9, prompt_tokens=11, total_tokens=20)" + }, + "user": { + "S": "ishaan-2" + } +} +``` + +#### Data logged to DynamoDB /embeddings + +```json +{ + "id": { + "S": "4dec8d4d-4817-472d-9fc6-c7a6153eb2ca" + }, + "call_type": { + "S": "aembedding" + }, + "endTime": { + "S": "2023-12-15 17:25:59.890261" + }, + "messages": { + "S": "['hi']" + }, + "metadata": { + "S": "{}" + }, + "model": { + "S": "text-embedding-ada-002" + }, + "modelParameters": { + "S": "{'user': 'ishaan-2'}" + }, + "response": { + "S": "EmbeddingResponse(model='text-embedding-ada-002-v2', data=[{'embedding': [-0.03503197431564331, -0.020601635798811913, -0.015375726856291294, + } +} +``` + + + + +## Logging Proxy Input/Output - Sentry + +If api calls fail (llm/database) you can log those to Sentry: + +**Step 1** Install Sentry +```shell +pip install --upgrade sentry-sdk +``` + +**Step 2**: Save your Sentry_DSN and add `litellm_settings`: `failure_callback` +```shell +export SENTRY_DSN="your-sentry-dsn" +``` + +```yaml +model_list: + - model_name: gpt-3.5-turbo + litellm_params: + model: gpt-3.5-turbo +litellm_settings: + # other settings + failure_callback: ["sentry"] +general_settings: + database_url: "my-bad-url" # set a fake url to trigger a sentry exception +``` + +**Step 3**: Start the proxy, make a test request + +Start proxy +```shell +litellm --config config.yaml --debug +``` + +Test Request +``` +litellm --test +``` diff --git a/docs/my-website/docs/proxy/reliability.md b/docs/my-website/docs/proxy/reliability.md new file mode 100644 index 000000000..75f43bcdc --- /dev/null +++ b/docs/my-website/docs/proxy/reliability.md @@ -0,0 +1,89 @@ +# Fallbacks, Retries, Timeouts, Cooldowns + +If a call fails after num_retries, fall back to another model group. + +If the error is a context window exceeded error, fall back to a larger model group (if given). + +[**See Code**](https://github.com/BerriAI/litellm/blob/main/litellm/router.py) + +**Set via config** +```yaml +model_list: + - model_name: zephyr-beta + litellm_params: + model: huggingface/HuggingFaceH4/zephyr-7b-beta + api_base: http://0.0.0.0:8001 + - model_name: zephyr-beta + litellm_params: + model: huggingface/HuggingFaceH4/zephyr-7b-beta + api_base: http://0.0.0.0:8002 + - model_name: zephyr-beta + litellm_params: + model: huggingface/HuggingFaceH4/zephyr-7b-beta + api_base: http://0.0.0.0:8003 + - model_name: gpt-3.5-turbo + litellm_params: + model: gpt-3.5-turbo + api_key: + - model_name: gpt-3.5-turbo-16k + litellm_params: + model: gpt-3.5-turbo-16k + api_key: + +litellm_settings: + num_retries: 3 # retry call 3 times on each model_name (e.g. zephyr-beta) + request_timeout: 10 # raise Timeout error if call takes longer than 10s. Sets litellm.request_timeout + fallbacks: [{"zephyr-beta": ["gpt-3.5-turbo"]}] # fallback to gpt-3.5-turbo if call fails num_retries + context_window_fallbacks: [{"zephyr-beta": ["gpt-3.5-turbo-16k"]}, {"gpt-3.5-turbo": ["gpt-3.5-turbo-16k"]}] # fallback to gpt-3.5-turbo-16k if context window error + allowed_fails: 3 # cooldown model if it fails > 1 call in a minute. 
+``` + +**Set dynamically** + +```bash +curl --location 'http://0.0.0.0:8000/chat/completions' \ +--header 'Content-Type: application/json' \ +--data ' { + "model": "zephyr-beta", + "messages": [ + { + "role": "user", + "content": "what llm are you" + } + ], + "fallbacks": [{"zephyr-beta": ["gpt-3.5-turbo"]}], + "context_window_fallbacks": [{"zephyr-beta": ["gpt-3.5-turbo"]}], + "num_retries": 2, + "timeout": 10 + } +' +``` + +## Custom Timeouts, Stream Timeouts - Per Model +For each model you can set `timeout` & `stream_timeout` under `litellm_params` +```yaml +model_list: + - model_name: gpt-3.5-turbo + litellm_params: + model: azure/gpt-turbo-small-eu + api_base: https://my-endpoint-europe-berri-992.openai.azure.com/ + api_key: + timeout: 0.1 # timeout in (seconds) + stream_timeout: 0.01 # timeout for stream requests (seconds) + max_retries: 5 + - model_name: gpt-3.5-turbo + litellm_params: + model: azure/gpt-turbo-small-ca + api_base: https://my-endpoint-canada-berri992.openai.azure.com/ + api_key: + timeout: 0.1 # timeout in (seconds) + stream_timeout: 0.01 # timeout for stream requests (seconds) + max_retries: 5 + +``` + +#### Start Proxy +```shell +$ litellm --config /path/to/config.yaml +``` + diff --git a/docs/my-website/docs/proxy_server.md b/docs/my-website/docs/proxy_server.md index ec8ca15c5..200a92b95 100644 --- a/docs/my-website/docs/proxy_server.md +++ b/docs/my-website/docs/proxy_server.md @@ -1,13 +1,13 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# [OLD PROXY 👉 [**NEW** proxy here](./simple_proxy.md)] Local OpenAI Proxy Server +# [OLD PROXY 👉 [**NEW** proxy here](./simple_proxy)] Local OpenAI Proxy Server A fast, and lightweight OpenAI-compatible server to call 100+ LLM APIs. :::info -Docs outdated. New docs 👉 [here](./simple_proxy.md) +Docs outdated. New docs 👉 [here](./simple_proxy) ::: diff --git a/docs/my-website/docs/routing.md b/docs/my-website/docs/routing.md index 3f55ae28e..5239f7ab7 100644 --- a/docs/my-website/docs/routing.md +++ b/docs/my-website/docs/routing.md @@ -366,6 +366,63 @@ router = Router(model_list: Optional[list] = None, cache_responses=True) ``` +## Caching across model groups + +If you want to cache across 2 different model groups (e.g. azure deployments, and openai), use caching groups. 
+ +```python +import litellm, asyncio, time +from litellm import Router + +# set os env +os.environ["OPENAI_API_KEY"] = "" +os.environ["AZURE_API_KEY"] = "" +os.environ["AZURE_API_BASE"] = "" +os.environ["AZURE_API_VERSION"] = "" + +async def test_acompletion_caching_on_router_caching_groups(): + # tests acompletion + caching on router + try: + litellm.set_verbose = True + model_list = [ + { + "model_name": "openai-gpt-3.5-turbo", + "litellm_params": { + "model": "gpt-3.5-turbo-0613", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + }, + { + "model_name": "azure-gpt-3.5-turbo", + "litellm_params": { + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_base": os.getenv("AZURE_API_BASE"), + "api_version": os.getenv("AZURE_API_VERSION") + }, + } + ] + + messages = [ + {"role": "user", "content": f"write a one sentence poem {time.time()}?"} + ] + start_time = time.time() + router = Router(model_list=model_list, + cache_responses=True, + caching_groups=[("openai-gpt-3.5-turbo", "azure-gpt-3.5-turbo")]) + response1 = await router.acompletion(model="openai-gpt-3.5-turbo", messages=messages, temperature=1) + print(f"response1: {response1}") + await asyncio.sleep(1) # add cache is async, async sleep for cache to get set + response2 = await router.acompletion(model="azure-gpt-3.5-turbo", messages=messages, temperature=1) + assert response1.id == response2.id + assert len(response1.choices[0].message.content) > 0 + assert response1.choices[0].message.content == response2.choices[0].message.content + except Exception as e: + traceback.print_exc() + +asyncio.run(test_acompletion_caching_on_router_caching_groups()) +``` + #### Default litellm.completion/embedding params You can also set default params for litellm completion/embedding calls. Here's how to do that: @@ -391,200 +448,3 @@ print(f"response: {response}") ## Deploy Router If you want a server to load balance across different LLM APIs, use our [OpenAI Proxy Server](./simple_proxy#load-balancing---multiple-instances-of-1-model) - -## Queuing (Beta) - -**Never fail a request due to rate limits** - -The LiteLLM Queuing endpoints can handle 100+ req/s. We use Celery workers to process requests. - -:::info - -This is pretty new, and might have bugs. Any contributions to improving our implementation are welcome - -::: - - -[**See Code**](https://github.com/BerriAI/litellm/blob/fbf9cab5b9e35df524e2c9953180c58d92e4cd97/litellm/proxy/proxy_server.py#L589) - - -### Quick Start - -1. Add Redis credentials in a .env file - -```python -REDIS_HOST="my-redis-endpoint" -REDIS_PORT="my-redis-port" -REDIS_PASSWORD="my-redis-password" # [OPTIONAL] if self-hosted -REDIS_USERNAME="default" # [OPTIONAL] if self-hosted -``` - -2. Start litellm server with your model config - -```bash -$ litellm --config /path/to/config.yaml --use_queue -``` - -Here's an example config for `gpt-3.5-turbo` - -**config.yaml** (This will load balance between OpenAI + Azure endpoints) -```yaml -model_list: - - model_name: gpt-3.5-turbo - litellm_params: - model: gpt-3.5-turbo - api_key: - - model_name: gpt-3.5-turbo - litellm_params: - model: azure/chatgpt-v-2 # actual model name - api_key: - api_version: 2023-07-01-preview - api_base: https://openai-gpt-4-test-v-1.openai.azure.com/ -``` - -3. Test (in another window) → sends 100 simultaneous requests to the queue - -```bash -$ litellm --test_async --num_requests 100 -``` - - -### Available Endpoints -- `/queue/request` - Queues a /chat/completions request. Returns a job id. 
-- `/queue/response/{id}` - Returns the status of a job. If completed, returns the response as well. Potential status's are: `queued` and `finished`. - - -## Hosted Request Queing api.litellm.ai -Queue your LLM API requests to ensure you're under your rate limits -- Step 1: Step 1 Add a config to the proxy, generate a temp key -- Step 2: Queue a request to the proxy, using your generated_key -- Step 3: Poll the request - - -### Step 1 Add a config to the proxy, generate a temp key -```python -import requests -import time -import os - -# Set the base URL as needed -base_url = "https://api.litellm.ai" - -# Step 1 Add a config to the proxy, generate a temp key -# use the same model_name to load balance -config = { - "model_list": [ - { - "model_name": "gpt-3.5-turbo", - "litellm_params": { - "model": "gpt-3.5-turbo", - "api_key": os.environ['OPENAI_API_KEY'], - } - }, - { - "model_name": "gpt-3.5-turbo", - "litellm_params": { - "model": "azure/chatgpt-v-2", - "api_key": "", - "api_base": "https://openai-gpt-4-test-v-1.openai.azure.com/", - "api_version": "2023-07-01-preview" - } - } - ] -} - -response = requests.post( - url=f"{base_url}/key/generate", - json={ - "config": config, - "duration": "30d" # default to 30d, set it to 30m if you want a temp 30 minute key - }, - headers={ - "Authorization": "Bearer sk-hosted-litellm" # this is the key to use api.litellm.ai - } -) - -print("\nresponse from generating key", response.text) -print("\n json response from gen key", response.json()) - -generated_key = response.json()["key"] -print("\ngenerated key for proxy", generated_key) -``` - -#### Output -```shell -response from generating key {"key":"sk-...,"expires":"2023-12-22T03:43:57.615000+00:00"} -``` - -### Step 2: Queue a request to the proxy, using your generated_key -```python -print("Creating a job on the proxy") -job_response = requests.post( - url=f"{base_url}/queue/request", - json={ - 'model': 'gpt-3.5-turbo', - 'messages': [ - {'role': 'system', 'content': f'You are a helpful assistant. 
What is your name'}, - ], - }, - headers={ - "Authorization": f"Bearer {generated_key}" - } -) -print(job_response.status_code) -print(job_response.text) -print("\nResponse from creating job", job_response.text) -job_response = job_response.json() -job_id = job_response["id"] -polling_url = job_response["url"] -polling_url = f"{base_url}{polling_url}" -print("\nCreated Job, Polling Url", polling_url) -``` - -#### Output -```shell -Response from creating job -{"id":"0e3d9e98-5d56-4d07-9cc8-c34b7e6658d7","url":"/queue/response/0e3d9e98-5d56-4d07-9cc8-c34b7e6658d7","eta":5,"status":"queued"} -``` - -### Step 3: Poll the request -```python -while True: - try: - print("\nPolling URL", polling_url) - polling_response = requests.get( - url=polling_url, - headers={ - "Authorization": f"Bearer {generated_key}" - } - ) - print("\nResponse from polling url", polling_response.text) - polling_response = polling_response.json() - status = polling_response.get("status", None) - if status == "finished": - llm_response = polling_response["result"] - print("LLM Response") - print(llm_response) - break - time.sleep(0.5) - except Exception as e: - print("got exception in polling", e) - break -``` - -#### Output -```shell -Polling URL https://api.litellm.ai/queue/response/0e3d9e98-5d56-4d07-9cc8-c34b7e6658d7 - -Response from polling url {"status":"queued"} - -Polling URL https://api.litellm.ai/queue/response/0e3d9e98-5d56-4d07-9cc8-c34b7e6658d7 - -Response from polling url {"status":"queued"} - -Polling URL https://api.litellm.ai/queue/response/0e3d9e98-5d56-4d07-9cc8-c34b7e6658d7 - -Response from polling url -{"status":"finished","result":{"id":"chatcmpl-8NYRce4IeI4NzYyodT3NNp8fk5cSW","choices":[{"finish_reason":"stop","index":0,"message":{"content":"I am an AI assistant and do not have a physical presence or personal identity. 
You can simply refer to me as \"Assistant.\" How may I assist you today?","role":"assistant"}}],"created":1700624639,"model":"gpt-3.5-turbo-0613","object":"chat.completion","system_fingerprint":null,"usage":{"completion_tokens":33,"prompt_tokens":17,"total_tokens":50}}} - -``` \ No newline at end of file diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js index 11f81fa4d..069faa48a 100644 --- a/docs/my-website/sidebars.js +++ b/docs/my-website/sidebars.js @@ -61,11 +61,13 @@ const sidebars = { }, items: [ "providers/openai", + "providers/openai_compatible", "providers/azure", "providers/huggingface", "providers/ollama", "providers/vertex", "providers/palm", + "providers/mistral", "providers/anthropic", "providers/aws_sagemaker", "providers/bedrock", @@ -96,10 +98,14 @@ const sidebars = { }, items: [ "proxy/quick_start", - "proxy/configs", + "proxy/configs", + "proxy/embedding", "proxy/load_balancing", "proxy/virtual_keys", "proxy/model_management", + "proxy/reliability", + "proxy/health", + "proxy/call_hooks", "proxy/caching", "proxy/logging", "proxy/cli", @@ -189,6 +195,7 @@ const sidebars = { slug: '/project', }, items: [ + "projects/Docq.AI", "projects/OpenInterpreter", "projects/FastREPL", "projects/PROMPTMETHEUS", diff --git a/docs/my-website/yarn.lock b/docs/my-website/yarn.lock index 5c9fa101d..7e431def2 100644 --- a/docs/my-website/yarn.lock +++ b/docs/my-website/yarn.lock @@ -84,7 +84,7 @@ "@algolia/requester-common" "4.19.1" "@algolia/transporter" "4.19.1" -"@algolia/client-search@>= 4.9.1 < 6", "@algolia/client-search@4.19.1": +"@algolia/client-search@4.19.1": version "4.19.1" resolved "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.19.1.tgz" integrity sha512-mBecfMFS4N+yK/p0ZbK53vrZbL6OtWMk8YmnOv1i0LXx4pelY8TFhqKoTit3NPVPwoSNN0vdSN9dTu1xr1XOVw== @@ -146,6 +146,13 @@ "@jridgewell/gen-mapping" "^0.3.0" "@jridgewell/trace-mapping" "^0.3.9" +"@babel/code-frame@7.10.4", "@babel/code-frame@^7.5.5": + version "7.10.4" + resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz" + integrity sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg== + dependencies: + "@babel/highlight" "^7.10.4" + "@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.22.10", "@babel/code-frame@^7.22.13", "@babel/code-frame@^7.8.3": version "7.22.13" resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz" @@ -154,39 +161,11 @@ "@babel/highlight" "^7.22.13" chalk "^2.4.2" -"@babel/code-frame@^7.5.5", "@babel/code-frame@7.10.4": - version "7.10.4" - resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz" - integrity sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg== - dependencies: - "@babel/highlight" "^7.10.4" - "@babel/compat-data@^7.22.5", "@babel/compat-data@^7.22.6", "@babel/compat-data@^7.22.9": version "7.22.9" resolved "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.9.tgz" integrity sha512-5UamI7xkUcJ3i9qVDS+KFDEK8/7oJ55/sJMB1Ge7IEapr7KfdfV/HErR+koZwOfd+SgtFKOKRhRakdg++DcJpQ== -"@babel/core@^7.0.0", "@babel/core@^7.0.0-0", "@babel/core@^7.0.0-0 || ^8.0.0-0 <8.0.0", "@babel/core@^7.11.6", "@babel/core@^7.12.0", "@babel/core@^7.12.3", "@babel/core@^7.13.0", "@babel/core@^7.18.6", "@babel/core@^7.19.6", "@babel/core@^7.4.0 || ^8.0.0-0 <8.0.0": - version "7.22.10" - resolved "https://registry.npmjs.org/@babel/core/-/core-7.22.10.tgz" - 
integrity sha512-fTmqbbUBAwCcre6zPzNngvsI0aNrPZe77AeqvDxWM9Nm+04RrJ3CAmGHA9f7lJQY6ZMhRztNemy4uslDxTX4Qw== - dependencies: - "@ampproject/remapping" "^2.2.0" - "@babel/code-frame" "^7.22.10" - "@babel/generator" "^7.22.10" - "@babel/helper-compilation-targets" "^7.22.10" - "@babel/helper-module-transforms" "^7.22.9" - "@babel/helpers" "^7.22.10" - "@babel/parser" "^7.22.10" - "@babel/template" "^7.22.5" - "@babel/traverse" "^7.22.10" - "@babel/types" "^7.22.10" - convert-source-map "^1.7.0" - debug "^4.1.0" - gensync "^1.0.0-beta.2" - json5 "^2.2.2" - semver "^6.3.1" - "@babel/core@7.12.9": version "7.12.9" resolved "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz" @@ -209,6 +188,27 @@ semver "^5.4.1" source-map "^0.5.0" +"@babel/core@^7.12.3", "@babel/core@^7.18.6", "@babel/core@^7.19.6": + version "7.22.10" + resolved "https://registry.npmjs.org/@babel/core/-/core-7.22.10.tgz" + integrity sha512-fTmqbbUBAwCcre6zPzNngvsI0aNrPZe77AeqvDxWM9Nm+04RrJ3CAmGHA9f7lJQY6ZMhRztNemy4uslDxTX4Qw== + dependencies: + "@ampproject/remapping" "^2.2.0" + "@babel/code-frame" "^7.22.10" + "@babel/generator" "^7.22.10" + "@babel/helper-compilation-targets" "^7.22.10" + "@babel/helper-module-transforms" "^7.22.9" + "@babel/helpers" "^7.22.10" + "@babel/parser" "^7.22.10" + "@babel/template" "^7.22.5" + "@babel/traverse" "^7.22.10" + "@babel/types" "^7.22.10" + convert-source-map "^1.7.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.2" + semver "^6.3.1" + "@babel/generator@^7.12.5", "@babel/generator@^7.18.7", "@babel/generator@^7.22.10", "@babel/generator@^7.23.3": version "7.23.3" resolved "https://registry.npmjs.org/@babel/generator/-/generator-7.23.3.tgz" @@ -331,16 +331,16 @@ dependencies: "@babel/types" "^7.22.5" -"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3": - version "7.22.5" - resolved "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz" - integrity sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg== - "@babel/helper-plugin-utils@7.10.4": version "7.10.4" resolved "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz" integrity sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg== +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3": + version "7.22.5" + resolved "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz" + integrity sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg== + "@babel/helper-remap-async-to-generator@^7.22.5", "@babel/helper-remap-async-to-generator@^7.22.9": version "7.22.9" resolved "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.9.tgz" @@ -451,7 +451,7 @@ "@babel/helper-create-class-features-plugin" "^7.18.6" "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-proposal-object-rest-spread@^7.12.1", "@babel/plugin-proposal-object-rest-spread@7.12.1": 
+"@babel/plugin-proposal-object-rest-spread@7.12.1", "@babel/plugin-proposal-object-rest-spread@^7.12.1": version "7.12.1" resolved "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz" integrity sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA== @@ -528,13 +528,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-jsx@^7.22.5": - version "7.22.5" - resolved "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz" - integrity sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg== - dependencies: - "@babel/helper-plugin-utils" "^7.22.5" - "@babel/plugin-syntax-jsx@7.12.1": version "7.12.1" resolved "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz" @@ -542,6 +535,13 @@ dependencies: "@babel/helper-plugin-utils" "^7.10.4" +"@babel/plugin-syntax-jsx@^7.22.5": + version "7.22.5" + resolved "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz" + integrity sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-logical-assignment-operators@^7.10.4": version "7.10.4" resolved "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz" @@ -563,7 +563,7 @@ dependencies: "@babel/helper-plugin-utils" "^7.10.4" -"@babel/plugin-syntax-object-rest-spread@^7.8.0", "@babel/plugin-syntax-object-rest-spread@^7.8.3", "@babel/plugin-syntax-object-rest-spread@7.8.3": +"@babel/plugin-syntax-object-rest-spread@7.8.3", "@babel/plugin-syntax-object-rest-spread@^7.8.0", "@babel/plugin-syntax-object-rest-spread@^7.8.3": version "7.8.3" resolved "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz" integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA== @@ -1279,7 +1279,7 @@ "@docsearch/css" "3.5.1" algoliasearch "^4.0.0" -"@docusaurus/core@^2.0.0-alpha.60 || ^2.0.0", "@docusaurus/core@2.4.1": +"@docusaurus/core@2.4.1": version "2.4.1" resolved "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.1.tgz" integrity sha512-SNsY7PshK3Ri7vtsLXVeAJGS50nJN3RgF836zkyUfAD01Fq+sAk5EwWgLw+nnm5KVNGDu7PRR2kRGDsWvqpo0g== @@ -1502,7 +1502,7 @@ "@docusaurus/utils-validation" "2.4.1" tslib "^2.4.0" -"@docusaurus/plugin-google-gtag@^2.4.1", "@docusaurus/plugin-google-gtag@2.4.1": +"@docusaurus/plugin-google-gtag@2.4.1", "@docusaurus/plugin-google-gtag@^2.4.1": version "2.4.1" resolved "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.4.1.tgz" integrity sha512-mKIefK+2kGTQBYvloNEKtDmnRD7bxHLsBcxgnbt4oZwzi2nxCGjPX6+9SQO2KCN5HZbNrYmGo5GJfMgoRvy6uA== @@ -1573,7 +1573,7 @@ "@docusaurus/theme-search-algolia" "2.4.1" "@docusaurus/types" "2.4.1" -"@docusaurus/react-loadable@5.5.2": +"@docusaurus/react-loadable@5.5.2", "react-loadable@npm:@docusaurus/react-loadable@5.5.2": version "5.5.2" resolved "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz" integrity sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ== @@ -1671,7 +1671,7 @@ fs-extra "^10.1.0" tslib "^2.4.0" -"@docusaurus/types@*", "@docusaurus/types@2.4.1": +"@docusaurus/types@2.4.1": version "2.4.1" resolved 
"https://registry.npmjs.org/@docusaurus/types/-/types-2.4.1.tgz" integrity sha512-0R+cbhpMkhbRXX138UOc/2XZFF8hiZa6ooZAEEJFp5scytzCw4tC1gChMFXrpa3d2tYE6AX8IrOEpSonLmfQuQ== @@ -1857,16 +1857,16 @@ "@nodelib/fs.stat" "2.0.5" run-parallel "^1.1.9" +"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2": + version "2.0.5" + resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz" + integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== + "@nodelib/fs.stat@^1.1.2": version "1.1.3" resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz" integrity sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw== -"@nodelib/fs.stat@^2.0.2", "@nodelib/fs.stat@2.0.5": - version "2.0.5" - resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz" - integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== - "@nodelib/fs.walk@^1.2.3": version "1.2.8" resolved "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz" @@ -1975,7 +1975,7 @@ "@svgr/babel-plugin-transform-react-native-svg" "^6.5.1" "@svgr/babel-plugin-transform-svg-component" "^6.5.1" -"@svgr/core@*", "@svgr/core@^6.0.0", "@svgr/core@^6.5.1": +"@svgr/core@^6.5.1": version "6.5.1" resolved "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz" integrity sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw== @@ -2248,7 +2248,7 @@ "@types/history" "^4.7.11" "@types/react" "*" -"@types/react@*", "@types/react@>= 16.8.0 < 19.0.0": +"@types/react@*": version "18.2.20" resolved "https://registry.npmjs.org/@types/react/-/react-18.2.20.tgz" integrity sha512-WKNtmsLWJM/3D5mG4U84cysVY31ivmyw85dE84fOCk5Hx78wezB/XEjVPWl2JTZ5FkEeaTJf+VgUAUn3PE7Isw== @@ -2329,7 +2329,7 @@ dependencies: "@types/yargs-parser" "*" -"@webassemblyjs/ast@^1.11.5", "@webassemblyjs/ast@1.11.6": +"@webassemblyjs/ast@1.11.6", "@webassemblyjs/ast@^1.11.5": version "1.11.6" resolved "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz" integrity sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q== @@ -2430,7 +2430,7 @@ "@webassemblyjs/wasm-gen" "1.11.6" "@webassemblyjs/wasm-parser" "1.11.6" -"@webassemblyjs/wasm-parser@^1.11.5", "@webassemblyjs/wasm-parser@1.11.6": +"@webassemblyjs/wasm-parser@1.11.6", "@webassemblyjs/wasm-parser@^1.11.5": version "1.11.6" resolved "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz" integrity sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ== @@ -2483,21 +2483,21 @@ acorn-walk@^8.0.0: resolved "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz" integrity sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA== -acorn@^8, acorn@^8.0.4, acorn@^8.7.1, acorn@^8.8.2: +acorn@^8.0.4, acorn@^8.7.1, acorn@^8.8.2: version "8.10.0" resolved "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz" integrity sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw== -address@^1.0.1, address@^1.1.2: - version "1.2.2" - resolved "https://registry.npmjs.org/address/-/address-1.2.2.tgz" - integrity sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA== - address@1.1.2: version "1.1.2" resolved "https://registry.npmjs.org/address/-/address-1.1.2.tgz" integrity 
sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA== +address@^1.0.1, address@^1.1.2: + version "1.2.2" + resolved "https://registry.npmjs.org/address/-/address-1.2.2.tgz" + integrity sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA== + aggregate-error@^3.0.0: version "3.1.0" resolved "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz" @@ -2540,7 +2540,7 @@ ajv-keywords@^5.1.0: dependencies: fast-deep-equal "^3.1.3" -ajv@^6.12.2, ajv@^6.12.3, ajv@^6.12.4, ajv@^6.12.5, ajv@^6.9.1: +ajv@^6.12.2, ajv@^6.12.3, ajv@^6.12.4, ajv@^6.12.5: version "6.12.6" resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz" integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== @@ -2550,17 +2550,7 @@ ajv@^6.12.2, ajv@^6.12.3, ajv@^6.12.4, ajv@^6.12.5, ajv@^6.9.1: json-schema-traverse "^0.4.1" uri-js "^4.2.2" -ajv@^8.0.0: - version "8.12.0" - resolved "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz" - integrity sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA== - dependencies: - fast-deep-equal "^3.1.1" - json-schema-traverse "^1.0.0" - require-from-string "^2.0.2" - uri-js "^4.2.2" - -ajv@^8.8.2, ajv@^8.9.0: +ajv@^8.0.0, ajv@^8.9.0: version "8.12.0" resolved "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz" integrity sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA== @@ -2577,7 +2567,7 @@ algoliasearch-helper@^3.10.0: dependencies: "@algolia/events" "^4.0.1" -algoliasearch@^4.0.0, algoliasearch@^4.13.1, "algoliasearch@>= 3.1 < 6", "algoliasearch@>= 4.9.1 < 6": +algoliasearch@^4.0.0, algoliasearch@^4.13.1: version "4.19.1" resolved "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.19.1.tgz" integrity sha512-IJF5b93b2MgAzcE/tuzW0yOPnuUyRgGAtaPv5UUywXM8kzqfdwZTO4sPJBzoGz1eOy6H9uEchsJsBFTELZSu+g== @@ -2735,16 +2725,16 @@ array-find-index@^1.0.1: resolved "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz" integrity sha512-M1HQyIXcBGtVywBt8WVdim+lrNaK7VHp99Qt5pSNziXznKHViIBbXWtfRTpEFpF/c4FdfxNAsCCwPp5phBYJtw== -array-flatten@^2.1.2: - version "2.1.2" - resolved "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz" - integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ== - array-flatten@1.1.1: version "1.1.1" resolved "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz" integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg== +array-flatten@^2.1.2: + version "2.1.2" + resolved "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz" + integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ== + array-union@^1.0.1: version "1.0.2" resolved "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz" @@ -2839,7 +2829,7 @@ asn1@~0.2.3: dependencies: safer-buffer "~2.1.0" -assert-plus@^1.0.0, assert-plus@1.0.0: +assert-plus@1.0.0, assert-plus@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz" integrity sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw== @@ -3015,6 +3005,16 @@ balanced-match@^1.0.0: resolved "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz" integrity 
sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== +base16@^1.0.0: + version "1.0.0" + resolved "https://registry.npmjs.org/base16/-/base16-1.0.0.tgz" + integrity sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ== + +base64-js@^1.3.1: + version "1.5.1" + resolved "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz" + integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== + base@^0.11.1: version "0.11.2" resolved "https://registry.npmjs.org/base/-/base-0.11.2.tgz" @@ -3028,16 +3028,6 @@ base@^0.11.1: mixin-deep "^1.2.0" pascalcase "^0.1.1" -base16@^1.0.0: - version "1.0.0" - resolved "https://registry.npmjs.org/base16/-/base16-1.0.0.tgz" - integrity sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ== - -base64-js@^1.3.1: - version "1.5.1" - resolved "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz" - integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== - batch@0.6.1: version "0.6.1" resolved "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz" @@ -3250,17 +3240,7 @@ braces@^3.0.2, braces@~3.0.2: dependencies: fill-range "^7.0.1" -browserslist@^4.0.0, browserslist@^4.14.5, browserslist@^4.18.1, browserslist@^4.21.4, browserslist@^4.21.5, browserslist@^4.21.9, "browserslist@>= 4.21.0": - version "4.21.10" - resolved "https://registry.npmjs.org/browserslist/-/browserslist-4.21.10.tgz" - integrity sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ== - dependencies: - caniuse-lite "^1.0.30001517" - electron-to-chromium "^1.4.477" - node-releases "^2.0.13" - update-browserslist-db "^1.0.11" - -browserslist@^4.12.0, browserslist@4.14.2: +browserslist@4.14.2, browserslist@^4.12.0: version "4.14.2" resolved "https://registry.npmjs.org/browserslist/-/browserslist-4.14.2.tgz" integrity sha512-HI4lPveGKUR0x2StIz+2FXfDk9SfVMrxn6PLh1JeGUwcuoDkdKZebWiyLRJ68iIPDpMI4JLVDf7S7XzslgWOhw== @@ -3270,6 +3250,16 @@ browserslist@^4.12.0, browserslist@4.14.2: escalade "^3.0.2" node-releases "^1.1.61" +browserslist@^4.0.0, browserslist@^4.14.5, browserslist@^4.18.1, browserslist@^4.21.4, browserslist@^4.21.5, browserslist@^4.21.9: + version "4.21.10" + resolved "https://registry.npmjs.org/browserslist/-/browserslist-4.21.10.tgz" + integrity sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ== + dependencies: + caniuse-lite "^1.0.30001517" + electron-to-chromium "^1.4.477" + node-releases "^2.0.13" + update-browserslist-db "^1.0.11" + buffer-alloc-unsafe@^1.1.0: version "1.1.0" resolved "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz" @@ -3483,6 +3473,15 @@ chainsaw@~0.1.0: dependencies: traverse ">=0.3.0 <0.4" +chalk@2.4.2, chalk@^2.4.1, chalk@^2.4.2: + version "2.4.2" + resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + chalk@^1.0.0: version "1.1.3" resolved "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz" @@ -3494,24 +3493,6 @@ chalk@^1.0.0: strip-ansi "^3.0.0" supports-color "^2.0.0" -chalk@^2.4.1: - version "2.4.2" - resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz" - integrity 
sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== - dependencies: - ansi-styles "^3.2.1" - escape-string-regexp "^1.0.5" - supports-color "^5.3.0" - -chalk@^2.4.2: - version "2.4.2" - resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz" - integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== - dependencies: - ansi-styles "^3.2.1" - escape-string-regexp "^1.0.5" - supports-color "^5.3.0" - chalk@^3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz" @@ -3528,15 +3509,6 @@ chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.2: ansi-styles "^4.1.0" supports-color "^7.1.0" -chalk@2.4.2: - version "2.4.2" - resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz" - integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== - dependencies: - ansi-styles "^3.2.1" - escape-string-regexp "^1.0.5" - supports-color "^5.3.0" - character-entities-legacy@^1.0.0: version "1.1.4" resolved "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz" @@ -3564,19 +3536,6 @@ cheerio-select@^2.1.0: domhandler "^5.0.3" domutils "^3.0.1" -cheerio@^1.0.0-rc.12, cheerio@^1.0.0-rc.3: - version "1.0.0-rc.12" - resolved "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz" - integrity sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q== - dependencies: - cheerio-select "^2.1.0" - dom-serializer "^2.0.0" - domhandler "^5.0.3" - domutils "^3.0.1" - htmlparser2 "^8.0.1" - parse5 "^7.0.0" - parse5-htmlparser2-tree-adapter "^7.0.0" - cheerio@0.22.0: version "0.22.0" resolved "https://registry.npmjs.org/cheerio/-/cheerio-0.22.0.tgz" @@ -3599,6 +3558,19 @@ cheerio@0.22.0: lodash.reject "^4.4.0" lodash.some "^4.4.0" +cheerio@^1.0.0-rc.12, cheerio@^1.0.0-rc.3: + version "1.0.0-rc.12" + resolved "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz" + integrity sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q== + dependencies: + cheerio-select "^2.1.0" + dom-serializer "^2.0.0" + domhandler "^5.0.3" + domutils "^3.0.1" + htmlparser2 "^8.0.1" + parse5 "^7.0.0" + parse5-htmlparser2-tree-adapter "^7.0.0" + chokidar@^3.4.2, chokidar@^3.5.3: version "3.5.3" resolved "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz" @@ -3689,13 +3661,6 @@ clone-deep@^4.0.1: kind-of "^6.0.2" shallow-clone "^3.0.0" -clone-response@^1.0.2: - version "1.0.3" - resolved "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz" - integrity sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA== - dependencies: - mimic-response "^1.0.0" - clone-response@1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz" @@ -3703,6 +3668,13 @@ clone-response@1.0.2: dependencies: mimic-response "^1.0.0" +clone-response@^1.0.2: + version "1.0.3" + resolved "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz" + integrity sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA== + dependencies: + mimic-response "^1.0.0" + clsx@^1.2.1: version "1.2.1" resolved "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz" @@ -3749,16 +3721,16 @@ color-convert@^2.0.1: dependencies: color-name "~1.1.4" -color-name@^1.0.0, color-name@~1.1.4: - version "1.1.4" - resolved 
"https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz" - integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== - color-name@1.1.3: version "1.1.3" resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz" integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== +color-name@^1.0.0, color-name@~1.1.4: + version "1.1.4" + resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz" + integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== + color-string@^1.6.0, color-string@^1.9.0: version "1.9.1" resolved "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz" @@ -3815,17 +3787,7 @@ comma-separated-tokens@^1.0.0: resolved "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz" integrity sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw== -commander@^2.19.0: - version "2.20.3" - resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz" - integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== - -commander@^2.20.0: - version "2.20.3" - resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz" - integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== - -commander@^2.8.1: +commander@^2.19.0, commander@^2.20.0, commander@^2.8.1: version "2.20.3" resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz" integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== @@ -3947,7 +3909,7 @@ console-stream@^0.1.1: resolved "https://registry.npmjs.org/consolidated-events/-/consolidated-events-2.0.2.tgz" integrity sha512-2/uRVMdRypf5z/TW/ncD/66l75P5hH2vM/GR8Jf8HLc2xnfJtmina6F6du8+v4Z2vTrMo7jC+W1tmEEuuELgkQ== -content-disposition@^0.5.2, content-disposition@0.5.2: +content-disposition@0.5.2, content-disposition@^0.5.2: version "0.5.2" resolved "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz" integrity sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA== @@ -4028,16 +3990,16 @@ core-js@^3.23.3: resolved "https://registry.npmjs.org/core-js/-/core-js-3.32.0.tgz" integrity sha512-rd4rYZNlF3WuoYuRIDEmbR/ga9CeuWX9U05umAvgrrZoHY4Z++cp/xwPQMvUpBB4Ag6J8KfD80G0zwCyaSxDww== -core-util-is@~1.0.0: - version "1.0.3" - resolved "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz" - integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ== - core-util-is@1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz" integrity sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ== +core-util-is@~1.0.0: + version "1.0.3" + resolved "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz" + integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ== + cosmiconfig@^5.0.0: version "5.2.1" resolved "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz" @@ -4087,6 +4049,15 @@ cross-fetch@^3.1.5: dependencies: node-fetch "^2.6.12" +cross-spawn@7.0.3, cross-spawn@^7.0.3: + version "7.0.3" + resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz" + integrity 
sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + dependencies: + path-key "^3.1.0" + shebang-command "^2.0.0" + which "^2.0.1" + cross-spawn@^5.0.1: version "5.1.0" resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz" @@ -4107,15 +4078,6 @@ cross-spawn@^6.0.0: shebang-command "^1.2.0" which "^1.2.9" -cross-spawn@^7.0.3, cross-spawn@7.0.3: - version "7.0.3" - resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== - dependencies: - path-key "^3.1.0" - shebang-command "^2.0.0" - which "^2.0.1" - crowdin-cli@^0.3.0: version "0.3.0" resolved "https://registry.npmjs.org/crowdin-cli/-/crowdin-cli-0.3.0.tgz" @@ -4130,7 +4092,7 @@ crypto-random-string@^2.0.0: resolved "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz" integrity sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA== -css-color-names@^0.0.4, css-color-names@0.0.4: +css-color-names@0.0.4, css-color-names@^0.0.4: version "0.0.4" resolved "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz" integrity sha512-zj5D7X1U2h2zsXOAM8EyUREBnnts6H+Jm+d1M2DbiQQcUtnqgQsMrdo8JW9R80YFUmIdBZeMu5wvYM7hcgWP/Q== @@ -4226,6 +4188,14 @@ css-selector-parser@^1.0.0: resolved "https://registry.npmjs.org/css-selector-parser/-/css-selector-parser-1.4.1.tgz" integrity sha512-HYPSb7y/Z7BNDCOrakL4raGO2zltZkbeXyAd6Tg9obzix6QhzxCotdBl6VT0Dv4vZfJGVz3WL/xaEI9Ly3ul0g== +css-tree@1.0.0-alpha.37: + version "1.0.0-alpha.37" + resolved "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz" + integrity sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg== + dependencies: + mdn-data "2.0.4" + source-map "^0.6.1" + css-tree@^1.1.2, css-tree@^1.1.3: version "1.1.3" resolved "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz" @@ -4234,13 +4204,10 @@ css-tree@^1.1.2, css-tree@^1.1.3: mdn-data "2.0.14" source-map "^0.6.1" -css-tree@1.0.0-alpha.37: - version "1.0.0-alpha.37" - resolved "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz" - integrity sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg== - dependencies: - mdn-data "2.0.4" - source-map "^0.6.1" +css-what@2.1: + version "2.1.3" + resolved "https://registry.npmjs.org/css-what/-/css-what-2.1.3.tgz" + integrity sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg== css-what@^3.2.1: version "3.4.2" @@ -4252,11 +4219,6 @@ css-what@^6.0.1, css-what@^6.1.0: resolved "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz" integrity sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw== -css-what@2.1: - version "2.1.3" - resolved "https://registry.npmjs.org/css-what/-/css-what-2.1.3.tgz" - integrity sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg== - cssesc@^3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz" @@ -4417,55 +4379,20 @@ dashdash@^1.12.0: dependencies: assert-plus "^1.0.0" -debug@^2.2.0: +debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.0: version "2.6.9" resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz" integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== dependencies: ms "2.0.0" 
-debug@^2.3.3: - version "2.6.9" - resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz" - integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== - dependencies: - ms "2.0.0" - -debug@^2.6.0: - version "2.6.9" - resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz" - integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== - dependencies: - ms "2.0.0" - -debug@^3.1.0: - version "3.2.7" - resolved "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz" - integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ== - dependencies: - ms "^2.1.1" - -debug@^3.2.7: - version "3.2.7" - resolved "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz" - integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ== - dependencies: - ms "^2.1.1" - -debug@^4.1.0, debug@^4.1.1, debug@4: +debug@4, debug@^4.1.0, debug@^4.1.1: version "4.3.4" resolved "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz" integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== dependencies: ms "2.1.2" -debug@2.6.9: - version "2.6.9" - resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz" - integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== - dependencies: - ms "2.0.0" - debug@4.3.1: version "4.3.1" resolved "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz" @@ -4473,6 +4400,13 @@ debug@4.3.1: dependencies: ms "2.1.2" +debug@^3.1.0, debug@^3.2.7: + version "3.2.7" + resolved "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz" + integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ== + dependencies: + ms "^2.1.1" + decamelize@^1.1.2: version "1.2.0" resolved "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz" @@ -4640,16 +4574,16 @@ delayed-stream@~1.0.0: resolved "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz" integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ== -depd@~1.1.2: - version "1.1.2" - resolved "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz" - integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ== - depd@2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz" integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw== +depd@~1.1.2: + version "1.1.2" + resolved "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz" + integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ== + destroy@1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz" @@ -4672,7 +4606,7 @@ detect-node@^2.0.4: resolved "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz" integrity sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g== -detect-port-alt@^1.1.6, detect-port-alt@1.1.6: +detect-port-alt@1.1.6, detect-port-alt@^1.1.6: version "1.1.6" resolved "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz" integrity sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q== @@ -4693,13 +4627,6 @@ diacritics-map@^0.1.0: resolved 
"https://registry.npmjs.org/diacritics-map/-/diacritics-map-0.1.0.tgz" integrity sha512-3omnDTYrGigU0i4cJjvaKwD52B8aoqyX/NEIkukFFkogBemsIbhSa1O414fpTp5nuszJG6lvQ5vBvDVNCbSsaQ== -dir-glob@^3.0.1: - version "3.0.1" - resolved "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz" - integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== - dependencies: - path-type "^4.0.0" - dir-glob@2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/dir-glob/-/dir-glob-2.0.0.tgz" @@ -4708,6 +4635,13 @@ dir-glob@2.0.0: arrify "^1.0.1" path-type "^3.0.0" +dir-glob@^3.0.1: + version "3.0.1" + resolved "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz" + integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== + dependencies: + path-type "^4.0.0" + direction@^1.0.0: version "1.0.4" resolved "https://registry.npmjs.org/direction/-/direction-1.0.4.tgz" @@ -4811,6 +4745,14 @@ dom-converter@^0.2.0: dependencies: utila "~0.4" +dom-serializer@0: + version "0.2.2" + resolved "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz" + integrity sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g== + dependencies: + domelementtype "^2.0.1" + entities "^2.0.0" + dom-serializer@^1.0.1: version "1.4.1" resolved "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz" @@ -4837,15 +4779,7 @@ dom-serializer@~0.1.0: domelementtype "^1.3.0" entities "^1.1.1" -dom-serializer@0: - version "0.2.2" - resolved "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz" - integrity sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g== - dependencies: - domelementtype "^2.0.1" - entities "^2.0.0" - -domelementtype@^1.3.0, domelementtype@^1.3.1, domelementtype@1: +domelementtype@1, domelementtype@^1.3.0, domelementtype@^1.3.1: version "1.3.1" resolved "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz" integrity sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w== @@ -4876,7 +4810,7 @@ domhandler@^5.0.2, domhandler@^5.0.3: dependencies: domelementtype "^2.3.0" -domutils@^1.5.1, domutils@1.5.1: +domutils@1.5.1, domutils@^1.5.1: version "1.5.1" resolved "https://registry.npmjs.org/domutils/-/domutils-1.5.1.tgz" integrity sha512-gSu5Oi/I+3wDENBsOWBiRK1eoGxcywYSqg3rR960/+EfY0CF4EX1VPkgHOZ3WiS/Jg2DtliF6BhWcHlfpYUcGw== @@ -4960,11 +4894,6 @@ download@^7.1.0: p-event "^2.1.0" pify "^3.0.0" -duplexer@^0.1.1, duplexer@^0.1.2: - version "0.1.2" - resolved "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz" - integrity sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg== - duplexer2@~0.1.4: version "0.1.4" resolved "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz" @@ -4977,6 +4906,11 @@ duplexer3@^0.1.4: resolved "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.5.tgz" integrity sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA== +duplexer@^0.1.1, duplexer@^0.1.2: + version "0.1.2" + resolved "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz" + integrity sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg== + eastasianwidth@^0.2.0: version "0.2.0" resolved "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz" @@ -5091,7 +5025,7 @@ enzyme-shallow-equal@^1.0.1, 
enzyme-shallow-equal@^1.0.5: has "^1.0.3" object-is "^1.1.5" -enzyme@^3.0.0, enzyme@^3.10.0: +enzyme@^3.10.0: version "3.11.0" resolved "https://registry.npmjs.org/enzyme/-/enzyme-3.11.0.tgz" integrity sha512-Dw8/Gs4vRjxY6/6i9wU0V+utmQO9kvh9XLnz3LIudviOnVYDEe2ec+0k+NQoMamn1VrjKgCUOWj5jG/5M5M0Qw== @@ -5228,21 +5162,16 @@ escape-html@^1.0.3, escape-html@~1.0.3: resolved "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz" integrity sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow== -escape-string-regexp@^1.0.2: - version "1.0.5" - resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz" - integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== - -escape-string-regexp@^1.0.5: - version "1.0.5" - resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz" - integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== - -escape-string-regexp@^2.0.0, escape-string-regexp@2.0.0: +escape-string-regexp@2.0.0, escape-string-regexp@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz" integrity sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w== +escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz" + integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== + escape-string-regexp@^4.0.0: version "4.0.0" resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz" @@ -5454,15 +5383,7 @@ extend-shallow@^2.0.1: dependencies: is-extendable "^0.1.0" -extend-shallow@^3.0.0: - version "3.0.2" - resolved "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz" - integrity sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q== - dependencies: - assign-symbols "^1.0.0" - is-extendable "^1.0.1" - -extend-shallow@^3.0.2: +extend-shallow@^3.0.0, extend-shallow@^3.0.2: version "3.0.2" resolved "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz" integrity sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q== @@ -5489,7 +5410,7 @@ extglob@^2.0.4: snapdragon "^0.8.1" to-regex "^3.0.1" -extsprintf@^1.2.0, extsprintf@1.3.0: +extsprintf@1.3.0, extsprintf@^1.2.0: version "1.3.0" resolved "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz" integrity sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g== @@ -5621,7 +5542,7 @@ figures@^1.3.5: escape-string-regexp "^1.0.5" object-assign "^4.1.0" -file-loader@*, file-loader@^6.2.0: +file-loader@^6.2.0: version "6.2.0" resolved "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz" integrity sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw== @@ -5629,6 +5550,11 @@ file-loader@*, file-loader@^6.2.0: loader-utils "^2.0.0" schema-utils "^3.0.0" +file-type@5.2.0, file-type@^5.2.0: + version "5.2.0" + resolved "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz" + integrity sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ== + file-type@^10.4.0, file-type@^10.7.0: version "10.11.0" resolved 
"https://registry.npmjs.org/file-type/-/file-type-10.11.0.tgz" @@ -5644,11 +5570,6 @@ file-type@^4.2.0: resolved "https://registry.npmjs.org/file-type/-/file-type-4.4.0.tgz" integrity sha512-f2UbFQEk7LXgWpi5ntcO86OeA/cC80fuDDDaX/fZ2ZGel+AF7leRQqBBW1eJNiiQkrZlAoM6P+VYP5P6bOlDEQ== -file-type@^5.2.0: - version "5.2.0" - resolved "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz" - integrity sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ== - file-type@^6.1.0: version "6.2.0" resolved "https://registry.npmjs.org/file-type/-/file-type-6.2.0.tgz" @@ -5659,11 +5580,6 @@ file-type@^8.1.0: resolved "https://registry.npmjs.org/file-type/-/file-type-8.1.0.tgz" integrity sha512-qyQ0pzAy78gVoJsmYeNgl8uH8yKhr1lVhW7JbzJmnlRi0I4R2eEDEJZVKG8agpDnLpacwNbDhLNG/LMdxHD2YQ== -file-type@5.2.0: - version "5.2.0" - resolved "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz" - integrity sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ== - filename-reserved-regex@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/filename-reserved-regex/-/filename-reserved-regex-2.0.0.tgz" @@ -5678,16 +5594,16 @@ filenamify@^2.0.0: strip-outer "^1.0.0" trim-repeated "^1.0.0" -filesize@^8.0.6: - version "8.0.7" - resolved "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz" - integrity sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ== - filesize@6.1.0: version "6.1.0" resolved "https://registry.npmjs.org/filesize/-/filesize-6.1.0.tgz" integrity sha512-LpCHtPQ3sFx67z+uh2HnSyWSLLu5Jxo21795uRDuar/EOuYWXib5EmPaGIBuSnRqH2IODiKA2k5re/K9OnN/Yg== +filesize@^8.0.6: + version "8.0.7" + resolved "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz" + integrity sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ== + fill-range@^2.1.0: version "2.2.4" resolved "https://registry.npmjs.org/fill-range/-/fill-range-2.2.4.tgz" @@ -5747,6 +5663,14 @@ find-cache-dir@^3.3.1: make-dir "^3.0.2" pkg-dir "^4.1.0" +find-up@4.1.0, find-up@^4.0.0: + version "4.1.0" + resolved "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz" + integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== + dependencies: + locate-path "^5.0.0" + path-exists "^4.0.0" + find-up@^1.0.0: version "1.1.2" resolved "https://registry.npmjs.org/find-up/-/find-up-1.1.2.tgz" @@ -5762,14 +5686,6 @@ find-up@^3.0.0: dependencies: locate-path "^3.0.0" -find-up@^4.0.0, find-up@4.1.0: - version "4.1.0" - resolved "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz" - integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== - dependencies: - locate-path "^5.0.0" - path-exists "^4.0.0" - find-up@^5.0.0: version "5.0.0" resolved "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz" @@ -5815,6 +5731,19 @@ forever-agent@~0.6.1: resolved "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz" integrity sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw== +fork-ts-checker-webpack-plugin@4.1.6: + version "4.1.6" + resolved "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-4.1.6.tgz" + integrity sha512-DUxuQaKoqfNne8iikd14SAkh5uw4+8vNifp6gmA73yYNS6ywLIWSLD/n/mBzHQRpW3J7rbATEakmiA8JvkTyZw== + dependencies: + "@babel/code-frame" "^7.5.5" + chalk "^2.4.1" + micromatch "^3.1.10" + minimatch 
"^3.0.4" + semver "^5.6.0" + tapable "^1.0.0" + worker-rpc "^0.1.0" + fork-ts-checker-webpack-plugin@^6.5.0: version "6.5.3" resolved "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz" @@ -5834,19 +5763,6 @@ fork-ts-checker-webpack-plugin@^6.5.0: semver "^7.3.2" tapable "^1.0.0" -fork-ts-checker-webpack-plugin@4.1.6: - version "4.1.6" - resolved "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-4.1.6.tgz" - integrity sha512-DUxuQaKoqfNne8iikd14SAkh5uw4+8vNifp6gmA73yYNS6ywLIWSLD/n/mBzHQRpW3J7rbATEakmiA8JvkTyZw== - dependencies: - "@babel/code-frame" "^7.5.5" - chalk "^2.4.1" - micromatch "^3.1.10" - minimatch "^3.0.4" - semver "^5.6.0" - tapable "^1.0.0" - worker-rpc "^0.1.0" - form-data@~2.3.2: version "2.3.3" resolved "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz" @@ -5900,17 +5816,7 @@ fs-extra@^10.1.0: jsonfile "^6.0.1" universalify "^2.0.0" -fs-extra@^9.0.0: - version "9.1.0" - resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz" - integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== - dependencies: - at-least-node "^1.0.0" - graceful-fs "^4.2.0" - jsonfile "^6.0.1" - universalify "^2.0.0" - -fs-extra@^9.0.1: +fs-extra@^9.0.0, fs-extra@^9.0.1: version "9.1.0" resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz" integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== @@ -6019,6 +5925,11 @@ get-stdin@^4.0.1: resolved "https://registry.npmjs.org/get-stdin/-/get-stdin-4.0.1.tgz" integrity sha512-F5aQMywwJ2n85s4hJPTT9RPxGmubonuB10MNYo17/xph174n2MIR33HRguhzVag10O/npM7SPk73LMZNP+FaWw== +get-stream@3.0.0, get-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz" + integrity sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ== + get-stream@^2.2.0: version "2.3.1" resolved "https://registry.npmjs.org/get-stream/-/get-stream-2.3.1.tgz" @@ -6027,11 +5938,6 @@ get-stream@^2.2.0: object-assign "^4.0.1" pinkie-promise "^2.0.0" -get-stream@^3.0.0, get-stream@3.0.0: - version "3.0.0" - resolved "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz" - integrity sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ== - get-stream@^4.0.0, get-stream@^4.1.0: version "4.1.0" resolved "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz" @@ -6154,7 +6060,7 @@ global-dirs@^3.0.0: dependencies: ini "2.0.0" -global-modules@^2.0.0, global-modules@2.0.0: +global-modules@2.0.0, global-modules@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz" integrity sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A== @@ -6182,6 +6088,18 @@ globalthis@^1.0.3: dependencies: define-properties "^1.1.3" +globby@11.0.1: + version "11.0.1" + resolved "https://registry.npmjs.org/globby/-/globby-11.0.1.tgz" + integrity sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ== + dependencies: + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.1.1" + ignore "^5.1.4" + merge2 "^1.3.0" + slash "^3.0.0" + globby@^11.0.1, globby@^11.0.4, globby@^11.1.0: version "11.1.0" resolved "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz" @@ -6218,18 +6136,6 @@ globby@^8.0.1: pify "^3.0.0" slash "^1.0.0" -globby@11.0.1: 
- version "11.0.1" - resolved "https://registry.npmjs.org/globby/-/globby-11.0.1.tgz" - integrity sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ== - dependencies: - array-union "^2.1.0" - dir-glob "^3.0.1" - fast-glob "^3.1.1" - ignore "^5.1.4" - merge2 "^1.3.0" - slash "^3.0.0" - globule@^1.0.0: version "1.3.4" resolved "https://registry.npmjs.org/globule/-/globule-1.3.4.tgz" @@ -6341,13 +6247,6 @@ gulp-header@^1.7.1: lodash.template "^4.4.0" through2 "^2.0.0" -gzip-size@^6.0.0: - version "6.0.0" - resolved "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz" - integrity sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q== - dependencies: - duplexer "^0.1.2" - gzip-size@5.1.1: version "5.1.1" resolved "https://registry.npmjs.org/gzip-size/-/gzip-size-5.1.1.tgz" @@ -6356,6 +6255,13 @@ gzip-size@5.1.1: duplexer "^0.1.1" pify "^4.0.1" +gzip-size@^6.0.0: + version "6.0.0" + resolved "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz" + integrity sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q== + dependencies: + duplexer "^0.1.2" + handle-thing@^2.0.0: version "2.0.1" resolved "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz" @@ -6750,31 +6656,21 @@ htmlparser2@^8.0.1: domutils "^3.0.1" entities "^4.4.0" -http-cache-semantics@^4.0.0: - version "4.1.1" - resolved "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz" - integrity sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ== - http-cache-semantics@3.8.1: version "3.8.1" resolved "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz" integrity sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w== +http-cache-semantics@^4.0.0: + version "4.1.1" + resolved "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz" + integrity sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ== + http-deceiver@^1.2.7: version "1.2.7" resolved "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz" integrity sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw== -http-errors@~1.6.2: - version "1.6.3" - resolved "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz" - integrity sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A== - dependencies: - depd "~1.1.2" - inherits "2.0.3" - setprototypeof "1.1.0" - statuses ">= 1.4.0 < 2" - http-errors@2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz" @@ -6786,6 +6682,16 @@ http-errors@2.0.0: statuses "2.0.1" toidentifier "1.0.1" +http-errors@~1.6.2: + version "1.6.3" + resolved "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz" + integrity sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A== + dependencies: + depd "~1.1.2" + inherits "2.0.3" + setprototypeof "1.1.0" + statuses ">= 1.4.0 < 2" + http-parser-js@>=0.5.1: version "0.5.8" resolved "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz" @@ -6911,16 +6817,16 @@ immediate@^3.2.3: resolved "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz" integrity sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q== -immer@^9.0.7: 
- version "9.0.21" - resolved "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz" - integrity sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA== - immer@8.0.1: version "8.0.1" resolved "https://registry.npmjs.org/immer/-/immer-8.0.1.tgz" integrity sha512-aqXhGP7//Gui2+UrEtvxZxSquQVXTpZ7KDxfCcKAF3Vysvw0CViVaW9RZ1j1xlIYqaaaipBoqdqeibkc18PNvA== +immer@^9.0.7: + version "9.0.21" + resolved "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz" + integrity sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA== + import-fresh@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz" @@ -6982,7 +6888,7 @@ inflight@^1.0.4: once "^1.3.0" wrappy "1" -inherits@^2.0.0, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.0, inherits@~2.0.3, inherits@2, inherits@2.0.4: +inherits@2, inherits@2.0.4, inherits@^2.0.0, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.0, inherits@~2.0.3: version "2.0.4" resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz" integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== @@ -6992,16 +6898,16 @@ inherits@2.0.3: resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz" integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw== -ini@^1.3.4, ini@^1.3.5, ini@~1.3.0: - version "1.3.8" - resolved "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz" - integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== - ini@2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz" integrity sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA== +ini@^1.3.4, ini@^1.3.5, ini@~1.3.0: + version "1.3.8" + resolved "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz" + integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== + inline-style-parser@0.1.1: version "0.1.1" resolved "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz" @@ -7041,16 +6947,16 @@ ip-regex@^4.1.0: resolved "https://registry.npmjs.org/ip-regex/-/ip-regex-4.3.0.tgz" integrity sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q== -ipaddr.js@^2.0.1: - version "2.1.0" - resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz" - integrity sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ== - ipaddr.js@1.9.1: version "1.9.1" resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz" integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g== +ipaddr.js@^2.0.1: + version "2.1.0" + resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz" + integrity sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ== + is-absolute-url@^2.0.0: version "2.1.0" resolved "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz" @@ -7063,7 +6969,7 @@ is-accessor-descriptor@^1.0.1: dependencies: hasown "^2.0.0" -is-alphabetical@^1.0.0, is-alphabetical@1.0.4: +is-alphabetical@1.0.4, is-alphabetical@^1.0.0: version "1.0.4" resolved "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz" integrity 
sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg== @@ -7340,12 +7246,7 @@ is-path-inside@^3.0.2: resolved "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz" integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ== -is-plain-obj@^1.0.0: - version "1.1.0" - resolved "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz" - integrity sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg== - -is-plain-obj@^1.1.0: +is-plain-obj@^1.0.0, is-plain-obj@^1.1.0: version "1.1.0" resolved "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz" integrity sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg== @@ -7395,7 +7296,7 @@ is-retry-allowed@^1.0.0, is-retry-allowed@^1.1.0: resolved "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz" integrity sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg== -is-root@^2.1.0, is-root@2.1.0: +is-root@2.1.0, is-root@^2.1.0: version "2.1.0" resolved "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz" integrity sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg== @@ -7407,12 +7308,7 @@ is-shared-array-buffer@^1.0.2: dependencies: call-bind "^1.0.2" -is-stream@^1.0.0: - version "1.1.0" - resolved "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz" - integrity sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ== - -is-stream@^1.1.0: +is-stream@^1.0.0, is-stream@^1.1.0: version "1.1.0" resolved "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz" integrity sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ== @@ -7513,26 +7409,21 @@ is2@^2.0.6: ip-regex "^4.1.0" is-url "^1.2.4" -isarray@^2.0.5: - version "2.0.5" - resolved "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz" - integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw== - -isarray@~1.0.0: - version "1.0.0" - resolved "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz" - integrity sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ== - isarray@0.0.1: version "0.0.1" resolved "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz" integrity sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ== -isarray@1.0.0: +isarray@1.0.0, isarray@~1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz" integrity sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ== +isarray@^2.0.5: + version "2.0.5" + resolved "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz" + integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw== + isexe@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz" @@ -7624,15 +7515,7 @@ jpegtran-bin@^4.0.0: resolved "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz" integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== -js-yaml@^3.13.1: - version "3.14.1" - resolved "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz" - integrity 
sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== - dependencies: - argparse "^1.0.7" - esprima "^4.0.0" - -js-yaml@^3.8.1: +js-yaml@^3.13.1, js-yaml@^3.8.1: version "3.14.1" resolved "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz" integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== @@ -7721,13 +7604,6 @@ jsprim@^1.2.2: json-schema "0.4.0" verror "1.10.0" -keyv@^3.0.0: - version "3.1.0" - resolved "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz" - integrity sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA== - dependencies: - json-buffer "3.0.0" - keyv@3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/keyv/-/keyv-3.0.0.tgz" @@ -7735,21 +7611,14 @@ keyv@3.0.0: dependencies: json-buffer "3.0.0" -kind-of@^3.0.2: - version "3.2.2" - resolved "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz" - integrity sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ== +keyv@^3.0.0: + version "3.1.0" + resolved "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz" + integrity sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA== dependencies: - is-buffer "^1.1.5" + json-buffer "3.0.0" -kind-of@^3.0.3: - version "3.2.2" - resolved "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz" - integrity sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ== - dependencies: - is-buffer "^1.1.5" - -kind-of@^3.2.0: +kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0: version "3.2.2" resolved "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz" integrity sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ== @@ -7846,6 +7715,15 @@ loader-runner@^4.2.0: resolved "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz" integrity sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg== +loader-utils@2.0.0: + version "2.0.0" + resolved "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz" + integrity sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ== + dependencies: + big.js "^5.2.2" + emojis-list "^3.0.0" + json5 "^2.1.2" + loader-utils@^2.0.0: version "2.0.4" resolved "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz" @@ -7860,15 +7738,6 @@ loader-utils@^3.2.0: resolved "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.1.tgz" integrity sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw== -loader-utils@2.0.0: - version "2.0.0" - resolved "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.0.tgz" - integrity sha512-rP4F0h2RaWSvPEkD7BLDFQnvSf+nK+wr3ESUjNTyAGobqrijmW92zc+SO6d4p4B1wh7+B/Jg1mkQe5NYUEHtHQ== - dependencies: - big.js "^5.2.2" - emojis-list "^3.0.0" - json5 "^2.1.2" - locate-path@^3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz" @@ -8021,7 +7890,7 @@ lodash.templatesettings@^4.0.0: dependencies: lodash._reinterpolate "^3.0.0" -lodash.uniq@^4.5.0, lodash.uniq@4.5.0: +lodash.uniq@4.5.0, lodash.uniq@^4.5.0: version "4.5.0" resolved "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz" integrity sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ== @@ -8066,6 +7935,11 @@ lower-case@^2.0.2: dependencies: 
tslib "^2.0.3" +lowercase-keys@1.0.0: + version "1.0.0" + resolved "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.0.tgz" + integrity sha512-RPlX0+PHuvxVDZ7xX+EBVAp4RsVxP/TdDSN2mJYdiq1Lc4Hz7EUSjUI7RZrKKlmrIzVhf6Jo2stj7++gVarS0A== + lowercase-keys@^1.0.0, lowercase-keys@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz" @@ -8076,11 +7950,6 @@ lowercase-keys@^2.0.0: resolved "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz" integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA== -lowercase-keys@1.0.0: - version "1.0.0" - resolved "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.0.tgz" - integrity sha512-RPlX0+PHuvxVDZ7xX+EBVAp4RsVxP/TdDSN2mJYdiq1Lc4Hz7EUSjUI7RZrKKlmrIzVhf6Jo2stj7++gVarS0A== - lpad-align@^1.0.1: version "1.1.2" resolved "https://registry.npmjs.org/lpad-align/-/lpad-align-1.1.2.tgz" @@ -8123,14 +7992,7 @@ lunr@^2.3.8: resolved "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz" integrity sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow== -make-dir@^1.0.0: - version "1.3.0" - resolved "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz" - integrity sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ== - dependencies: - pify "^3.0.0" - -make-dir@^1.2.0: +make-dir@^1.0.0, make-dir@^1.2.0: version "1.3.0" resolved "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz" integrity sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ== @@ -8330,57 +8192,24 @@ micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5: braces "^3.0.2" picomatch "^2.3.1" +mime-db@1.52.0, "mime-db@>= 1.43.0 < 2": + version "1.52.0" + resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz" + integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== + mime-db@^1.28.0, mime-db@~1.33.0: version "1.33.0" resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz" integrity sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ== -"mime-db@>= 1.43.0 < 2": - version "1.52.0" - resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz" - integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== - -mime-db@1.52.0: - version "1.52.0" - resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz" - integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== - -mime-types@^2.1.12, mime-types@~2.1.17, mime-types@2.1.18: +mime-types@2.1.18, mime-types@^2.1.12, mime-types@~2.1.17: version "2.1.18" resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz" integrity sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ== dependencies: mime-db "~1.33.0" -mime-types@^2.1.27: - version "2.1.35" - resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz" - integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== - dependencies: - mime-db "1.52.0" - -mime-types@^2.1.31: - version "2.1.35" - resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz" - integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== - dependencies: - mime-db "1.52.0" - 
-mime-types@~2.1.19: - version "2.1.35" - resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz" - integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== - dependencies: - mime-db "1.52.0" - -mime-types@~2.1.24: - version "2.1.35" - resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz" - integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== - dependencies: - mime-db "1.52.0" - -mime-types@~2.1.34: +mime-types@^2.1.27, mime-types@^2.1.31, mime-types@~2.1.19, mime-types@~2.1.24, mime-types@~2.1.34: version "2.1.35" resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz" integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== @@ -8419,7 +8248,14 @@ minimalistic-assert@^1.0.0: resolved "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz" integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A== -minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@3.1.2: +minimatch@3.0.4: + version "3.0.4" + resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz" + integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== + dependencies: + brace-expansion "^1.1.7" + +minimatch@3.1.2, minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1: version "3.1.2" resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz" integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== @@ -8433,13 +8269,6 @@ minimatch@~3.0.2: dependencies: brace-expansion "^1.1.7" -minimatch@3.0.4: - version "3.0.4" - resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz" - integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== - dependencies: - brace-expansion "^1.1.7" - minimist@^1.1.3, minimist@^1.2.0, minimist@^1.2.3, minimist@^1.2.5, minimist@^1.2.6: version "1.2.8" resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz" @@ -8458,32 +8287,18 @@ mkdirp-classic@^0.5.2, mkdirp-classic@^0.5.3: resolved "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz" integrity sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A== -mkdirp@^0.5.1, mkdirp@~0.5.1: - version "0.5.6" - resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz" - integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw== - dependencies: - minimist "^1.2.6" - -mkdirp@^0.5.6: - version "0.5.6" - resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz" - integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw== - dependencies: - minimist "^1.2.6" - -"mkdirp@>=0.5 0": - version "0.5.6" - resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz" - integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw== - dependencies: - minimist "^1.2.6" - mkdirp@0.3.0: version "0.3.0" resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz" integrity sha512-OHsdUcVAQ6pOtg5JYWpCBo9W/GySVuwvP9hueRMW7UqshC0tbfzLv8wjySTPm3tfUZ/21CE9E1pJagOA91Pxew== +"mkdirp@>=0.5 0", mkdirp@^0.5.1, mkdirp@^0.5.6, mkdirp@~0.5.1: + version "0.5.6" + resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz" + 
integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw== + dependencies: + minimist "^1.2.6" + moo@^0.5.0: version "0.5.2" resolved "https://registry.npmjs.org/moo/-/moo-0.5.2.tgz" @@ -8494,16 +8309,16 @@ mrmime@^1.0.0: resolved "https://registry.npmjs.org/mrmime/-/mrmime-1.0.1.tgz" integrity sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw== -ms@^2.1.1, ms@2.1.2: - version "2.1.2" - resolved "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz" - integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== - ms@2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz" integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A== +ms@2.1.2, ms@^2.1.1: + version "2.1.2" + resolved "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz" + integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== + ms@2.1.3: version "2.1.3" resolved "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz" @@ -8650,6 +8465,15 @@ normalize-range@^0.1.2: resolved "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz" integrity sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA== +normalize-url@2.0.1: + version "2.0.1" + resolved "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz" + integrity sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw== + dependencies: + prepend-http "^2.0.0" + query-string "^5.0.1" + sort-keys "^2.0.0" + normalize-url@^3.0.0: version "3.3.0" resolved "https://registry.npmjs.org/normalize-url/-/normalize-url-3.3.0.tgz" @@ -8665,15 +8489,6 @@ normalize-url@^6.0.1: resolved "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz" integrity sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A== -normalize-url@2.0.1: - version "2.0.1" - resolved "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz" - integrity sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw== - dependencies: - prepend-http "^2.0.0" - query-string "^5.0.1" - sort-keys "^2.0.0" - not@^0.1.0: version "0.1.0" resolved "https://registry.npmjs.org/not/-/not-0.1.0.tgz" @@ -8706,7 +8521,7 @@ nprogress@^0.2.0: resolved "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz" integrity sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA== -nth-check@^1.0.2: +nth-check@^1.0.2, nth-check@~1.0.1: version "1.0.2" resolved "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz" integrity sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg== @@ -8720,13 +8535,6 @@ nth-check@^2.0.0, nth-check@^2.0.1: dependencies: boolbase "^1.0.0" -nth-check@~1.0.1: - version "1.0.2" - resolved "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz" - integrity sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg== - dependencies: - boolbase "~1.0.0" - num2fraction@^1.2.2: version "1.2.2" resolved "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz" @@ -9177,13 +8985,6 @@ path-parse@^1.0.7: resolved "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz" integrity 
sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== -path-to-regexp@^1.7.0: - version "1.8.0" - resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz" - integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA== - dependencies: - isarray "0.0.1" - path-to-regexp@0.1.7: version "0.1.7" resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz" @@ -9194,6 +8995,13 @@ path-to-regexp@2.2.1: resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz" integrity sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ== +path-to-regexp@^1.7.0: + version "1.8.0" + resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz" + integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA== + dependencies: + isarray "0.0.1" + path-type@^1.0.0: version "1.1.0" resolved "https://registry.npmjs.org/path-type/-/path-type-1.1.0.tgz" @@ -9240,17 +9048,7 @@ picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3, picomatch@^2.3.1: resolved "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz" integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== -pify@^2.0.0: - version "2.3.0" - resolved "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz" - integrity sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog== - -pify@^2.2.0: - version "2.3.0" - resolved "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz" - integrity sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog== - -pify@^2.3.0: +pify@^2.0.0, pify@^2.2.0, pify@^2.3.0: version "2.3.0" resolved "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz" integrity sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog== @@ -9296,7 +9094,7 @@ pkg-dir@^4.1.0: dependencies: find-up "^4.0.0" -pkg-up@^3.1.0, pkg-up@3.1.0: +pkg-up@3.1.0, pkg-up@^3.1.0: version "3.1.0" resolved "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz" integrity sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA== @@ -9858,15 +9656,6 @@ postcss-zindex@^5.1.0: resolved "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz" integrity sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A== -"postcss@^7.0.0 || ^8.0.1", postcss@^8.0.9, postcss@^8.1.0, postcss@^8.2.15, postcss@^8.2.2, postcss@^8.3.11, postcss@^8.4.14, postcss@^8.4.16, postcss@^8.4.17, postcss@^8.4.21: - version "8.4.31" - resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz" - integrity sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ== - dependencies: - nanoid "^3.3.6" - picocolors "^1.0.0" - source-map-js "^1.0.2" - postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.23, postcss@^7.0.27, postcss@^7.0.32: version "7.0.39" resolved "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz" @@ -9875,6 +9664,15 @@ postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.23, postcss@^7.0.27, postcss@^7.0.3 picocolors "^0.2.1" source-map "^0.6.1" +postcss@^8.3.11, postcss@^8.4.14, postcss@^8.4.17, postcss@^8.4.21: + version "8.4.31" + resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz" + integrity 
sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ== + dependencies: + nanoid "^3.3.6" + picocolors "^1.0.0" + source-map-js "^1.0.2" + prebuild-install@^7.1.1: version "7.1.1" resolved "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz" @@ -9943,14 +9741,6 @@ promise@^7.1.1: dependencies: asap "~2.0.3" -prompts@^2.4.2: - version "2.4.2" - resolved "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz" - integrity sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q== - dependencies: - kleur "^3.0.3" - sisteransi "^1.0.5" - prompts@2.4.0: version "2.4.0" resolved "https://registry.npmjs.org/prompts/-/prompts-2.4.0.tgz" @@ -9959,6 +9749,14 @@ prompts@2.4.0: kleur "^3.0.3" sisteransi "^1.0.5" +prompts@^2.4.2: + version "2.4.2" + resolved "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz" + integrity sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q== + dependencies: + kleur "^3.0.3" + sisteransi "^1.0.5" + prop-types-exact@^1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/prop-types-exact/-/prop-types-exact-1.2.0.tgz" @@ -9968,7 +9766,7 @@ prop-types-exact@^1.2.0: object.assign "^4.1.0" reflect.ownkeys "^0.2.0" -prop-types@^15.0.0, prop-types@^15.6.2, prop-types@^15.7.2, prop-types@^15.8.1, prop-types@>=15: +prop-types@^15.0.0, prop-types@^15.6.2, prop-types@^15.7.2, prop-types@^15.8.1: version "15.8.1" resolved "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz" integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg== @@ -10020,12 +9818,7 @@ punycode@^1.3.2: resolved "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz" integrity sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ== -punycode@^2.1.0: - version "2.3.1" - resolved "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz" - integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== - -punycode@^2.1.1: +punycode@^2.1.0, punycode@^2.1.1: version "2.3.1" resolved "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz" integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== @@ -10047,7 +9840,7 @@ q@^1.1.2: resolved "https://registry.npmjs.org/q/-/q-1.5.1.tgz" integrity sha512-kV/CThkXo6xyFEZUugw/+pIOywXcDbFYgSct5cT3gqlbkBE1SJdwy6UQoZvodiWF/ckQLZyDE/Bu1M6gVu5lVw== -qs@^6.4.0, qs@6.11.0: +qs@6.11.0, qs@^6.4.0: version "6.11.0" resolved "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz" integrity sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q== @@ -10121,28 +9914,15 @@ randombytes@^2.1.0: dependencies: safe-buffer "^5.1.0" -range-parser@^1.2.1: - version "1.2.1" - resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz" - integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== - -range-parser@~1.2.1: - version "1.2.1" - resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz" - integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== - range-parser@1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz" integrity sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A== -raw-body@~1.1.0: - version "1.1.7" 
- resolved "https://registry.npmjs.org/raw-body/-/raw-body-1.1.7.tgz" - integrity sha512-WmJJU2e9Y6M5UzTOkHaM7xJGAPQD8PNzx3bAd2+uhZAim6wDk6dAZxPVYLF67XhbR4hmKGh33Lpmh4XWrCH5Mg== - dependencies: - bytes "1" - string_decoder "0.10" +range-parser@^1.2.1, range-parser@~1.2.1: + version "1.2.1" + resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz" + integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== raw-body@2.5.1: version "2.5.1" @@ -10154,7 +9934,15 @@ raw-body@2.5.1: iconv-lite "0.4.24" unpipe "1.0.0" -rc@^1.2.7, rc@^1.2.8, rc@1.2.8: +raw-body@~1.1.0: + version "1.1.7" + resolved "https://registry.npmjs.org/raw-body/-/raw-body-1.1.7.tgz" + integrity sha512-WmJJU2e9Y6M5UzTOkHaM7xJGAPQD8PNzx3bAd2+uhZAim6wDk6dAZxPVYLF67XhbR4hmKGh33Lpmh4XWrCH5Mg== + dependencies: + bytes "1" + string_decoder "0.10" + +rc@1.2.8, rc@^1.2.7, rc@^1.2.8: version "1.2.8" resolved "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz" integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== @@ -10234,16 +10022,7 @@ react-dev-utils@^12.0.1: strip-ansi "^6.0.1" text-table "^0.2.0" -react-dom@*, "react-dom@^16.6.0 || ^17.0.0 || ^18.0.0", "react-dom@^16.8.4 || ^17", "react-dom@^16.8.4 || ^17.0.0", "react-dom@^17.0.0 || ^16.3.0 || ^15.5.4", react-dom@^17.0.2, "react-dom@>= 16.8.0 < 19.0.0": - version "17.0.2" - resolved "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz" - integrity sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA== - dependencies: - loose-envify "^1.1.0" - object-assign "^4.1.1" - scheduler "^0.20.2" - -react-dom@^16.0.0-0, react-dom@^16.8.4: +react-dom@^16.8.4: version "16.14.0" resolved "https://registry.npmjs.org/react-dom/-/react-dom-16.14.0.tgz" integrity sha512-1gCeQXDLoIqMgqD3IO2Ah9bnf0w9kzhwN5q4FGnHZ67hBm9yePzB5JJAIQCc8x3pFnNlwFq4RidZggNAAkzWWw== @@ -10253,6 +10032,15 @@ react-dom@^16.0.0-0, react-dom@^16.8.4: prop-types "^15.6.2" scheduler "^0.19.1" +react-dom@^17.0.2: + version "17.0.2" + resolved "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz" + integrity sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + scheduler "^0.20.2" + react-error-overlay@^6.0.11, react-error-overlay@^6.0.9: version "6.0.11" resolved "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz" @@ -10306,14 +10094,6 @@ react-loadable-ssr-addon-v5-slorber@^1.0.1: dependencies: "@babel/runtime" "^7.10.3" -react-loadable@*, "react-loadable@npm:@docusaurus/react-loadable@5.5.2": - version "5.5.2" - resolved "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz" - integrity sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ== - dependencies: - "@types/react" "*" - prop-types "^15.6.2" - react-router-config@^5.1.1: version "5.1.1" resolved "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz" @@ -10334,7 +10114,7 @@ react-router-dom@^5.3.3: tiny-invariant "^1.0.2" tiny-warning "^1.0.0" -react-router@^5.3.3, react-router@>=5, react-router@5.3.4: +react-router@5.3.4, react-router@^5.3.3: version "5.3.4" resolved "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz" integrity sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA== @@ -10368,7 +10148,7 @@ 
react-textarea-autosize@^8.3.2: use-composed-ref "^1.3.0" use-latest "^1.2.1" -react-waypoint@^10.3.0, react-waypoint@>=9.0.2: +react-waypoint@^10.3.0: version "10.3.0" resolved "https://registry.npmjs.org/react-waypoint/-/react-waypoint-10.3.0.tgz" integrity sha512-iF1y2c1BsoXuEGz08NoahaLFIGI9gTUAAOKip96HUmylRT6DUtpgoBPjk/Y8dfcFVmfVDvUzWjNXpZyKTOV0SQ== @@ -10378,15 +10158,7 @@ react-waypoint@^10.3.0, react-waypoint@>=9.0.2: prop-types "^15.0.0" react-is "^17.0.1 || ^18.0.0" -react@*, "react@^15.0.2 || ^16.0.0 || ^17.0.0", "react@^15.3.0 || ^16.0.0 || ^17.0.0 || ^18.0.0", "react@^16.13.1 || ^17.0.0", "react@^16.6.0 || ^17.0.0 || ^18.0.0", "react@^16.8.0 || ^17.0.0 || ^18.0.0", "react@^16.8.4 || ^17", "react@^16.8.4 || ^17.0.0", "react@^17.0.0 || ^16.3.0 || ^15.5.4", react@^17.0.2, "react@>= 16.8.0 < 19.0.0", react@>=0.14.9, react@>=0.14.x, react@>=15, react@17.0.2: - version "17.0.2" - resolved "https://registry.npmjs.org/react/-/react-17.0.2.tgz" - integrity sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA== - dependencies: - loose-envify "^1.1.0" - object-assign "^4.1.1" - -"react@^0.14 || ^15.0.0 || ^16.0.0-alpha", react@^16.0.0-0, react@^16.14.0, react@^16.8.4, "react@0.13.x || 0.14.x || ^15.0.0-0 || ^16.0.0-0": +react@^16.8.4: version "16.14.0" resolved "https://registry.npmjs.org/react/-/react-16.14.0.tgz" integrity sha512-0X2CImDkJGApiAlcf0ODKIneSwBPhqJawOa5wCtKbu7ZECrmS26NvtSILynQ66cgkT/RJ4LidJOc3bUESwmU8g== @@ -10395,6 +10167,14 @@ react@*, "react@^15.0.2 || ^16.0.0 || ^17.0.0", "react@^15.3.0 || ^16.0.0 || ^17 object-assign "^4.1.1" prop-types "^15.6.2" +react@^17.0.2: + version "17.0.2" + resolved "https://registry.npmjs.org/react/-/react-17.0.2.tgz" + integrity sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + read-pkg-up@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-1.0.1.tgz" @@ -10412,59 +10192,7 @@ read-pkg@^1.0.0: normalize-package-data "^2.3.2" path-type "^1.0.0" -readable-stream@^2.0.0: - version "2.3.8" - resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz" - integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA== - dependencies: - core-util-is "~1.0.0" - inherits "~2.0.3" - isarray "~1.0.0" - process-nextick-args "~2.0.0" - safe-buffer "~5.1.1" - string_decoder "~1.1.1" - util-deprecate "~1.0.1" - -readable-stream@^2.0.1: - version "2.3.8" - resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz" - integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA== - dependencies: - core-util-is "~1.0.0" - inherits "~2.0.3" - isarray "~1.0.0" - process-nextick-args "~2.0.0" - safe-buffer "~5.1.1" - string_decoder "~1.1.1" - util-deprecate "~1.0.1" - -readable-stream@^2.0.2: - version "2.3.8" - resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz" - integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA== - dependencies: - core-util-is "~1.0.0" - inherits "~2.0.3" - isarray "~1.0.0" - process-nextick-args "~2.0.0" - safe-buffer "~5.1.1" - string_decoder "~1.1.1" - util-deprecate "~1.0.1" - -readable-stream@^2.2.2: - version "2.3.8" - resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz" - integrity 
sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA== - dependencies: - core-util-is "~1.0.0" - inherits "~2.0.3" - isarray "~1.0.0" - process-nextick-args "~2.0.0" - safe-buffer "~5.1.1" - string_decoder "~1.1.1" - util-deprecate "~1.0.1" - -readable-stream@^2.3.0, readable-stream@^2.3.5: +readable-stream@^2.0.0, readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.2.2, readable-stream@^2.3.0, readable-stream@^2.3.5, readable-stream@~2.3.6: version "2.3.8" resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz" integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA== @@ -10486,19 +10214,6 @@ readable-stream@^3.0.6, readable-stream@^3.1.1, readable-stream@^3.4.0: string_decoder "^1.1.1" util-deprecate "^1.0.1" -readable-stream@~2.3.6: - version "2.3.8" - resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz" - integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA== - dependencies: - core-util-is "~1.0.0" - inherits "~2.0.3" - isarray "~1.0.0" - process-nextick-args "~2.0.0" - safe-buffer "~5.1.1" - string_decoder "~1.1.1" - util-deprecate "~1.0.1" - readdirp@~3.6.0: version "3.6.0" resolved "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz" @@ -10518,13 +10233,6 @@ rechoir@^0.6.2: dependencies: resolve "^1.1.6" -recursive-readdir@^2.2.2: - version "2.2.3" - resolved "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz" - integrity sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA== - dependencies: - minimatch "^3.0.5" - recursive-readdir@2.2.2: version "2.2.2" resolved "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.2.tgz" @@ -10532,6 +10240,13 @@ recursive-readdir@2.2.2: dependencies: minimatch "3.0.4" +recursive-readdir@^2.2.2: + version "2.2.3" + resolved "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz" + integrity sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA== + dependencies: + minimatch "^3.0.5" + redent@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/redent/-/redent-1.0.0.tgz" @@ -10813,7 +10528,7 @@ resolve@^1.1.6, resolve@^1.10.0, resolve@^1.14.2, resolve@^1.3.2: path-parse "^1.0.7" supports-preserve-symlinks-flag "^1.0.0" -responselike@^1.0.2, responselike@1.0.2: +responselike@1.0.2, responselike@^1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz" integrity sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ== @@ -10845,7 +10560,7 @@ rgba-regex@^1.0.0: resolved "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz" integrity sha512-zgn5OjNQXLUTdq8m17KdaicF6w89TZs8ZU8y0AYENIU6wG8GG6LLm0yLSiPY8DmaYmHdgRW8rnApjoT0fQRfMg== -rimraf@^2.5.4: +rimraf@2, rimraf@^2.5.4: version "2.7.1" resolved "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz" integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w== @@ -10859,13 +10574,6 @@ rimraf@^3.0.2: dependencies: glob "^7.1.3" -rimraf@2: - version "2.7.1" - resolved "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz" - integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w== - dependencies: - glob "^7.1.3" - rst-selector-parser@^2.2.3: version "2.2.3" resolved 
"https://registry.npmjs.org/rst-selector-parser/-/rst-selector-parser-2.2.3.tgz" @@ -10913,21 +10621,16 @@ safe-array-concat@^1.0.0, safe-array-concat@^1.0.1: has-symbols "^1.0.3" isarray "^2.0.5" -safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@>=5.1.0, safe-buffer@~5.2.0, safe-buffer@5.2.1: +safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz" + integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== + +safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.2.0: version "5.2.1" resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz" integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== -safe-buffer@~5.1.0, safe-buffer@~5.1.1: - version "5.1.2" - resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz" - integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== - -safe-buffer@5.1.2: - version "5.1.2" - resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz" - integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== - safe-json-parse@~1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/safe-json-parse/-/safe-json-parse-1.0.1.tgz" @@ -10949,7 +10652,7 @@ safe-regex@^1.1.0: dependencies: ret "~0.1.10" -safer-buffer@^2.0.2, safer-buffer@^2.1.0, "safer-buffer@>= 2.1.2 < 3", safer-buffer@~2.1.0: +"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: version "2.1.2" resolved "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== @@ -10975,6 +10678,15 @@ scheduler@^0.20.2: loose-envify "^1.1.0" object-assign "^4.1.1" +schema-utils@2.7.0: + version "2.7.0" + resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz" + integrity sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A== + dependencies: + "@types/json-schema" "^7.0.4" + ajv "^6.12.2" + ajv-keywords "^3.4.1" + schema-utils@^2.6.5: version "2.7.1" resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz" @@ -10984,25 +10696,7 @@ schema-utils@^2.6.5: ajv "^6.12.4" ajv-keywords "^3.5.2" -schema-utils@^3.0.0: - version "3.3.0" - resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz" - integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg== - dependencies: - "@types/json-schema" "^7.0.8" - ajv "^6.12.5" - ajv-keywords "^3.5.2" - -schema-utils@^3.1.1: - version "3.3.0" - resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz" - integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg== - dependencies: - "@types/json-schema" "^7.0.8" - ajv "^6.12.5" - ajv-keywords "^3.5.2" - -schema-utils@^3.2.0: +schema-utils@^3.0.0, schema-utils@^3.1.1, schema-utils@^3.2.0: version "3.3.0" resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz" integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg== @@ -11021,20 +10715,6 @@ schema-utils@^4.0.0: 
ajv-formats "^2.1.1" ajv-keywords "^5.1.0" -schema-utils@2.7.0: - version "2.7.0" - resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz" - integrity sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A== - dependencies: - "@types/json-schema" "^7.0.4" - ajv "^6.12.2" - ajv-keywords "^3.4.1" - -"search-insights@>= 1 < 3": - version "2.7.0" - resolved "https://registry.npmjs.org/search-insights/-/search-insights-2.7.0.tgz" - integrity sha512-GLbVaGgzYEKMvuJbHRhLi1qoBFnjXZGZ6l4LxOYPCp4lI2jDRB3jPU9/XNhMwv6kvnA9slTreq6pvK+b3o3aqg== - section-matter@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz" @@ -11081,42 +10761,12 @@ semver-truncate@^1.1.2: dependencies: semver "^5.3.0" -semver@^5.3.0: +"semver@2 || 3 || 4 || 5", semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.6.0, semver@^5.7.0, semver@^5.7.1: version "5.7.2" resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz" integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g== -semver@^5.4.1: - version "5.7.2" - resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz" - integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g== - -semver@^5.5.0: - version "5.7.2" - resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz" - integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g== - -semver@^5.6.0, semver@^5.7.0, semver@^5.7.1: - version "5.7.2" - resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz" - integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g== - -semver@^6.0.0: - version "6.3.1" - resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz" - integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== - -semver@^6.2.0: - version "6.3.1" - resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz" - integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== - -semver@^6.3.0: - version "6.3.1" - resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz" - integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== - -semver@^6.3.1: +semver@^6.0.0, semver@^6.2.0, semver@^6.3.0, semver@^6.3.1: version "6.3.1" resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== @@ -11128,11 +10778,6 @@ semver@^7.3.2, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.3.8, semve dependencies: lru-cache "^6.0.0" -"semver@2 || 3 || 4 || 5": - version "5.7.2" - resolved "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz" - integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g== - send@0.18.0: version "0.18.0" resolved "https://registry.npmjs.org/send/-/send-0.18.0.tgz" @@ -11259,20 +10904,6 @@ shallowequal@^1.1.0: resolved "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz" integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ== -sharp@*, sharp@^0.32.6: - version "0.32.6" - resolved "https://registry.npmjs.org/sharp/-/sharp-0.32.6.tgz" - integrity 
sha512-KyLTWwgcR9Oe4d9HwCwNM2l7+J0dUQwn/yf7S0EnTtb0eVS4RxO0eUSvxPtzT4F3SY+C4K6fqdv/DO27sJ/v/w== - dependencies: - color "^4.2.3" - detect-libc "^2.0.2" - node-addon-api "^6.1.0" - prebuild-install "^7.1.1" - semver "^7.5.4" - simple-get "^4.0.1" - tar-fs "^3.0.4" - tunnel-agent "^0.6.0" - sharp@^0.30.7: version "0.30.7" resolved "https://registry.npmjs.org/sharp/-/sharp-0.30.7.tgz" @@ -11287,6 +10918,20 @@ sharp@^0.30.7: tar-fs "^2.1.1" tunnel-agent "^0.6.0" +sharp@^0.32.6: + version "0.32.6" + resolved "https://registry.npmjs.org/sharp/-/sharp-0.32.6.tgz" + integrity sha512-KyLTWwgcR9Oe4d9HwCwNM2l7+J0dUQwn/yf7S0EnTtb0eVS4RxO0eUSvxPtzT4F3SY+C4K6fqdv/DO27sJ/v/w== + dependencies: + color "^4.2.3" + detect-libc "^2.0.2" + node-addon-api "^6.1.0" + prebuild-install "^7.1.1" + semver "^7.5.4" + simple-get "^4.0.1" + tar-fs "^3.0.4" + tunnel-agent "^0.6.0" + shebang-command@^1.2.0: version "1.2.0" resolved "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz" @@ -11311,16 +10956,16 @@ shebang-regex@^3.0.0: resolved "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz" integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== -shell-quote@^1.7.3: - version "1.8.1" - resolved "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz" - integrity sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA== - shell-quote@1.7.2: version "1.7.2" resolved "https://registry.npmjs.org/shell-quote/-/shell-quote-1.7.2.tgz" integrity sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg== +shell-quote@^1.7.3: + version "1.8.1" + resolved "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz" + integrity sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA== + shelljs@^0.8.4, shelljs@^0.8.5: version "0.8.5" resolved "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz" @@ -11508,12 +11153,7 @@ source-map-url@^0.4.0: resolved "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz" integrity sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw== -source-map@^0.5.0: - version "0.5.7" - resolved "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz" - integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ== - -source-map@^0.5.6: +source-map@^0.5.0, source-map@^0.5.6: version "0.5.7" resolved "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz" integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ== @@ -11631,16 +11271,16 @@ static-extend@^0.1.1: define-property "^0.2.5" object-copy "^0.1.0" -"statuses@>= 1.4.0 < 2": - version "1.5.0" - resolved "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz" - integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA== - statuses@2.0.1: version "2.0.1" resolved "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz" integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== +"statuses@>= 1.4.0 < 2": + version "1.5.0" + resolved "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz" + integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA== + std-env@^3.0.1: version "3.3.3" resolved "https://registry.npmjs.org/std-env/-/std-env-3.3.3.tgz" 
@@ -11659,58 +11299,12 @@ strict-uri-encode@^1.0.0: resolved "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz" integrity sha512-R3f198pcvnB+5IpnBlRkphuE9n46WyVl8I39W/ZUTZLz4nqSP/oLYUrcnJrw462Ds8he4YKMov2efsTIw1BDGQ== -string_decoder@^1.1.1: - version "1.3.0" - resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz" - integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== - dependencies: - safe-buffer "~5.2.0" - -string_decoder@~1.1.1: - version "1.1.1" - resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz" - integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== - dependencies: - safe-buffer "~5.1.0" - -string_decoder@0.10: - version "0.10.31" - resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz" - integrity sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ== - string-template@~0.2.1: version "0.2.1" resolved "https://registry.npmjs.org/string-template/-/string-template-0.2.1.tgz" integrity sha512-Yptehjogou2xm4UJbxJ4CxgZx12HBfeystp0y3x7s4Dj32ltVVG1Gg8YhKjHZkHicuKpZX/ffilA8505VbUbpw== -"string-width@^1.0.2 || 2 || 3 || 4": - version "4.2.3" - resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - -string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.2: - version "4.2.3" - resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - -string-width@^4.2.0: - version "4.2.3" - resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - -string-width@^4.2.3: +"string-width@^1.0.2 || 2 || 3 || 4", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: version "4.2.3" resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -11755,6 +11349,25 @@ string.prototype.trimstart@^1.0.7: define-properties "^1.2.0" es-abstract "^1.22.1" +string_decoder@0.10: + version "0.10.31" + resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz" + integrity sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ== + +string_decoder@^1.1.1: + version "1.3.0" + resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz" + integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== + dependencies: + safe-buffer "~5.2.0" + +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz" + integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== + dependencies: + safe-buffer "~5.1.0" + 
stringify-object@^3.3.0: version "3.3.0" resolved "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz" @@ -11764,6 +11377,13 @@ stringify-object@^3.3.0: is-obj "^1.0.1" is-regexp "^1.0.0" +strip-ansi@6.0.0: + version "6.0.0" + resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz" + integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w== + dependencies: + ansi-regex "^5.0.0" + strip-ansi@^3.0.0: version "3.0.1" resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz" @@ -11785,13 +11405,6 @@ strip-ansi@^7.0.1: dependencies: ansi-regex "^6.0.1" -strip-ansi@6.0.0: - version "6.0.0" - resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz" - integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w== - dependencies: - ansi-regex "^5.0.0" - strip-bom-string@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz" @@ -11855,7 +11468,7 @@ strnum@^1.0.5: resolved "https://registry.npmjs.org/strnum/-/strnum-1.0.5.tgz" integrity sha512-J8bbNyKKXl5qYcR36TIO8W3mVGVHrmmxsd5PAItGkmyzwJvybiw2IVq5nqd0i4LSNSkB/sx9VHllbfFdr9k1JA== -style-to-object@^0.3.0, style-to-object@0.3.0: +style-to-object@0.3.0, style-to-object@^0.3.0: version "0.3.0" resolved "https://registry.npmjs.org/style-to-object/-/style-to-object-0.3.0.tgz" integrity sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA== @@ -11915,26 +11528,7 @@ svg-parser@^2.0.4: resolved "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz" integrity sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ== -svgo@^1.0.0: - version "1.3.2" - resolved "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz" - integrity sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw== - dependencies: - chalk "^2.4.1" - coa "^2.0.2" - css-select "^2.0.0" - css-select-base-adapter "^0.1.1" - css-tree "1.0.0-alpha.37" - csso "^4.0.2" - js-yaml "^3.13.1" - mkdirp "~0.5.1" - object.values "^1.1.0" - sax "~1.2.4" - stable "^0.1.8" - unquote "~1.1.1" - util.promisify "~1.0.0" - -svgo@^1.3.2: +svgo@^1.0.0, svgo@^1.3.2: version "1.3.2" resolved "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz" integrity sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw== @@ -12070,16 +11664,11 @@ terser@^5.10.0, terser@^5.16.8: commander "^2.20.0" source-map-support "~0.5.20" -text-table@^0.2.0, text-table@0.2.0: +text-table@0.2.0, text-table@^0.2.0: version "0.2.0" resolved "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz" integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw== -through@^2.3.8: - version "2.3.8" - resolved "https://registry.npmjs.org/through/-/through-2.3.8.tgz" - integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg== - through2@^2.0.0: version "2.0.5" resolved "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz" @@ -12088,6 +11677,11 @@ through2@^2.0.0: readable-stream "~2.3.6" xtend "~4.0.1" +through@^2.3.8: + version "2.3.8" + resolved "https://registry.npmjs.org/through/-/through-2.3.8.tgz" + integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg== + thunky@^1.0.2: version "1.1.0" resolved 
"https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz" @@ -12350,11 +11944,6 @@ typedarray@^0.0.6: resolved "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz" integrity sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA== -"typescript@>= 2.7": - version "5.1.6" - resolved "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz" - integrity sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA== - ua-parser-js@^1.0.35: version "1.0.35" resolved "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.35.tgz" @@ -12409,10 +11998,10 @@ unicode-property-aliases-ecmascript@^2.0.0: resolved "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz" integrity sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w== -unified@^9.0.0, unified@^9.2.2: - version "9.2.2" - resolved "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz" - integrity sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ== +unified@9.2.0: + version "9.2.0" + resolved "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz" + integrity sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg== dependencies: bail "^1.0.0" extend "^3.0.0" @@ -12421,10 +12010,10 @@ unified@^9.0.0, unified@^9.2.2: trough "^1.0.0" vfile "^4.0.0" -unified@9.2.0: - version "9.2.0" - resolved "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz" - integrity sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg== +unified@^9.0.0, unified@^9.2.2: + version "9.2.2" + resolved "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz" + integrity sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ== dependencies: bail "^1.0.0" extend "^3.0.0" @@ -12460,7 +12049,7 @@ unique-string@^2.0.0: dependencies: crypto-random-string "^2.0.0" -unist-builder@^2.0.0, unist-builder@2.0.3: +unist-builder@2.0.3, unist-builder@^2.0.0: version "2.0.3" resolved "https://registry.npmjs.org/unist-builder/-/unist-builder-2.0.3.tgz" integrity sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw== @@ -12516,7 +12105,7 @@ unist-util-visit-parents@^3.0.0: "@types/unist" "^2.0.0" unist-util-is "^4.0.0" -unist-util-visit@^2.0.0, unist-util-visit@^2.0.3, unist-util-visit@2.0.3: +unist-util-visit@2.0.3, unist-util-visit@^2.0.0, unist-util-visit@^2.0.3: version "2.0.3" resolved "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz" integrity sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q== @@ -12530,7 +12119,7 @@ universalify@^2.0.0: resolved "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz" integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ== -unpipe@~1.0.0, unpipe@1.0.0: +unpipe@1.0.0, unpipe@~1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz" integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ== @@ -12689,12 +12278,7 @@ utils-merge@1.0.1: resolved "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz" integrity sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA== -uuid@^3.0.1: - version "3.4.0" - resolved 
"https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz" - integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== - -uuid@^3.3.2: +uuid@^3.0.1, uuid@^3.3.2: version "3.4.0" resolved "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz" integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== @@ -12881,7 +12465,7 @@ webpack-sources@^3.2.2, webpack-sources@^3.2.3: resolved "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz" integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w== -"webpack@^4.0.0 || ^5.0.0", "webpack@^4.37.0 || ^5.0.0", webpack@^5.0.0, webpack@^5.1.0, webpack@^5.20.0, webpack@^5.73.0, "webpack@>= 4", webpack@>=2, "webpack@>=4.41.1 || 5.x", "webpack@3 || 4 || 5": +webpack@^5.73.0: version "5.88.2" resolved "https://registry.npmjs.org/webpack/-/webpack-5.88.2.tgz" integrity sha512-JmcgNZ1iKj+aiR0OvTYtWQqJwq37Pf683dY9bVORwVbUrDhLhdn/PlO2sHsFHPkj7sHNQF3JwaAkp49V+Sq1tQ== @@ -12921,7 +12505,7 @@ webpackbar@^5.0.2: pretty-time "^1.1.0" std-env "^3.0.1" -websocket-driver@^0.7.4, websocket-driver@>=0.5.1: +websocket-driver@>=0.5.1, websocket-driver@^0.7.4: version "0.7.4" resolved "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz" integrity sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg== @@ -12974,14 +12558,7 @@ which-typed-array@^1.1.11, which-typed-array@^1.1.13: gopd "^1.0.1" has-tostringtag "^1.0.0" -which@^1.2.9: - version "1.3.1" - resolved "https://registry.npmjs.org/which/-/which-1.3.1.tgz" - integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== - dependencies: - isexe "^2.0.0" - -which@^1.3.1: +which@^1.2.9, which@^1.3.1: version "1.3.1" resolved "https://registry.npmjs.org/which/-/which-1.3.1.tgz" integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== diff --git a/litellm/__init__.py b/litellm/__init__.py index b7aeeb210..36ce31cfa 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -10,7 +10,7 @@ success_callback: List[Union[str, Callable]] = [] failure_callback: List[Union[str, Callable]] = [] callbacks: List[Callable] = [] _async_input_callback: List[Callable] = [] # internal variable - async custom callbacks are routed here. -_async_success_callback: List[Callable] = [] # internal variable - async custom callbacks are routed here. +_async_success_callback: List[Union[str, Callable]] = [] # internal variable - async custom callbacks are routed here. _async_failure_callback: List[Callable] = [] # internal variable - async custom callbacks are routed here. pre_call_rules: List[Callable] = [] post_call_rules: List[Callable] = [] @@ -48,6 +48,8 @@ cache: Optional[Cache] = None # cache object <- use this - https://docs.litellm. 
model_alias_map: Dict[str, str] = {} model_group_alias_map: Dict[str, str] = {} max_budget: float = 0.0 # set the max budget across all providers +_openai_completion_params = ["functions", "function_call", "temperature", "temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "request_timeout", "api_base", "api_version", "api_key", "deployment_id", "organization", "base_url", "default_headers", "timeout", "response_format", "seed", "tools", "tool_choice", "max_retries"] +_litellm_completion_params = ["metadata", "acompletion", "caching", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "fallbacks", "azure", "headers", "model_list", "num_retries", "context_window_fallback_dict", "roles", "final_prompt_value", "bos_token", "eos_token", "request_timeout", "complete_response", "self", "client", "rpm", "tpm", "input_cost_per_token", "output_cost_per_token", "hf_model_name", "model_info", "proxy_server_request", "preset_cache_key"] _current_cost = 0 # private variable, used if max budget is set error_logs: Dict = {} add_function_to_prompt: bool = False # if function calling not supported by api, append function call details to system prompt @@ -56,6 +58,7 @@ aclient_session: Optional[httpx.AsyncClient] = None model_fallbacks: Optional[List] = None # Deprecated for 'litellm.fallbacks' model_cost_map_url: str = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json" suppress_debug_info = False +dynamodb_table_name: Optional[str] = None #### RELIABILITY #### request_timeout: Optional[float] = 6000 num_retries: Optional[int] = None @@ -107,6 +110,8 @@ open_ai_text_completion_models: List = [] cohere_models: List = [] anthropic_models: List = [] openrouter_models: List = [] +vertex_language_models: List = [] +vertex_vision_models: List = [] vertex_chat_models: List = [] vertex_code_chat_models: List = [] vertex_text_models: List = [] @@ -133,6 +138,10 @@ for key, value in model_cost.items(): vertex_text_models.append(key) elif value.get('litellm_provider') == 'vertex_ai-code-text-models': vertex_code_text_models.append(key) + elif value.get('litellm_provider') == 'vertex_ai-language-models': + vertex_language_models.append(key) + elif value.get('litellm_provider') == 'vertex_ai-vision-models': + vertex_vision_models.append(key) elif value.get('litellm_provider') == 'vertex_ai-chat-models': vertex_chat_models.append(key) elif value.get('litellm_provider') == 'vertex_ai-code-chat-models': @@ -154,7 +163,16 @@ for key, value in model_cost.items(): openai_compatible_endpoints: List = [ "api.perplexity.ai", "api.endpoints.anyscale.com/v1", - "api.deepinfra.com/v1/openai" + "api.deepinfra.com/v1/openai", + "api.mistral.ai/v1" +] + +# this is maintained for Exception Mapping +openai_compatible_providers: List = [ + "anyscale", + "mistral", + "deepinfra", + "perplexity" ] @@ -266,6 +284,7 @@ model_list = ( provider_list: List = [ "openai", "custom_openai", + "text-completion-openai", "cohere", "anthropic", "replicate", @@ -287,6 +306,7 @@ provider_list: List = [ "deepinfra", "perplexity", "anyscale", + "mistral", "maritalk", "custom", # custom apis ] @@ -396,6 +416,7 @@ from .exceptions import ( AuthenticationError, InvalidRequestError, BadRequestError, + NotFoundError, RateLimitError, ServiceUnavailableError, OpenAIError, @@ -404,7 +425,8 @@ from .exceptions import ( 
APIError, Timeout, APIConnectionError, - APIResponseValidationError + APIResponseValidationError, + UnprocessableEntityError ) from .budget_manager import BudgetManager from .proxy.proxy_cli import run_server diff --git a/litellm/caching.py b/litellm/caching.py index 1d993927b..73dde7cf9 100644 --- a/litellm/caching.py +++ b/litellm/caching.py @@ -10,19 +10,7 @@ import litellm import time, logging import json, traceback, ast -from typing import Optional - -def get_prompt(*args, **kwargs): - # make this safe checks, it should not throw any exceptions - if len(args) > 1: - messages = args[1] - prompt = " ".join(message["content"] for message in messages) - return prompt - if "messages" in kwargs: - messages = kwargs["messages"] - prompt = " ".join(message["content"] for message in messages) - return prompt - return None +from typing import Optional, Literal, List def print_verbose(print_statement): try: @@ -174,34 +162,36 @@ class DualCache(BaseCache): if self.redis_cache is not None: self.redis_cache.flush_cache() -#### LiteLLM.Completion Cache #### +#### LiteLLM.Completion / Embedding Cache #### class Cache: def __init__( self, - type="local", - host=None, - port=None, - password=None, + type: Optional[Literal["local", "redis"]] = "local", + host: Optional[str] = None, + port: Optional[str] = None, + password: Optional[str] = None, + supported_call_types: Optional[List[Literal["completion", "acompletion", "embedding", "aembedding"]]] = ["completion", "acompletion", "embedding", "aembedding"], **kwargs ): """ Initializes the cache based on the given type. Args: - type (str, optional): The type of cache to initialize. Defaults to "local". + type (str, optional): The type of cache to initialize. Can be "local" or "redis". Defaults to "local". host (str, optional): The host address for the Redis cache. Required if type is "redis". port (int, optional): The port number for the Redis cache. Required if type is "redis". password (str, optional): The password for the Redis cache. Required if type is "redis". + supported_call_types (list, optional): List of call types to cache for. Defaults to cache == on for all call types. **kwargs: Additional keyword arguments for redis.Redis() cache Raises: ValueError: If an invalid cache type is provided. Returns: - None + None. Cache is set as a litellm param """ if type == "redis": - self.cache = RedisCache(host, port, password, **kwargs) + self.cache: BaseCache = RedisCache(host, port, password, **kwargs) if type == "local": self.cache = InMemoryCache() if "cache" not in litellm.input_callback: @@ -210,6 +200,7 @@ class Cache: litellm.success_callback.append("cache") if "cache" not in litellm._async_success_callback: litellm._async_success_callback.append("cache") + self.supported_call_types = supported_call_types # default to ["completion", "acompletion", "embedding", "aembedding"] def get_cache_key(self, *args, **kwargs): """ @@ -222,29 +213,55 @@ class Cache: Returns: str: The cache key generated from the arguments, or None if no cache key could be generated. """ - cache_key ="" + cache_key = "" + print_verbose(f"\nGetting Cache key. Kwargs: {kwargs}") + + # for streaming, we use preset_cache_key. 
It's created in wrapper(), we do this because optional params like max_tokens, get transformed for bedrock -> max_new_tokens + if kwargs.get("litellm_params", {}).get("preset_cache_key", None) is not None: + print_verbose(f"\nReturning preset cache key: {cache_key}") + return kwargs.get("litellm_params", {}).get("preset_cache_key", None) + # sort kwargs by keys, since model: [gpt-4, temperature: 0.2, max_tokens: 200] == [temperature: 0.2, max_tokens: 200, model: gpt-4] completion_kwargs = ["model", "messages", "temperature", "top_p", "n", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "response_format", "seed", "tools", "tool_choice"] - for param in completion_kwargs: + embedding_only_kwargs = ["input", "encoding_format"] # embedding kwargs = model, input, user, encoding_format. Model, user are checked in completion_kwargs + + # combined_kwargs - NEEDS to be ordered across get_cache_key(). Do not use a set() + combined_kwargs = completion_kwargs + embedding_only_kwargs + for param in combined_kwargs: # ignore litellm params here if param in kwargs: # check if param == model and model_group is passed in, then override model with model_group if param == "model": model_group = None + caching_group = None metadata = kwargs.get("metadata", None) litellm_params = kwargs.get("litellm_params", {}) if metadata is not None: model_group = metadata.get("model_group") + model_group = metadata.get("model_group", None) + caching_groups = metadata.get("caching_groups", None) + if caching_groups: + for group in caching_groups: + if model_group in group: + caching_group = group + break if litellm_params is not None: metadata = litellm_params.get("metadata", None) if metadata is not None: model_group = metadata.get("model_group", None) - param_value = model_group or kwargs[param] # use model_group if it exists, else use kwargs["model"] + caching_groups = metadata.get("caching_groups", None) + if caching_groups: + for group in caching_groups: + if model_group in group: + caching_group = group + break + param_value = caching_group or model_group or kwargs[param] # use caching_group, if set then model_group if it exists, else use kwargs["model"] else: if kwargs[param] is None: continue # ignore None params param_value = kwargs[param] cache_key+= f"{str(param)}: {str(param_value)}" + print_verbose(f"\nCreated cache key: {cache_key}") return cache_key def generate_streaming_content(self, content): @@ -297,4 +314,9 @@ class Cache: result = result.model_dump_json() self.cache.set_cache(cache_key, result, **kwargs) except Exception as e: + print_verbose(f"LiteLLM Cache: Excepton add_cache: {str(e)}") + traceback.print_exc() pass + + async def _async_add_cache(self, result, *args, **kwargs): + self.add_cache(result, *args, **kwargs) \ No newline at end of file diff --git a/litellm/exceptions.py b/litellm/exceptions.py index ec0fe0049..ae714cfed 100644 --- a/litellm/exceptions.py +++ b/litellm/exceptions.py @@ -12,16 +12,19 @@ from openai import ( AuthenticationError, BadRequestError, + NotFoundError, RateLimitError, APIStatusError, OpenAIError, APIError, APITimeoutError, APIConnectionError, - APIResponseValidationError + APIResponseValidationError, + UnprocessableEntityError ) import httpx + class AuthenticationError(AuthenticationError): # type: ignore def __init__(self, message, llm_provider, model, response: httpx.Response): self.status_code = 401 @@ -34,6 +37,20 @@ class AuthenticationError(AuthenticationError): # type: ignore body=None ) # Call the base class constructor 
with the parameters it needs +# raise when invalid models passed, example gpt-8 +class NotFoundError(NotFoundError): # type: ignore + def __init__(self, message, model, llm_provider, response: httpx.Response): + self.status_code = 404 + self.message = message + self.model = model + self.llm_provider = llm_provider + super().__init__( + self.message, + response=response, + body=None + ) # Call the base class constructor with the parameters it needs + + class BadRequestError(BadRequestError): # type: ignore def __init__(self, message, model, llm_provider, response: httpx.Response): self.status_code = 400 @@ -46,6 +63,18 @@ class BadRequestError(BadRequestError): # type: ignore body=None ) # Call the base class constructor with the parameters it needs +class UnprocessableEntityError(UnprocessableEntityError): # type: ignore + def __init__(self, message, model, llm_provider, response: httpx.Response): + self.status_code = 422 + self.message = message + self.model = model + self.llm_provider = llm_provider + super().__init__( + self.message, + response=response, + body=None + ) # Call the base class constructor with the parameters it needs + class Timeout(APITimeoutError): # type: ignore def __init__(self, message, model, llm_provider): self.status_code = 408 diff --git a/litellm/integrations/custom_logger.py b/litellm/integrations/custom_logger.py index d0efc2fb0..64c5fc4cf 100644 --- a/litellm/integrations/custom_logger.py +++ b/litellm/integrations/custom_logger.py @@ -2,8 +2,9 @@ # On success, logs events to Promptlayer import dotenv, os import requests -import requests - +from litellm.proxy._types import UserAPIKeyAuth +from litellm.caching import DualCache +from typing import Literal dotenv.load_dotenv() # Loading env variables using dotenv import traceback @@ -27,7 +28,12 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callback def log_failure_event(self, kwargs, response_obj, start_time, end_time): pass + + #### ASYNC #### + async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time): + pass + async def async_log_pre_api_call(self, model, messages, kwargs): pass @@ -37,6 +43,16 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callback async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time): pass + #### CALL HOOKS - proxy only #### + """ + Control the modify incoming / outgoung data before calling the model + """ + async def async_pre_call_hook(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: Literal["completion", "embeddings"]): + pass + + async def async_post_call_failure_hook(self, original_exception: Exception, user_api_key_dict: UserAPIKeyAuth): + pass + #### SINGLE-USE #### - https://docs.litellm.ai/docs/observability/custom_callback#using-your-custom-callback-function def log_input_event(self, model, messages, kwargs, print_verbose, callback_func): diff --git a/litellm/integrations/dynamodb.py b/litellm/integrations/dynamodb.py new file mode 100644 index 000000000..c025a0edc --- /dev/null +++ b/litellm/integrations/dynamodb.py @@ -0,0 +1,82 @@ +#### What this does #### +# On success + failure, log events to Supabase + +import dotenv, os +import requests + +dotenv.load_dotenv() # Loading env variables using dotenv +import traceback +import datetime, subprocess, sys +import litellm, uuid +from litellm._logging import print_verbose + +class DyanmoDBLogger: + # Class variables or attributes + + def __init__(self): + # Instance variables + import 
boto3 + self.dynamodb = boto3.resource('dynamodb', region_name=os.environ["AWS_REGION_NAME"]) + if litellm.dynamodb_table_name is None: + raise ValueError("LiteLLM Error, trying to use DynamoDB but not table name passed. Create a table and set `litellm.dynamodb_table_name=`") + self.table_name = litellm.dynamodb_table_name + + async def _async_log_event(self, kwargs, response_obj, start_time, end_time, print_verbose): + self.log_event(kwargs, response_obj, start_time, end_time, print_verbose) + def log_event(self, kwargs, response_obj, start_time, end_time, print_verbose): + try: + print_verbose( + f"DynamoDB Logging - Enters logging function for model {kwargs}" + ) + + # construct payload to send to DynamoDB + # follows the same params as langfuse.py + litellm_params = kwargs.get("litellm_params", {}) + metadata = litellm_params.get("metadata", {}) or {} # if litellm_params['metadata'] == None + messages = kwargs.get("messages") + optional_params = kwargs.get("optional_params", {}) + call_type = kwargs.get("call_type", "litellm.completion") + usage = response_obj["usage"] + id = response_obj.get("id", str(uuid.uuid4())) + + # Build the initial payload + payload = { + "id": id, + "call_type": call_type, + "startTime": start_time, + "endTime": end_time, + "model": kwargs.get("model", ""), + "user": kwargs.get("user", ""), + "modelParameters": optional_params, + "messages": messages, + "response": response_obj, + "usage": usage, + "metadata": metadata + } + + # Ensure everything in the payload is converted to str + for key, value in payload.items(): + try: + payload[key] = str(value) + except: + # non blocking if it can't cast to a str + pass + + + print_verbose(f"\nDynamoDB Logger - Logging payload = {payload}") + + # put data in dyanmo DB + table = self.dynamodb.Table(self.table_name) + # Assuming log_data is a dictionary with log information + response = table.put_item(Item=payload) + + print_verbose(f"Response from DynamoDB:{str(response)}") + + print_verbose( + f"DynamoDB Layer Logging - final response object: {response_obj}" + ) + return response + except: + traceback.print_exc() + print_verbose(f"DynamoDB Layer Error - {traceback.format_exc()}") + pass \ No newline at end of file diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index ea5276db2..ea32fa9d1 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -58,7 +58,7 @@ class LangFuseLogger: model=kwargs['model'], modelParameters=optional_params, prompt=prompt, - completion=response_obj['choices'][0]['message'], + completion=response_obj['choices'][0]['message'].json(), usage=Usage( prompt_tokens=response_obj['usage']['prompt_tokens'], completion_tokens=response_obj['usage']['completion_tokens'] @@ -70,6 +70,9 @@ class LangFuseLogger: f"Langfuse Layer Logging - final response object: {response_obj}" ) except: - # traceback.print_exc() + traceback.print_exc() print_verbose(f"Langfuse Layer Error - {traceback.format_exc()}") pass + + async def _async_log_event(self, kwargs, response_obj, start_time, end_time, print_verbose): + self.log_event(kwargs, response_obj, start_time, end_time, print_verbose) diff --git a/litellm/integrations/langsmith.py b/litellm/integrations/langsmith.py index 43e9de4d3..de9dd2f71 100644 --- a/litellm/integrations/langsmith.py +++ b/litellm/integrations/langsmith.py @@ -58,7 +58,7 @@ class LangsmithLogger: "inputs": { **new_kwargs }, - "outputs": response_obj, + "outputs": response_obj.json(), "session_name": project_name, "start_time": 
start_time, "end_time": end_time, diff --git a/litellm/integrations/traceloop.py b/litellm/integrations/traceloop.py index be53de0e9..33b9c92da 100644 --- a/litellm/integrations/traceloop.py +++ b/litellm/integrations/traceloop.py @@ -1,7 +1,8 @@ class TraceloopLogger: def __init__(self): from traceloop.sdk.tracing.tracing import TracerWrapper - + from traceloop.sdk import Traceloop + Traceloop.init(app_name="Litellm-Server", disable_batch=True) self.tracer_wrapper = TracerWrapper() def log_event(self, kwargs, response_obj, start_time, end_time, print_verbose): diff --git a/litellm/llms/azure.py b/litellm/llms/azure.py index f760e9fce..b014667df 100644 --- a/litellm/llms/azure.py +++ b/litellm/llms/azure.py @@ -196,8 +196,19 @@ class AzureChatCompletion(BaseLLM): else: azure_client = client response = azure_client.chat.completions.create(**data) # type: ignore - response.model = "azure/" + str(response.model) - return convert_to_model_response_object(response_object=json.loads(response.model_dump_json()), model_response_object=model_response) + stringified_response = response.model_dump_json() + ## LOGGING + logging_obj.post_call( + input=messages, + api_key=api_key, + original_response=stringified_response, + additional_args={ + "headers": headers, + "api_version": api_version, + "api_base": api_base, + }, + ) + return convert_to_model_response_object(response_object=json.loads(stringified_response), model_response_object=model_response) except AzureOpenAIError as e: exception_mapping_worked = True raise e @@ -318,7 +329,10 @@ class AzureChatCompletion(BaseLLM): data: dict, model_response: ModelResponse, azure_client_params: dict, + api_key: str, + input: list, client=None, + logging_obj=None ): response = None try: @@ -327,8 +341,23 @@ class AzureChatCompletion(BaseLLM): else: openai_aclient = client response = await openai_aclient.embeddings.create(**data) - return convert_to_model_response_object(response_object=json.loads(response.model_dump_json()), model_response_object=model_response, response_type="embedding") + stringified_response = response.model_dump_json() + ## LOGGING + logging_obj.post_call( + input=input, + api_key=api_key, + additional_args={"complete_input_dict": data}, + original_response=stringified_response, + ) + return convert_to_model_response_object(response_object=json.loads(stringified_response), model_response_object=model_response, response_type="embedding") except Exception as e: + ## LOGGING + logging_obj.post_call( + input=input, + api_key=api_key, + additional_args={"complete_input_dict": data}, + original_response=str(e), + ) raise e def embedding(self, @@ -372,13 +401,7 @@ class AzureChatCompletion(BaseLLM): azure_client_params["api_key"] = api_key elif azure_ad_token is not None: azure_client_params["azure_ad_token"] = azure_ad_token - if aembedding == True: - response = self.aembedding(data=data, model_response=model_response, azure_client_params=azure_client_params) - return response - if client is None: - azure_client = AzureOpenAI(**azure_client_params) # type: ignore - else: - azure_client = client + ## LOGGING logging_obj.pre_call( input=input, @@ -391,6 +414,14 @@ class AzureChatCompletion(BaseLLM): } }, ) + + if aembedding == True: + response = self.aembedding(data=data, input=input, logging_obj=logging_obj, api_key=api_key, model_response=model_response, azure_client_params=azure_client_params) + return response + if client is None: + azure_client = AzureOpenAI(**azure_client_params) # type: ignore + else: + azure_client = client ## COMPLETION 
CALL response = azure_client.embeddings.create(**data) # type: ignore ## LOGGING diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py index 9b6510400..616b9264e 100644 --- a/litellm/llms/bedrock.py +++ b/litellm/llms/bedrock.py @@ -482,7 +482,7 @@ def completion( logging_obj.post_call( input=prompt, api_key="", - original_response=response_body, + original_response=json.dumps(response_body), additional_args={"complete_input_dict": data}, ) print_verbose(f"raw model_response: {response}") @@ -552,6 +552,7 @@ def _embedding_func_single( ## FORMAT EMBEDDING INPUT ## provider = model.split(".")[0] inference_params = copy.deepcopy(optional_params) + inference_params.pop("user", None) # make sure user is not passed in for bedrock call if provider == "amazon": input = input.replace(os.linesep, " ") data = {"inputText": input, **inference_params} @@ -587,7 +588,7 @@ def _embedding_func_single( input=input, api_key="", additional_args={"complete_input_dict": data}, - original_response=response_body, + original_response=json.dumps(response_body), ) if provider == "cohere": response = response_body.get("embeddings") @@ -650,14 +651,5 @@ def embedding( total_tokens=input_tokens + 0 ) model_response.usage = usage - - ## LOGGING - logging_obj.post_call( - input=input, - api_key=api_key, - additional_args={"complete_input_dict": {"model": model, - "texts": input}}, - original_response=embeddings, - ) return model_response diff --git a/litellm/llms/huggingface_restapi.py b/litellm/llms/huggingface_restapi.py index c347910f8..4dae6e88f 100644 --- a/litellm/llms/huggingface_restapi.py +++ b/litellm/llms/huggingface_restapi.py @@ -542,7 +542,7 @@ class Huggingface(BaseLLM): logging_obj.pre_call( input=input, api_key=api_key, - additional_args={"complete_input_dict": data}, + additional_args={"complete_input_dict": data, "headers": headers, "api_base": embed_url}, ) ## COMPLETION CALL response = requests.post( @@ -584,6 +584,14 @@ class Huggingface(BaseLLM): "embedding": embedding # flatten list returned from hf } ) + elif isinstance(embedding, list) and isinstance(embedding[0], float): + output_data.append( + { + "object": "embedding", + "index": idx, + "embedding": embedding # flatten list returned from hf + } + ) else: output_data.append( { diff --git a/litellm/llms/ollama.py b/litellm/llms/ollama.py index a24e47c07..30aaa5381 100644 --- a/litellm/llms/ollama.py +++ b/litellm/llms/ollama.py @@ -1,10 +1,9 @@ -import requests, types +import requests, types, time import json import traceback from typing import Optional import litellm -import httpx - +import httpx, aiohttp, asyncio try: from async_generator import async_generator, yield_ # optional dependency async_generator_imported = True @@ -115,6 +114,9 @@ def get_ollama_response_stream( prompt="Why is the sky blue?", optional_params=None, logging_obj=None, + acompletion: bool = False, + model_response=None, + encoding=None ): if api_base.endswith("/api/generate"): url = api_base @@ -136,8 +138,19 @@ def get_ollama_response_stream( logging_obj.pre_call( input=None, api_key=None, - additional_args={"api_base": url, "complete_input_dict": data}, + additional_args={"api_base": url, "complete_input_dict": data, "headers": {}, "acompletion": acompletion,}, ) + if acompletion is True: + if optional_params.get("stream", False): + response = ollama_async_streaming(url=url, data=data, model_response=model_response, encoding=encoding, logging_obj=logging_obj) + else: + response = ollama_acompletion(url=url, data=data, model_response=model_response, 
encoding=encoding, logging_obj=logging_obj) + return response + + else: + return ollama_completion_stream(url=url, data=data) + +def ollama_completion_stream(url, data): session = requests.Session() with session.post(url, json=data, stream=True) as resp: @@ -169,41 +182,38 @@ def get_ollama_response_stream( traceback.print_exc() session.close() -if async_generator_imported: - # ollama implementation - @async_generator - async def async_get_ollama_response_stream( - api_base="http://localhost:11434", - model="llama2", - prompt="Why is the sky blue?", - optional_params=None, - logging_obj=None, - ): - url = f"{api_base}/api/generate" - - ## Load Config - config=litellm.OllamaConfig.get_config() - for k, v in config.items(): - if k not in optional_params: # completion(top_k=3) > cohere_config(top_k=3) <- allows for dynamic variables to be passed in - optional_params[k] = v - data = { - "model": model, - "prompt": prompt, - **optional_params - } - ## LOGGING - logging_obj.pre_call( - input=None, - api_key=None, - additional_args={"api_base": url, "complete_input_dict": data}, - ) - session = requests.Session() +async def ollama_async_streaming(url, data, model_response, encoding, logging_obj): + try: + client = httpx.AsyncClient() + async with client.stream( + url=f"{url}", + json=data, + method="POST", + timeout=litellm.request_timeout + ) as response: + if response.status_code != 200: + raise OllamaError(status_code=response.status_code, message=response.text) + + streamwrapper = litellm.CustomStreamWrapper(completion_stream=response.aiter_lines(), model=data['model'], custom_llm_provider="ollama",logging_obj=logging_obj) + async for transformed_chunk in streamwrapper: + yield transformed_chunk + except Exception as e: + traceback.print_exc() - with session.post(url, json=data, stream=True) as resp: - if resp.status_code != 200: - raise OllamaError(status_code=resp.status_code, message=resp.text) - for line in resp.iter_lines(): +async def ollama_acompletion(url, data, model_response, encoding, logging_obj): + data["stream"] = False + try: + timeout = aiohttp.ClientTimeout(total=600) # 10 minutes + async with aiohttp.ClientSession(timeout=timeout) as session: + resp = await session.post(url, json=data) + + if resp.status != 200: + text = await resp.text() + raise OllamaError(status_code=resp.status, message=text) + + completion_string = "" + async for line in resp.content.iter_any(): if line: try: json_chunk = line.decode("utf-8") @@ -217,15 +227,24 @@ if async_generator_imported: "content": "", "error": j } - await yield_({"choices": [{"delta": completion_obj}]}) + raise Exception(f"OllamError - {chunk}") if "response" in j: completion_obj = { "role": "assistant", - "content": "", + "content": j["response"], } - completion_obj["content"] = j["response"] - await yield_({"choices": [{"delta": completion_obj}]}) + completion_string = completion_string + completion_obj["content"] except Exception as e: - import logging - logging.debug(f"Error decoding JSON: {e}") - session.close() \ No newline at end of file + traceback.print_exc() + + ## RESPONSE OBJECT + model_response["choices"][0]["finish_reason"] = "stop" + model_response["choices"][0]["message"]["content"] = completion_string + model_response["created"] = int(time.time()) + model_response["model"] = "ollama/" + data['model'] + prompt_tokens = len(encoding.encode(data['prompt'])) # type: ignore + completion_tokens = len(encoding.encode(completion_string)) + model_response["usage"] = litellm.Usage(prompt_tokens=prompt_tokens, 
completion_tokens=completion_tokens, total_tokens=prompt_tokens + completion_tokens) + return model_response + except Exception as e: + traceback.print_exc() diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py index 29934d130..4fdecce10 100644 --- a/litellm/llms/openai.py +++ b/litellm/llms/openai.py @@ -195,23 +195,23 @@ class OpenAIChatCompletion(BaseLLM): **optional_params } - ## LOGGING - logging_obj.pre_call( - input=messages, - api_key=api_key, - additional_args={"headers": headers, "api_base": api_base, "acompletion": acompletion, "complete_input_dict": data}, - ) - try: max_retries = data.pop("max_retries", 2) if acompletion is True: if optional_params.get("stream", False): - return self.async_streaming(logging_obj=logging_obj, data=data, model=model, api_base=api_base, api_key=api_key, timeout=timeout, client=client, max_retries=max_retries) + return self.async_streaming(logging_obj=logging_obj, headers=headers, data=data, model=model, api_base=api_base, api_key=api_key, timeout=timeout, client=client, max_retries=max_retries) else: - return self.acompletion(data=data, model_response=model_response, api_base=api_base, api_key=api_key, timeout=timeout, client=client, max_retries=max_retries) + return self.acompletion(data=data, headers=headers, logging_obj=logging_obj, model_response=model_response, api_base=api_base, api_key=api_key, timeout=timeout, client=client, max_retries=max_retries) elif optional_params.get("stream", False): - return self.streaming(logging_obj=logging_obj, data=data, model=model, api_base=api_base, api_key=api_key, timeout=timeout, client=client, max_retries=max_retries) + return self.streaming(logging_obj=logging_obj, headers=headers, data=data, model=model, api_base=api_base, api_key=api_key, timeout=timeout, client=client, max_retries=max_retries) else: + ## LOGGING + logging_obj.pre_call( + input=messages, + api_key=api_key, + additional_args={"headers": headers, "api_base": api_base, "acompletion": acompletion, "complete_input_dict": data}, + ) + if not isinstance(max_retries, int): raise OpenAIError(status_code=422, message="max retries must be an int") if client is None: @@ -219,13 +219,14 @@ class OpenAIChatCompletion(BaseLLM): else: openai_client = client response = openai_client.chat.completions.create(**data) # type: ignore + stringified_response = response.model_dump_json() logging_obj.post_call( - input=None, + input=messages, api_key=api_key, - original_response=response, + original_response=stringified_response, additional_args={"complete_input_dict": data}, ) - return convert_to_model_response_object(response_object=json.loads(response.model_dump_json()), model_response_object=model_response) + return convert_to_model_response_object(response_object=json.loads(stringified_response), model_response_object=model_response) except Exception as e: if "Conversation roles must alternate user/assistant" in str(e) or "user and assistant roles should be alternating" in str(e): # reformat messages to ensure user/assistant are alternating, if there's either 2 consecutive 'user' messages or 2 consecutive 'assistant' message, add a blank 'user' or 'assistant' message to ensure compatibility @@ -259,6 +260,8 @@ class OpenAIChatCompletion(BaseLLM): api_base: Optional[str]=None, client=None, max_retries=None, + logging_obj=None, + headers=None ): response = None try: @@ -266,16 +269,23 @@ class OpenAIChatCompletion(BaseLLM): openai_aclient = AsyncOpenAI(api_key=api_key, base_url=api_base, http_client=litellm.aclient_session, timeout=timeout, 
max_retries=max_retries) else: openai_aclient = client + ## LOGGING + logging_obj.pre_call( + input=data['messages'], + api_key=openai_aclient.api_key, + additional_args={"headers": {"Authorization": f"Bearer {openai_aclient.api_key}"}, "api_base": openai_aclient._base_url._uri_reference, "acompletion": True, "complete_input_dict": data}, + ) response = await openai_aclient.chat.completions.create(**data) - return convert_to_model_response_object(response_object=json.loads(response.model_dump_json()), model_response_object=model_response) + stringified_response = response.model_dump_json() + logging_obj.post_call( + input=data['messages'], + api_key=api_key, + original_response=stringified_response, + additional_args={"complete_input_dict": data}, + ) + return convert_to_model_response_object(response_object=json.loads(stringified_response), model_response_object=model_response) except Exception as e: - if response and hasattr(response, "text"): - raise OpenAIError(status_code=500, message=f"{str(e)}\n\nOriginal Response: {response.text}") - else: - if type(e).__name__ == "ReadTimeout": - raise OpenAIError(status_code=408, message=f"{type(e).__name__}") - else: - raise OpenAIError(status_code=500, message=f"{str(e)}") + raise e def streaming(self, logging_obj, @@ -285,12 +295,19 @@ class OpenAIChatCompletion(BaseLLM): api_key: Optional[str]=None, api_base: Optional[str]=None, client = None, - max_retries=None + max_retries=None, + headers=None ): if client is None: openai_client = OpenAI(api_key=api_key, base_url=api_base, http_client=litellm.client_session, timeout=timeout, max_retries=max_retries) else: openai_client = client + ## LOGGING + logging_obj.pre_call( + input=data['messages'], + api_key=api_key, + additional_args={"headers": headers, "api_base": api_base, "acompletion": False, "complete_input_dict": data}, + ) response = openai_client.chat.completions.create(**data) streamwrapper = CustomStreamWrapper(completion_stream=response, model=model, custom_llm_provider="openai",logging_obj=logging_obj) return streamwrapper @@ -304,6 +321,7 @@ class OpenAIChatCompletion(BaseLLM): api_base: Optional[str]=None, client=None, max_retries=None, + headers=None ): response = None try: @@ -311,6 +329,13 @@ class OpenAIChatCompletion(BaseLLM): openai_aclient = AsyncOpenAI(api_key=api_key, base_url=api_base, http_client=litellm.aclient_session, timeout=timeout, max_retries=max_retries) else: openai_aclient = client + ## LOGGING + logging_obj.pre_call( + input=data['messages'], + api_key=api_key, + additional_args={"headers": headers, "api_base": api_base, "acompletion": True, "complete_input_dict": data}, + ) + response = await openai_aclient.chat.completions.create(**data) streamwrapper = CustomStreamWrapper(completion_stream=response, model=model, custom_llm_provider="openai",logging_obj=logging_obj) async for transformed_chunk in streamwrapper: @@ -325,6 +350,7 @@ class OpenAIChatCompletion(BaseLLM): raise OpenAIError(status_code=500, message=f"{str(e)}") async def aembedding( self, + input: list, data: dict, model_response: ModelResponse, timeout: float, @@ -332,6 +358,7 @@ class OpenAIChatCompletion(BaseLLM): api_base: Optional[str]=None, client=None, max_retries=None, + logging_obj=None ): response = None try: @@ -340,9 +367,24 @@ class OpenAIChatCompletion(BaseLLM): else: openai_aclient = client response = await openai_aclient.embeddings.create(**data) # type: ignore - return response + stringified_response = response.model_dump_json() + ## LOGGING + logging_obj.post_call( + input=input, 
+ api_key=api_key, + additional_args={"complete_input_dict": data}, + original_response=stringified_response, + ) + return convert_to_model_response_object(response_object=json.loads(stringified_response), model_response_object=model_response, response_type="embedding") # type: ignore except Exception as e: + ## LOGGING + logging_obj.post_call( + input=input, + api_key=api_key, + original_response=str(e), + ) raise e + def embedding(self, model: str, input: list, @@ -367,13 +409,6 @@ class OpenAIChatCompletion(BaseLLM): max_retries = data.pop("max_retries", 2) if not isinstance(max_retries, int): raise OpenAIError(status_code=422, message="max retries must be an int") - if aembedding == True: - response = self.aembedding(data=data, model_response=model_response, api_base=api_base, api_key=api_key, timeout=timeout, client=client, max_retries=max_retries) # type: ignore - return response - if client is None: - openai_client = OpenAI(api_key=api_key, base_url=api_base, http_client=litellm.client_session, timeout=timeout, max_retries=max_retries) - else: - openai_client = client ## LOGGING logging_obj.pre_call( input=input, @@ -381,6 +416,14 @@ class OpenAIChatCompletion(BaseLLM): additional_args={"complete_input_dict": data, "api_base": api_base}, ) + if aembedding == True: + response = self.aembedding(data=data, input=input, logging_obj=logging_obj, model_response=model_response, api_base=api_base, api_key=api_key, timeout=timeout, client=client, max_retries=max_retries) # type: ignore + return response + if client is None: + openai_client = OpenAI(api_key=api_key, base_url=api_base, http_client=litellm.client_session, timeout=timeout, max_retries=max_retries) + else: + openai_client = client + ## COMPLETION CALL response = openai_client.embeddings.create(**data) # type: ignore ## LOGGING @@ -472,12 +515,14 @@ class OpenAITextCompletion(BaseLLM): else: prompt = " ".join([message["content"] for message in messages]) # type: ignore + # don't send max retries to the api, if set + optional_params.pop("max_retries", None) + data = { "model": model, "prompt": prompt, **optional_params } - ## LOGGING logging_obj.pre_call( input=messages, diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index 37c96d773..4596e2b62 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -73,8 +73,27 @@ def ollama_pt(model, messages): # https://github.com/jmorganca/ollama/blob/af4cf final_prompt_value="### Response:", messages=messages ) + elif "llava" in model: + prompt = "" + images = [] + for message in messages: + if isinstance(message["content"], str): + prompt += message["content"] + elif isinstance(message["content"], list): + # see https://docs.litellm.ai/docs/providers/openai#openai-vision-models + for element in message["content"]: + if isinstance(element, dict): + if element["type"] == "text": + prompt += element["text"] + elif element["type"] == "image_url": + image_url = element["image_url"]["url"] + images.append(image_url) + return { + "prompt": prompt, + "images": images + } else: - prompt = "".join(m["content"] for m in messages) + prompt = "".join(m["content"] if isinstance(m['content'], str) is str else "".join(m['content']) for m in messages) return prompt def mistral_instruct_pt(messages): @@ -161,6 +180,8 @@ def phind_codellama_pt(messages): def hf_chat_template(model: str, messages: list, chat_template: Optional[Any]=None): ## get the tokenizer config from huggingface + bos_token = "" + eos_token = "" 
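As a brief aside before the template-fetch logic that follows: the empty-string defaults for `bos_token` and `eos_token` above feed directly into a Jinja2 render. The snippet below is a minimal, self-contained sketch of that rendering step, not LiteLLM's own code; the template string and messages are illustrative.

```python
# Minimal sketch: rendering a Hugging Face-style chat_template with Jinja2.
# The template and messages are illustrative, not from a real tokenizer_config.json.
from jinja2 import Environment

chat_template = (
    "{% for message in messages %}"
    "{{ bos_token }}{{ message['role'] }}: {{ message['content'] }}{{ eos_token }}\n"
    "{% endfor %}"
)

env = Environment()
template = env.from_string(chat_template)
rendered = template.render(
    bos_token="",   # matches the empty-string defaults used when no tokens are configured
    eos_token="",
    messages=[{"role": "user", "content": "Why is the sky blue?"}],
)
print(rendered)
```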
if chat_template is None: def _get_tokenizer_config(hf_model_name): url = f"https://huggingface.co/{hf_model_name}/raw/main/tokenizer_config.json" @@ -187,7 +208,10 @@ def hf_chat_template(model: str, messages: list, chat_template: Optional[Any]=No # Create a template object from the template text env = Environment() env.globals['raise_exception'] = raise_exception - template = env.from_string(chat_template) + try: + template = env.from_string(chat_template) + except Exception as e: + raise e def _is_system_in_template(): try: @@ -227,8 +251,8 @@ def hf_chat_template(model: str, messages: list, chat_template: Optional[Any]=No new_messages.append(reformatted_messages[-1]) rendered_text = template.render(bos_token=bos_token, eos_token=eos_token, messages=new_messages) return rendered_text - except: - raise Exception("Error rendering template") + except Exception as e: + raise Exception(f"Error rendering template - {str(e)}") # Anthropic template def claude_2_1_pt(messages: list): # format - https://docs.anthropic.com/claude/docs/how-to-use-system-prompts @@ -266,20 +290,26 @@ def claude_2_1_pt(messages: list): # format - https://docs.anthropic.com/claude/ ### TOGETHER AI def get_model_info(token, model): - headers = { - 'Authorization': f'Bearer {token}' - } - response = requests.get('https://api.together.xyz/models/info', headers=headers) - if response.status_code == 200: - model_info = response.json() - for m in model_info: - if m["name"].lower().strip() == model.strip(): - return m['config'].get('prompt_format', None), m['config'].get('chat_template', None) - return None, None - else: + try: + headers = { + 'Authorization': f'Bearer {token}' + } + response = requests.get('https://api.together.xyz/models/info', headers=headers) + if response.status_code == 200: + model_info = response.json() + for m in model_info: + if m["name"].lower().strip() == model.strip(): + return m['config'].get('prompt_format', None), m['config'].get('chat_template', None) + return None, None + else: + return None, None + except Exception as e: # safely fail a prompt template request return None, None def format_prompt_togetherai(messages, prompt_format, chat_template): + if prompt_format is None: + return default_pt(messages) + human_prompt, assistant_prompt = prompt_format.split('{prompt}') if chat_template is not None: @@ -397,4 +427,4 @@ def prompt_factory(model: str, messages: list, custom_llm_provider: Optional[str return hf_chat_template(original_model_name, messages) except: return default_pt(messages=messages) # default that covers Bloom, T-5, any non-chat tuned model (e.g. 
base Llama2) - \ No newline at end of file + diff --git a/litellm/llms/replicate.py b/litellm/llms/replicate.py index a4ecbdc1c..b952193e6 100644 --- a/litellm/llms/replicate.py +++ b/litellm/llms/replicate.py @@ -232,7 +232,8 @@ def completion( if system_prompt is not None: input_data = { "prompt": prompt, - "system_prompt": system_prompt + "system_prompt": system_prompt, + **optional_params } # Otherwise, use the prompt as is else: diff --git a/litellm/llms/sagemaker.py b/litellm/llms/sagemaker.py index 96d06bc20..2bfa9f82a 100644 --- a/litellm/llms/sagemaker.py +++ b/litellm/llms/sagemaker.py @@ -158,6 +158,7 @@ def completion( ) except Exception as e: raise SagemakerError(status_code=500, message=f"{str(e)}") + response = response["Body"].read().decode("utf8") ## LOGGING logging_obj.post_call( @@ -171,10 +172,17 @@ def completion( completion_response = json.loads(response) try: completion_response_choices = completion_response[0] + completion_output = "" if "generation" in completion_response_choices: - model_response["choices"][0]["message"]["content"] = completion_response_choices["generation"] + completion_output += completion_response_choices["generation"] elif "generated_text" in completion_response_choices: - model_response["choices"][0]["message"]["content"] = completion_response_choices["generated_text"] + completion_output += completion_response_choices["generated_text"] + + # check if the prompt template is part of output, if so - filter it out + if completion_output.startswith(prompt) and "" in prompt: + completion_output = completion_output.replace(prompt, "", 1) + + model_response["choices"][0]["message"]["content"] = completion_output except: raise SagemakerError(message=f"LiteLLM Error: Unable to parse sagemaker RAW RESPONSE {json.dumps(completion_response)}", status_code=500) diff --git a/litellm/llms/together_ai.py b/litellm/llms/together_ai.py index 210ed497e..540dbe202 100644 --- a/litellm/llms/together_ai.py +++ b/litellm/llms/together_ai.py @@ -173,10 +173,11 @@ def completion( message=json.dumps(completion_response["output"]), status_code=response.status_code ) - if len(completion_response["output"]["choices"][0]["text"]) > 0: + if len(completion_response["output"]["choices"][0]["text"]) >= 0: model_response["choices"][0]["message"]["content"] = completion_response["output"]["choices"][0]["text"] ## CALCULATING USAGE + print_verbose(f"CALCULATING TOGETHERAI TOKEN USAGE. 
Model Response: {model_response}; model_response['choices'][0]['message'].get('content', ''): {model_response['choices'][0]['message'].get('content', None)}") prompt_tokens = len(encoding.encode(prompt)) completion_tokens = len( encoding.encode(model_response["choices"][0]["message"].get("content", "")) diff --git a/litellm/llms/vertex_ai.py b/litellm/llms/vertex_ai.py index 11a1e0c6e..5457ee40d 100644 --- a/litellm/llms/vertex_ai.py +++ b/litellm/llms/vertex_ai.py @@ -4,7 +4,7 @@ from enum import Enum import requests import time from typing import Callable, Optional -from litellm.utils import ModelResponse, Usage +from litellm.utils import ModelResponse, Usage, CustomStreamWrapper import litellm import httpx @@ -57,6 +57,108 @@ class VertexAIConfig(): and not isinstance(v, (types.FunctionType, types.BuiltinFunctionType, classmethod, staticmethod)) and v is not None} +def _get_image_bytes_from_url(image_url: str) -> bytes: + try: + response = requests.get(image_url) + response.raise_for_status() # Raise an error for bad responses (4xx and 5xx) + image_bytes = response.content + return image_bytes + except requests.exceptions.RequestException as e: + # Handle any request exceptions (e.g., connection error, timeout) + return b'' # Return an empty bytes object or handle the error as needed + + +def _load_image_from_url(image_url: str): + """ + Loads an image from a URL. + + Args: + image_url (str): The URL of the image. + + Returns: + Image: The loaded image. + """ + from vertexai.preview.generative_models import GenerativeModel, Part, GenerationConfig, Image + image_bytes = _get_image_bytes_from_url(image_url) + return Image.from_bytes(image_bytes) + +def _gemini_vision_convert_messages( + messages: list +): + """ + Converts given messages for GPT-4 Vision to Gemini format. + + Args: + messages (list): The messages to convert. Each message can be a dictionary with a "content" key. The content can be a string or a list of elements. If it is a string, it will be concatenated to the prompt. If it is a list, each element will be processed based on its type: + - If the element is a dictionary with a "type" key equal to "text", its "text" value will be concatenated to the prompt. + - If the element is a dictionary with a "type" key equal to "image_url", its "image_url" value will be added to the list of images. + + Returns: + tuple: A tuple containing the prompt (a string) and the processed images (a list of objects representing the images). + + Raises: + VertexAIError: If the import of the 'vertexai' module fails, indicating that 'google-cloud-aiplatform' needs to be installed. + Exception: If any other exception occurs during the execution of the function. + + Note: + This function is based on the code from the 'gemini/getting-started/intro_gemini_python.ipynb' notebook in the 'generative-ai' repository on GitHub. + The supported MIME types for images include 'image/png' and 'image/jpeg'. + + Examples: + >>> messages = [ + ... {"content": "Hello, world!"}, + ... {"content": [{"type": "text", "text": "This is a text message."}, {"type": "image_url", "image_url": "example.com/image.png"}]}, + ... 
] + >>> _gemini_vision_convert_messages(messages) + ('Hello, world!This is a text message.', [, ]) + """ + try: + import vertexai + except: + raise VertexAIError(status_code=400,message="vertexai import failed please run `pip install google-cloud-aiplatform`") + try: + from vertexai.preview.language_models import ChatModel, CodeChatModel, InputOutputTextPair + from vertexai.language_models import TextGenerationModel, CodeGenerationModel + from vertexai.preview.generative_models import GenerativeModel, Part, GenerationConfig, Image + + # given messages for gpt-4 vision, convert them for gemini + # https://github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/getting-started/intro_gemini_python.ipynb + prompt = "" + images = [] + for message in messages: + if isinstance(message["content"], str): + prompt += message["content"] + elif isinstance(message["content"], list): + # see https://docs.litellm.ai/docs/providers/openai#openai-vision-models + for element in message["content"]: + if isinstance(element, dict): + if element["type"] == "text": + prompt += element["text"] + elif element["type"] == "image_url": + image_url = element["image_url"]["url"] + images.append(image_url) + # processing images passed to gemini + processed_images = [] + for img in images: + if "gs://" in img: + # Case 1: Images with Cloud Storage URIs + # The supported MIME types for images include image/png and image/jpeg. + part_mime = "image/png" if "png" in img else "image/jpeg" + google_clooud_part = Part.from_uri(img, mime_type=part_mime) + processed_images.append(google_clooud_part) + elif "https:/" in img: + # Case 2: Images with direct links + image = _load_image_from_url(img) + processed_images.append(image) + elif ".mp4" in img and "gs://" in img: + # Case 3: Videos with Cloud Storage URIs + part_mime = "video/mp4" + google_clooud_part = Part.from_uri(img, mime_type=part_mime) + processed_images.append(google_clooud_part) + return prompt, processed_images + except Exception as e: + raise e + def completion( model: str, messages: list, @@ -69,6 +171,7 @@ def completion( optional_params=None, litellm_params=None, logger_fn=None, + acompletion: bool=False ): try: import vertexai @@ -77,6 +180,8 @@ def completion( try: from vertexai.preview.language_models import ChatModel, CodeChatModel, InputOutputTextPair from vertexai.language_models import TextGenerationModel, CodeGenerationModel + from vertexai.preview.generative_models import GenerativeModel, Part, GenerationConfig + vertexai.init( project=vertex_project, location=vertex_location @@ -90,34 +195,94 @@ def completion( # vertexai does not use an API key, it looks for credentials.json in the environment - prompt = " ".join([message["content"] for message in messages]) + prompt = " ".join([message["content"] for message in messages if isinstance(message["content"], str)]) mode = "" request_str = "" - if model in litellm.vertex_chat_models: - chat_model = ChatModel.from_pretrained(model) + response_obj = None + if model in litellm.vertex_language_models: + llm_model = GenerativeModel(model) + mode = "" + request_str += f"llm_model = GenerativeModel({model})\n" + elif model in litellm.vertex_vision_models: + llm_model = GenerativeModel(model) + request_str += f"llm_model = GenerativeModel({model})\n" + mode = "vision" + elif model in litellm.vertex_chat_models: + llm_model = ChatModel.from_pretrained(model) mode = "chat" - request_str += f"chat_model = ChatModel.from_pretrained({model})\n" + request_str += f"llm_model = ChatModel.from_pretrained({model})\n" 
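Before the remaining model branches below, a usage-level note: with the new `vertex_language_models` / `vertex_vision_models` routing, Gemini models are reached through the ordinary `completion()` entrypoint. The sketch below is a hedged example; the project id, region, and model name are illustrative placeholders, and Vertex AI credentials are assumed to be configured in the environment.

```python
# Hedged usage sketch for the new Gemini routing (illustrative values only).
import litellm
from litellm import completion

litellm.vertex_project = "my-gcp-project"   # placeholder project id
litellm.vertex_location = "us-central1"     # placeholder region

response = completion(
    model="gemini-pro",  # assumed to resolve into vertex_language_models
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)
```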
elif model in litellm.vertex_text_models: - text_model = TextGenerationModel.from_pretrained(model) + llm_model = TextGenerationModel.from_pretrained(model) mode = "text" - request_str += f"text_model = TextGenerationModel.from_pretrained({model})\n" + request_str += f"llm_model = TextGenerationModel.from_pretrained({model})\n" elif model in litellm.vertex_code_text_models: - text_model = CodeGenerationModel.from_pretrained(model) + llm_model = CodeGenerationModel.from_pretrained(model) mode = "text" - request_str += f"text_model = CodeGenerationModel.from_pretrained({model})\n" - else: # vertex_code_chat_models - chat_model = CodeChatModel.from_pretrained(model) + request_str += f"llm_model = CodeGenerationModel.from_pretrained({model})\n" + else: # vertex_code_llm_models + llm_model = CodeChatModel.from_pretrained(model) mode = "chat" - request_str += f"chat_model = CodeChatModel.from_pretrained({model})\n" + request_str += f"llm_model = CodeChatModel.from_pretrained({model})\n" - if mode == "chat": - chat = chat_model.start_chat() - request_str+= f"chat = chat_model.start_chat()\n" + if acompletion == True: # [TODO] expand support to vertex ai chat + text models + if optional_params.get("stream", False) is True: + # async streaming + return async_streaming(llm_model=llm_model, mode=mode, prompt=prompt, logging_obj=logging_obj, request_str=request_str, model=model, model_response=model_response, messages=messages, print_verbose=print_verbose, **optional_params) + return async_completion(llm_model=llm_model, mode=mode, prompt=prompt, logging_obj=logging_obj, request_str=request_str, model=model, model_response=model_response, encoding=encoding, messages=messages,print_verbose=print_verbose,**optional_params) + if mode == "": + chat = llm_model.start_chat() + request_str+= f"chat = llm_model.start_chat()\n" + + if "stream" in optional_params and optional_params["stream"] == True: + stream = optional_params.pop("stream") + request_str += f"chat.send_message({prompt}, generation_config=GenerationConfig(**{optional_params}), stream={stream})\n" + ## LOGGING + logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + model_response = chat.send_message(prompt, generation_config=GenerationConfig(**optional_params), stream=stream) + optional_params["stream"] = True + return model_response + request_str += f"chat.send_message({prompt}, generation_config=GenerationConfig(**{optional_params})).text\n" ## LOGGING + logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + response_obj = chat.send_message(prompt, generation_config=GenerationConfig(**optional_params)) + completion_response = response_obj.text + response_obj = response_obj._raw_response + elif mode == "vision": + print_verbose("\nMaking VertexAI Gemini Pro Vision Call") + print_verbose(f"\nProcessing input messages = {messages}") + + prompt, images = _gemini_vision_convert_messages(messages=messages) + content = [prompt] + images + if "stream" in optional_params and optional_params["stream"] == True: + stream = optional_params.pop("stream") + request_str += f"response = llm_model.generate_content({content}, generation_config=GenerationConfig(**{optional_params}), stream={stream})\n" + logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + + model_response = llm_model.generate_content( + 
contents=content, + generation_config=GenerationConfig(**optional_params), + stream=True + ) + optional_params["stream"] = True + return model_response + + request_str += f"response = llm_model.generate_content({content})\n" + ## LOGGING + logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + ## LLM Call + response = llm_model.generate_content( + contents=content, + generation_config=GenerationConfig(**optional_params) + ) + completion_response = response.text + response_obj = response._raw_response + elif mode == "chat": + chat = llm_model.start_chat() + request_str+= f"chat = llm_model.start_chat()\n" if "stream" in optional_params and optional_params["stream"] == True: # NOTE: VertexAI does not accept stream=True as a param and raises an error, @@ -125,27 +290,30 @@ def completion( # after we get the response we add optional_params["stream"] = True, since main.py needs to know it's a streaming response to then transform it for the OpenAI format optional_params.pop("stream", None) # vertex ai raises an error when passing stream in optional params request_str += f"chat.send_message_streaming({prompt}, **{optional_params})\n" + ## LOGGING logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) model_response = chat.send_message_streaming(prompt, **optional_params) optional_params["stream"] = True return model_response request_str += f"chat.send_message({prompt}, **{optional_params}).text\n" + ## LOGGING logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) completion_response = chat.send_message(prompt, **optional_params).text elif mode == "text": - if "stream" in optional_params and optional_params["stream"] == True: optional_params.pop("stream", None) # See note above on handling streaming for vertex ai - request_str += f"text_model.predict_streaming({prompt}, **{optional_params})\n" + request_str += f"llm_model.predict_streaming({prompt}, **{optional_params})\n" + ## LOGGING logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) - model_response = text_model.predict_streaming(prompt, **optional_params) + model_response = llm_model.predict_streaming(prompt, **optional_params) optional_params["stream"] = True return model_response - request_str += f"text_model.predict({prompt}, **{optional_params}).text\n" + request_str += f"llm_model.predict({prompt}, **{optional_params}).text\n" + ## LOGGING logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) - completion_response = text_model.predict(prompt, **optional_params).text + completion_response = llm_model.predict(prompt, **optional_params).text ## LOGGING logging_obj.post_call( @@ -161,22 +329,162 @@ def completion( model_response["created"] = int(time.time()) model_response["model"] = model ## CALCULATING USAGE - prompt_tokens = len( - encoding.encode(prompt) - ) - completion_tokens = len( - encoding.encode(model_response["choices"][0]["message"].get("content", "")) - ) - usage = Usage( - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - total_tokens=prompt_tokens + completion_tokens + if model in litellm.vertex_language_models and response_obj is not None: + model_response["choices"][0].finish_reason = 
response_obj.candidates[0].finish_reason.name + usage = Usage(prompt_tokens=response_obj.usage_metadata.prompt_token_count, + completion_tokens=response_obj.usage_metadata.candidates_token_count, + total_tokens=response_obj.usage_metadata.total_token_count) + else: + prompt_tokens = len( + encoding.encode(prompt) + ) + completion_tokens = len( + encoding.encode(model_response["choices"][0]["message"].get("content", "")) ) + usage = Usage( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens + ) model_response.usage = usage return model_response except Exception as e: raise VertexAIError(status_code=500, message=str(e)) +async def async_completion(llm_model, mode: str, prompt: str, model: str, model_response: ModelResponse, logging_obj=None, request_str=None, encoding=None, messages = None, print_verbose = None, **optional_params): + """ + Add support for acompletion calls for gemini-pro + """ + try: + from vertexai.preview.generative_models import GenerationConfig + + if mode == "": + # gemini-pro + chat = llm_model.start_chat() + ## LOGGING + logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + response_obj = await chat.send_message_async(prompt, generation_config=GenerationConfig(**optional_params)) + completion_response = response_obj.text + response_obj = response_obj._raw_response + elif mode == "vision": + print_verbose("\nMaking VertexAI Gemini Pro Vision Call") + print_verbose(f"\nProcessing input messages = {messages}") + + prompt, images = _gemini_vision_convert_messages(messages=messages) + content = [prompt] + images + + request_str += f"response = llm_model.generate_content({content})\n" + ## LOGGING + logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + + ## LLM Call + response = await llm_model._generate_content_async( + contents=content, + generation_config=GenerationConfig(**optional_params) + ) + completion_response = response.text + response_obj = response._raw_response + elif mode == "chat": + # chat-bison etc. + chat = llm_model.start_chat() + ## LOGGING + logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + response_obj = await chat.send_message_async(prompt, **optional_params) + completion_response = response_obj.text + elif mode == "text": + # gecko etc. 
+ request_str += f"llm_model.predict({prompt}, **{optional_params}).text\n" + ## LOGGING + logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + response_obj = await llm_model.predict_async(prompt, **optional_params) + completion_response = response_obj.text + + ## LOGGING + logging_obj.post_call( + input=prompt, api_key=None, original_response=completion_response + ) + + ## RESPONSE OBJECT + if len(str(completion_response)) > 0: + model_response["choices"][0]["message"][ + "content" + ] = str(completion_response) + model_response["choices"][0]["message"]["content"] = str(completion_response) + model_response["created"] = int(time.time()) + model_response["model"] = model + ## CALCULATING USAGE + if model in litellm.vertex_language_models and response_obj is not None: + model_response["choices"][0].finish_reason = response_obj.candidates[0].finish_reason.name + usage = Usage(prompt_tokens=response_obj.usage_metadata.prompt_token_count, + completion_tokens=response_obj.usage_metadata.candidates_token_count, + total_tokens=response_obj.usage_metadata.total_token_count) + else: + prompt_tokens = len( + encoding.encode(prompt) + ) + completion_tokens = len( + encoding.encode(model_response["choices"][0]["message"].get("content", "")) + ) + usage = Usage( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens + ) + model_response.usage = usage + return model_response + except Exception as e: + raise VertexAIError(status_code=500, message=str(e)) + +async def async_streaming(llm_model, mode: str, prompt: str, model: str, model_response: ModelResponse, logging_obj=None, request_str=None, messages = None, print_verbose = None, **optional_params): + """ + Add support for async streaming calls for gemini-pro + """ + from vertexai.preview.generative_models import GenerationConfig + if mode == "": + # gemini-pro + chat = llm_model.start_chat() + stream = optional_params.pop("stream") + request_str += f"chat.send_message_async({prompt},generation_config=GenerationConfig(**{optional_params}), stream={stream})\n" + ## LOGGING + logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + response = await chat.send_message_async(prompt, generation_config=GenerationConfig(**optional_params), stream=stream) + optional_params["stream"] = True + elif mode == "vision": + stream = optional_params.pop("stream") + + print_verbose("\nMaking VertexAI Gemini Pro Vision Call") + print_verbose(f"\nProcessing input messages = {messages}") + + prompt, images = _gemini_vision_convert_messages(messages=messages) + content = [prompt] + images + stream = optional_params.pop("stream") + request_str += f"response = llm_model.generate_content({content}, generation_config=GenerationConfig(**{optional_params}), stream={stream})\n" + logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + + response = llm_model._generate_content_streaming_async( + contents=content, + generation_config=GenerationConfig(**optional_params), + stream=True + ) + optional_params["stream"] = True + elif mode == "chat": + chat = llm_model.start_chat() + optional_params.pop("stream", None) # vertex ai raises an error when passing stream in optional params + request_str += f"chat.send_message_streaming_async({prompt}, **{optional_params})\n" + ## LOGGING + 
logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + response = chat.send_message_streaming_async(prompt, **optional_params) + optional_params["stream"] = True + elif mode == "text": + optional_params.pop("stream", None) # See note above on handling streaming for vertex ai + request_str += f"llm_model.predict_streaming_async({prompt}, **{optional_params})\n" + ## LOGGING + logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str}) + response = llm_model.predict_streaming_async(prompt, **optional_params) + + streamwrapper = CustomStreamWrapper(completion_stream=response, model=model, custom_llm_provider="vertex_ai",logging_obj=logging_obj) + async for transformed_chunk in streamwrapper: + yield transformed_chunk def embedding(): # logic for parsing in - calling - parsing out model embedding calls diff --git a/litellm/main.py b/litellm/main.py index f97659a32..d666cfb2c 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -14,6 +14,7 @@ import dotenv, traceback, random, asyncio, time, contextvars from copy import deepcopy import httpx import litellm + from litellm import ( # type: ignore client, exception_type, @@ -31,7 +32,8 @@ from litellm.utils import ( mock_completion_streaming_obj, convert_to_model_response_object, token_counter, - Usage + Usage, + get_optional_params_embeddings ) from .llms import ( anthropic, @@ -171,11 +173,14 @@ async def acompletion(*args, **kwargs): or custom_llm_provider == "azure" or custom_llm_provider == "custom_openai" or custom_llm_provider == "anyscale" + or custom_llm_provider == "mistral" or custom_llm_provider == "openrouter" or custom_llm_provider == "deepinfra" or custom_llm_provider == "perplexity" or custom_llm_provider == "text-completion-openai" - or custom_llm_provider == "huggingface"): # currently implemented aiohttp calls for just azure and openai, soon all. + or custom_llm_provider == "huggingface" + or custom_llm_provider == "ollama" + or custom_llm_provider == "vertex_ai"): # currently implemented aiohttp calls for just azure and openai, soon all. 
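The `vertex_ai` entry in the provider check above is what routes async calls into the new `async_completion` / `async_streaming` helpers (the same list also newly admits `ollama` and `mistral`). A minimal usage sketch — illustration only, not part of the diff — assuming `gemini-pro` resolves to the `vertex_ai` provider and that `VERTEXAI_PROJECT` / `VERTEXAI_LOCATION` (or `litellm.vertex_project` / `litellm.vertex_location`) are configured:

```python
import asyncio
from litellm import acompletion

async def main():
    # non-streaming async call - served by async_completion() above
    response = await acompletion(
        model="gemini-pro",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    )
    print(response)

    # async streaming call - served by async_streaming() above and
    # handed back to the caller as an async generator
    stream = await acompletion(
        model="gemini-pro",
        messages=[{"role": "user", "content": "Count to three."}],
        stream=True,
    )
    async for chunk in stream:
        print(chunk)

asyncio.run(main())
```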
if kwargs.get("stream", False): response = completion(*args, **kwargs) else: @@ -200,9 +205,12 @@ async def acompletion(*args, **kwargs): async def _async_streaming(response, model, custom_llm_provider, args): try: + print_verbose(f"received response in _async_streaming: {response}") async for line in response: + print_verbose(f"line in async streaming: {line}") yield line except Exception as e: + print_verbose(f"error raised _async_streaming: {traceback.format_exc()}") raise exception_type( model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args, ) @@ -278,7 +286,7 @@ def completion( # Optional liteLLM function params **kwargs, -) -> ModelResponse: +) -> Union[ModelResponse, CustomStreamWrapper]: """ Perform a completion() using any of litellm supported llms (example gpt-4, gpt-3.5-turbo, claude-2, command-nightly) Parameters: @@ -319,7 +327,6 @@ def completion( ######### unpacking kwargs ##################### args = locals() api_base = kwargs.get('api_base', None) - return_async = kwargs.get('return_async', False) mock_response = kwargs.get('mock_response', None) force_timeout= kwargs.get('force_timeout', 600) ## deprecated logger_fn = kwargs.get('logger_fn', None) @@ -344,13 +351,14 @@ def completion( final_prompt_value = kwargs.get("final_prompt_value", None) bos_token = kwargs.get("bos_token", None) eos_token = kwargs.get("eos_token", None) + preset_cache_key = kwargs.get("preset_cache_key", None) hf_model_name = kwargs.get("hf_model_name", None) ### ASYNC CALLS ### acompletion = kwargs.get("acompletion", False) client = kwargs.get("client", None) ######## end of unpacking kwargs ########### openai_params = ["functions", "function_call", "temperature", "temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "request_timeout", "api_base", "api_version", "api_key", "deployment_id", "organization", "base_url", "default_headers", "timeout", "response_format", "seed", "tools", "tool_choice", "max_retries"] - litellm_params = ["metadata", "acompletion", "caching", "return_async", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "fallbacks", "azure", "headers", "model_list", "num_retries", "context_window_fallback_dict", "roles", "final_prompt_value", "bos_token", "eos_token", "request_timeout", "complete_response", "self", "client", "rpm", "tpm", "input_cost_per_token", "output_cost_per_token", "hf_model_name", "model_info", "proxy_server_request"] + litellm_params = ["metadata", "acompletion", "caching", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "fallbacks", "azure", "headers", "model_list", "num_retries", "context_window_fallback_dict", "roles", "final_prompt_value", "bos_token", "eos_token", "request_timeout", "complete_response", "self", "client", "rpm", "tpm", "input_cost_per_token", "output_cost_per_token", "hf_model_name", "model_info", "proxy_server_request", "preset_cache_key", "caching_groups"] default_params = openai_params + litellm_params non_default_params = {k: v for k,v in kwargs.items() if k not in default_params} # model-specific params - pass them straight to the model/provider if mock_response: @@ -384,7 +392,6 @@ def completion( model=deployment_id custom_llm_provider="azure" model, 
custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base, api_key=api_key) - ### REGISTER CUSTOM MODEL PRICING -- IF GIVEN ### if input_cost_per_token is not None and output_cost_per_token is not None: litellm.register_model({ @@ -448,7 +455,6 @@ def completion( # For logging - save the values of the litellm-specific params passed in litellm_params = get_litellm_params( acompletion=acompletion, - return_async=return_async, api_key=api_key, force_timeout=force_timeout, logger_fn=logger_fn, @@ -460,7 +466,8 @@ def completion( completion_call_id=id, metadata=metadata, model_info=model_info, - proxy_server_request=proxy_server_request + proxy_server_request=proxy_server_request, + preset_cache_key=preset_cache_key ) logging.update_environment_variables(model=model, user=user, optional_params=optional_params, litellm_params=litellm_params) if custom_llm_provider == "azure": @@ -524,23 +531,25 @@ def completion( client=client # pass AsyncAzureOpenAI, AzureOpenAI client ) - ## LOGGING - logging.post_call( - input=messages, - api_key=api_key, - original_response=response, - additional_args={ - "headers": headers, - "api_version": api_version, - "api_base": api_base, - }, - ) + if optional_params.get("stream", False) or acompletion == True: + ## LOGGING + logging.post_call( + input=messages, + api_key=api_key, + original_response=response, + additional_args={ + "headers": headers, + "api_version": api_version, + "api_base": api_base, + }, + ) elif ( model in litellm.open_ai_chat_completion_models or custom_llm_provider == "custom_openai" or custom_llm_provider == "deepinfra" or custom_llm_provider == "perplexity" or custom_llm_provider == "anyscale" + or custom_llm_provider == "mistral" or custom_llm_provider == "openai" or "ft:gpt-3.5-turbo" in model # finetune gpt-3.5-turbo ): # allow user to make an openai call with a custom base @@ -604,19 +613,19 @@ def completion( ) raise e - ## LOGGING - logging.post_call( - input=messages, - api_key=api_key, - original_response=response, - additional_args={"headers": headers}, - ) + if optional_params.get("stream", False): + ## LOGGING + logging.post_call( + input=messages, + api_key=api_key, + original_response=response, + additional_args={"headers": headers}, + ) elif ( custom_llm_provider == "text-completion-openai" or "ft:babbage-002" in model or "ft:davinci-002" in model # support for finetuned completion models ): - # print("calling custom openai provider") openai.api_type = "openai" api_base = ( @@ -655,17 +664,6 @@ def completion( prompt = messages[0]["content"] else: prompt = " ".join([message["content"] for message in messages]) # type: ignore - ## LOGGING - logging.pre_call( - input=prompt, - api_key=api_key, - additional_args={ - "openai_organization": litellm.organization, - "headers": headers, - "api_base": api_base, - "api_type": openai.api_type, - }, - ) ## COMPLETION CALL model_response = openai_text_completions.completion( model=model, @@ -681,9 +679,14 @@ def completion( logger_fn=logger_fn ) - # if "stream" in optional_params and optional_params["stream"] == True: - # response = CustomStreamWrapper(model_response, model, custom_llm_provider="text-completion-openai", logging_obj=logging) - # return response + if optional_params.get("stream", False) or acompletion == True: + ## LOGGING + logging.post_call( + input=messages, + api_key=api_key, + original_response=model_response, + additional_args={"headers": headers}, + ) response = model_response elif ( 
"replicate" in model or @@ -728,8 +731,16 @@ def completion( ) if "stream" in optional_params and optional_params["stream"] == True: # don't try to access stream object, - response = CustomStreamWrapper(model_response, model, logging_obj=logging, custom_llm_provider="replicate") - return response + model_response = CustomStreamWrapper(model_response, model, logging_obj=logging, custom_llm_provider="replicate") # type: ignore + + if optional_params.get("stream", False) or acompletion == True: + ## LOGGING + logging.post_call( + input=messages, + api_key=replicate_key, + original_response=model_response, + ) + response = model_response elif custom_llm_provider=="anthropic": @@ -749,7 +760,7 @@ def completion( custom_prompt_dict or litellm.custom_prompt_dict ) - model_response = anthropic.completion( + response = anthropic.completion( model=model, messages=messages, api_base=api_base, @@ -765,9 +776,16 @@ def completion( ) if "stream" in optional_params and optional_params["stream"] == True: # don't try to access stream object, - response = CustomStreamWrapper(model_response, model, custom_llm_provider="anthropic", logging_obj=logging) - return response - response = model_response + response = CustomStreamWrapper(response, model, custom_llm_provider="anthropic", logging_obj=logging) + + if optional_params.get("stream", False) or acompletion == True: + ## LOGGING + logging.post_call( + input=messages, + api_key=api_key, + original_response=response, + ) + response = response elif custom_llm_provider == "nlp_cloud": nlp_cloud_key = ( api_key or litellm.nlp_cloud_key or get_secret("NLP_CLOUD_API_KEY") or litellm.api_key @@ -780,7 +798,7 @@ def completion( or "https://api.nlpcloud.io/v1/gpu/" ) - model_response = nlp_cloud.completion( + response = nlp_cloud.completion( model=model, messages=messages, api_base=api_base, @@ -796,9 +814,17 @@ def completion( if "stream" in optional_params and optional_params["stream"] == True: # don't try to access stream object, - response = CustomStreamWrapper(model_response, model, custom_llm_provider="nlp_cloud", logging_obj=logging) - return response - response = model_response + response = CustomStreamWrapper(response, model, custom_llm_provider="nlp_cloud", logging_obj=logging) + + if optional_params.get("stream", False) or acompletion == True: + ## LOGGING + logging.post_call( + input=messages, + api_key=api_key, + original_response=response, + ) + + response = response elif custom_llm_provider == "aleph_alpha": aleph_alpha_key = ( api_key or litellm.aleph_alpha_key or get_secret("ALEPH_ALPHA_API_KEY") or get_secret("ALEPHALPHA_API_KEY") or litellm.api_key @@ -1100,7 +1126,7 @@ def completion( ) return response response = model_response - elif model in litellm.vertex_chat_models or model in litellm.vertex_code_chat_models or model in litellm.vertex_text_models or model in litellm.vertex_code_text_models: + elif custom_llm_provider == "vertex_ai": vertex_ai_project = (litellm.vertex_project or get_secret("VERTEXAI_PROJECT")) vertex_ai_location = (litellm.vertex_location @@ -1117,10 +1143,11 @@ def completion( encoding=encoding, vertex_location=vertex_ai_location, vertex_project=vertex_ai_project, - logging_obj=logging + logging_obj=logging, + acompletion=acompletion ) - if "stream" in optional_params and optional_params["stream"] == True: + if "stream" in optional_params and optional_params["stream"] == True and acompletion == False: response = CustomStreamWrapper( model_response, model, custom_llm_provider="vertex_ai", logging_obj=logging ) @@ -1186,6 
+1213,7 @@ def completion( # "SageMaker is currently not supporting streaming responses." # fake streaming for sagemaker + print_verbose(f"ENTERS SAGEMAKER CUSTOMSTREAMWRAPPER") resp_string = model_response["choices"][0]["message"]["content"] response = CustomStreamWrapper( resp_string, model, custom_llm_provider="sagemaker", logging_obj=logging @@ -1200,7 +1228,7 @@ def completion( custom_prompt_dict or litellm.custom_prompt_dict ) - model_response = bedrock.completion( + response = bedrock.completion( model=model, messages=messages, custom_prompt_dict=litellm.custom_prompt_dict, @@ -1218,16 +1246,24 @@ def completion( # don't try to access stream object, if "ai21" in model: response = CustomStreamWrapper( - model_response, model, custom_llm_provider="bedrock", logging_obj=logging + response, model, custom_llm_provider="bedrock", logging_obj=logging ) else: response = CustomStreamWrapper( - iter(model_response), model, custom_llm_provider="bedrock", logging_obj=logging + iter(response), model, custom_llm_provider="bedrock", logging_obj=logging ) - return response + + if optional_params.get("stream", False): + ## LOGGING + logging.post_call( + input=messages, + api_key=None, + original_response=response, + ) + ## RESPONSE OBJECT - response = model_response + response = response elif custom_llm_provider == "vllm": model_response = vllm.completion( model=model, @@ -1273,14 +1309,18 @@ def completion( ) else: prompt = prompt_factory(model=model, messages=messages, custom_llm_provider=custom_llm_provider) - ## LOGGING - if kwargs.get('acompletion', False) == True: - if optional_params.get("stream", False) == True: - # assume all ollama responses are streamed - async_generator = ollama.async_get_ollama_response_stream(api_base, model, prompt, optional_params, logging_obj=logging) - return async_generator + if isinstance(prompt, dict): + # for multimode models - ollama/llava prompt_factory returns a dict { + # "prompt": prompt, + # "images": images + # } + prompt, images = prompt["prompt"], prompt["images"] + optional_params["images"] = images - generator = ollama.get_ollama_response_stream(api_base, model, prompt, optional_params, logging_obj=logging) + ## LOGGING + generator = ollama.get_ollama_response_stream(api_base, model, prompt, optional_params, logging_obj=logging, acompletion=acompletion, model_response=model_response, encoding=encoding) + if acompletion is True: + return generator if optional_params.get("stream", False) == True: # assume all ollama responses are streamed response = CustomStreamWrapper( @@ -1716,8 +1756,7 @@ async def aembedding(*args, **kwargs): or custom_llm_provider == "anyscale" or custom_llm_provider == "openrouter" or custom_llm_provider == "deepinfra" - or custom_llm_provider == "perplexity" - or custom_llm_provider == "huggingface"): # currently implemented aiohttp calls for just azure and openai, soon all. + or custom_llm_provider == "perplexity"): # currently implemented aiohttp calls for just azure and openai, soon all. 
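Note that `huggingface` drops out of the async-embedding provider condition above. A minimal `aembedding` call sketch for the providers that remain — illustration only, assuming `OPENAI_API_KEY` is set in the environment:

```python
import asyncio
from litellm import aembedding

async def main():
    response = await aembedding(
        model="text-embedding-ada-002",
        input=["good morning from litellm"],
    )
    print(response)

asyncio.run(main())
```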
# Await normally init_response = await loop.run_in_executor(None, func_with_context) if isinstance(init_response, dict) or isinstance(init_response, ModelResponse): ## CACHING SCENARIO @@ -1781,22 +1820,21 @@ def embedding( rpm = kwargs.pop("rpm", None) tpm = kwargs.pop("tpm", None) model_info = kwargs.get("model_info", None) + metadata = kwargs.get("metadata", None) + encoding_format = kwargs.get("encoding_format", None) proxy_server_request = kwargs.get("proxy_server_request", None) - aembedding = kwargs.pop("aembedding", None) - openai_params = ["functions", "function_call", "temperature", "temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "request_timeout", "api_base", "api_version", "api_key", "deployment_id", "organization", "base_url", "default_headers", "timeout", "response_format", "seed", "tools", "tool_choice", "max_retries", "encoding_format"] - litellm_params = ["metadata", "acompletion", "caching", "return_async", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "fallbacks", "azure", "headers", "model_list", "num_retries", "context_window_fallback_dict", "roles", "final_prompt_value", "bos_token", "eos_token", "request_timeout", "complete_response", "self", "client", "rpm", "tpm", "input_cost_per_token", "output_cost_per_token", "hf_model_name", "proxy_server_request", "model_info"] + aembedding = kwargs.get("aembedding", None) + openai_params = ["user", "request_timeout", "api_base", "api_version", "api_key", "deployment_id", "organization", "base_url", "default_headers", "timeout", "max_retries", "encoding_format"] + litellm_params = ["metadata", "aembedding", "caching", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "fallbacks", "azure", "headers", "model_list", "num_retries", "context_window_fallback_dict", "roles", "final_prompt_value", "bos_token", "eos_token", "request_timeout", "complete_response", "self", "client", "rpm", "tpm", "input_cost_per_token", "output_cost_per_token", "hf_model_name", "proxy_server_request", "model_info", "preset_cache_key", "caching_groups"] default_params = openai_params + litellm_params non_default_params = {k: v for k,v in kwargs.items() if k not in default_params} # model-specific params - pass them straight to the model/provider - optional_params = {} - for param in non_default_params: - optional_params[param] = kwargs[param] - model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base, api_key=api_key) - + model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base, api_key=api_key) + optional_params = get_optional_params_embeddings(user=user, encoding_format=encoding_format, custom_llm_provider=custom_llm_provider, **non_default_params) try: response = None logging = litellm_logging_obj - logging.update_environment_variables(model=model, user="", optional_params=optional_params, litellm_params={"timeout": timeout, "azure": azure, "litellm_call_id": litellm_call_id, "logger_fn": logger_fn, "proxy_server_request": proxy_server_request, "model_info": model_info}) + logging.update_environment_variables(model=model, user=user, 
optional_params=optional_params, litellm_params={"timeout": timeout, "azure": azure, "litellm_call_id": litellm_call_id, "logger_fn": logger_fn, "proxy_server_request": proxy_server_request, "model_info": model_info, "metadata": metadata, "aembedding": aembedding, "preset_cache_key": None, "stream_response": {}}) if azure == True or custom_llm_provider == "azure": # azure configs api_type = get_secret("AZURE_API_TYPE") or "azure" @@ -1936,7 +1974,7 @@ def embedding( ## LOGGING logging.post_call( input=input, - api_key=openai.api_key, + api_key=api_key, original_response=str(e), ) ## Map to OpenAI Exception @@ -1948,6 +1986,59 @@ def embedding( ###### Text Completion ################ +async def atext_completion(*args, **kwargs): + """ + Implemented to handle async streaming for the text completion endpoint + """ + loop = asyncio.get_event_loop() + model = args[0] if len(args) > 0 else kwargs["model"] + ### PASS ARGS TO COMPLETION ### + kwargs["acompletion"] = True + custom_llm_provider = None + try: + # Use a partial function to pass your keyword arguments + func = partial(text_completion, *args, **kwargs) + + # Add the context to the function + ctx = contextvars.copy_context() + func_with_context = partial(ctx.run, func) + + _, custom_llm_provider, _, _ = get_llm_provider(model=model, api_base=kwargs.get("api_base", None)) + + if (custom_llm_provider == "openai" + or custom_llm_provider == "azure" + or custom_llm_provider == "custom_openai" + or custom_llm_provider == "anyscale" + or custom_llm_provider == "mistral" + or custom_llm_provider == "openrouter" + or custom_llm_provider == "deepinfra" + or custom_llm_provider == "perplexity" + or custom_llm_provider == "text-completion-openai" + or custom_llm_provider == "huggingface" + or custom_llm_provider == "ollama" + or custom_llm_provider == "vertex_ai"): # currently implemented aiohttp calls for just azure and openai, soon all. + if kwargs.get("stream", False): + response = text_completion(*args, **kwargs) + else: + # Await normally + init_response = await loop.run_in_executor(None, func_with_context) + if isinstance(init_response, dict) or isinstance(init_response, ModelResponse): ## CACHING SCENARIO + response = init_response + elif asyncio.iscoroutine(init_response): + response = await init_response + else: + # Call the synchronous function using run_in_executor + response = await loop.run_in_executor(None, func_with_context) + if kwargs.get("stream", False): # return an async generator + return _async_streaming(response=response, model=model, custom_llm_provider=custom_llm_provider, args=args) + else: + return response + except Exception as e: + custom_llm_provider = custom_llm_provider or "openai" + raise exception_type( + model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args, + ) + def text_completion( prompt: Union[str, List[Union[str, List[Union[str, List[int]]]]]], # Required: The prompt(s) to generate completions for. 
model: Optional[str]=None, # Optional: either `model` or `engine` can be set @@ -2079,7 +2170,7 @@ def text_completion( *args, **all_params, ) - #print(response) + text_completion_response["id"] = response.get("id", None) text_completion_response["object"] = "text_completion" text_completion_response["created"] = response.get("created", None) @@ -2294,6 +2385,8 @@ def stream_chunk_builder(chunks: list, messages: Optional[list]=None): completion_output = combined_content elif len(combined_arguments) > 0: completion_output = combined_arguments + else: + completion_output = "" # # Update usage information if needed try: response["usage"]["prompt_tokens"] = token_counter(model=model, messages=messages) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 2c49dc02d..454b2504a 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -41,6 +41,20 @@ "litellm_provider": "openai", "mode": "chat" }, + "gpt-4-1106-preview": { + "max_tokens": 128000, + "input_cost_per_token": 0.00001, + "output_cost_per_token": 0.00003, + "litellm_provider": "openai", + "mode": "chat" + }, + "gpt-4-vision-preview": { + "max_tokens": 128000, + "input_cost_per_token": 0.00001, + "output_cost_per_token": 0.00003, + "litellm_provider": "openai", + "mode": "chat" + }, "gpt-3.5-turbo": { "max_tokens": 4097, "input_cost_per_token": 0.0000015, @@ -62,6 +76,13 @@ "litellm_provider": "openai", "mode": "chat" }, + "gpt-3.5-turbo-1106": { + "max_tokens": 16385, + "input_cost_per_token": 0.0000010, + "output_cost_per_token": 0.0000020, + "litellm_provider": "openai", + "mode": "chat" + }, "gpt-3.5-turbo-16k": { "max_tokens": 16385, "input_cost_per_token": 0.000003, @@ -76,6 +97,62 @@ "litellm_provider": "openai", "mode": "chat" }, + "ft:gpt-3.5-turbo": { + "max_tokens": 4097, + "input_cost_per_token": 0.000012, + "output_cost_per_token": 0.000016, + "litellm_provider": "openai", + "mode": "chat" + }, + "text-embedding-ada-002": { + "max_tokens": 8191, + "input_cost_per_token": 0.0000001, + "output_cost_per_token": 0.000000, + "litellm_provider": "openai", + "mode": "embedding" + }, + "azure/gpt-4-1106-preview": { + "max_tokens": 128000, + "input_cost_per_token": 0.00001, + "output_cost_per_token": 0.00003, + "litellm_provider": "azure", + "mode": "chat" + }, + "azure/gpt-4-32k": { + "max_tokens": 8192, + "input_cost_per_token": 0.00006, + "output_cost_per_token": 0.00012, + "litellm_provider": "azure", + "mode": "chat" + }, + "azure/gpt-4": { + "max_tokens": 16385, + "input_cost_per_token": 0.00003, + "output_cost_per_token": 0.00006, + "litellm_provider": "azure", + "mode": "chat" + }, + "azure/gpt-3.5-turbo-16k": { + "max_tokens": 16385, + "input_cost_per_token": 0.000003, + "output_cost_per_token": 0.000004, + "litellm_provider": "azure", + "mode": "chat" + }, + "azure/gpt-3.5-turbo": { + "max_tokens": 4097, + "input_cost_per_token": 0.0000015, + "output_cost_per_token": 0.000002, + "litellm_provider": "azure", + "mode": "chat" + }, + "azure/text-embedding-ada-002": { + "max_tokens": 8191, + "input_cost_per_token": 0.0000001, + "output_cost_per_token": 0.000000, + "litellm_provider": "azure", + "mode": "embedding" + }, "text-davinci-003": { "max_tokens": 4097, "input_cost_per_token": 0.000002, @@ -127,6 +204,7 @@ }, "claude-instant-1": { "max_tokens": 100000, + "max_output_tokens": 8191, "input_cost_per_token": 0.00000163, "output_cost_per_token": 0.00000551, "litellm_provider": 
"anthropic", @@ -134,15 +212,25 @@ }, "claude-instant-1.2": { "max_tokens": 100000, - "input_cost_per_token": 0.00000163, - "output_cost_per_token": 0.00000551, + "max_output_tokens": 8191, + "input_cost_per_token": 0.000000163, + "output_cost_per_token": 0.000000551, "litellm_provider": "anthropic", "mode": "chat" }, "claude-2": { "max_tokens": 100000, - "input_cost_per_token": 0.00001102, - "output_cost_per_token": 0.00003268, + "max_output_tokens": 8191, + "input_cost_per_token": 0.000008, + "output_cost_per_token": 0.000024, + "litellm_provider": "anthropic", + "mode": "chat" + }, + "claude-2.1": { + "max_tokens": 200000, + "max_output_tokens": 8191, + "input_cost_per_token": 0.000008, + "output_cost_per_token": 0.000024, "litellm_provider": "anthropic", "mode": "chat" }, @@ -227,9 +315,51 @@ "max_tokens": 32000, "input_cost_per_token": 0.000000125, "output_cost_per_token": 0.000000125, - "litellm_provider": "vertex_ai-chat-models", + "litellm_provider": "vertex_ai-code-chat-models", "mode": "chat" }, + "palm/chat-bison": { + "max_tokens": 4096, + "input_cost_per_token": 0.000000125, + "output_cost_per_token": 0.000000125, + "litellm_provider": "palm", + "mode": "chat" + }, + "palm/chat-bison-001": { + "max_tokens": 4096, + "input_cost_per_token": 0.000000125, + "output_cost_per_token": 0.000000125, + "litellm_provider": "palm", + "mode": "chat" + }, + "palm/text-bison": { + "max_tokens": 8196, + "input_cost_per_token": 0.000000125, + "output_cost_per_token": 0.000000125, + "litellm_provider": "palm", + "mode": "completion" + }, + "palm/text-bison-001": { + "max_tokens": 8196, + "input_cost_per_token": 0.000000125, + "output_cost_per_token": 0.000000125, + "litellm_provider": "palm", + "mode": "completion" + }, + "palm/text-bison-safety-off": { + "max_tokens": 8196, + "input_cost_per_token": 0.000000125, + "output_cost_per_token": 0.000000125, + "litellm_provider": "palm", + "mode": "completion" + }, + "palm/text-bison-safety-recitation-off": { + "max_tokens": 8196, + "input_cost_per_token": 0.000000125, + "output_cost_per_token": 0.000000125, + "litellm_provider": "palm", + "mode": "completion" + }, "command-nightly": { "max_tokens": 4096, "input_cost_per_token": 0.000015, @@ -267,6 +397,8 @@ }, "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1": { "max_tokens": 4096, + "input_cost_per_token": 0.0000, + "output_cost_per_token": 0.0000, "litellm_provider": "replicate", "mode": "chat" }, @@ -293,6 +425,7 @@ }, "openrouter/anthropic/claude-instant-v1": { "max_tokens": 100000, + "max_output_tokens": 8191, "input_cost_per_token": 0.00000163, "output_cost_per_token": 0.00000551, "litellm_provider": "openrouter", @@ -300,6 +433,7 @@ }, "openrouter/anthropic/claude-2": { "max_tokens": 100000, + "max_output_tokens": 8191, "input_cost_per_token": 0.00001102, "output_cost_per_token": 0.00003268, "litellm_provider": "openrouter", @@ -496,20 +630,31 @@ }, "anthropic.claude-v1": { "max_tokens": 100000, - "input_cost_per_token": 0.00001102, - "output_cost_per_token": 0.00003268, + "max_output_tokens": 8191, + "input_cost_per_token": 0.000008, + "output_cost_per_token": 0.000024, "litellm_provider": "bedrock", "mode": "chat" }, "anthropic.claude-v2": { "max_tokens": 100000, - "input_cost_per_token": 0.00001102, - "output_cost_per_token": 0.00003268, + "max_output_tokens": 8191, + "input_cost_per_token": 0.000008, + "output_cost_per_token": 0.000024, + "litellm_provider": "bedrock", + "mode": "chat" + }, + "anthropic.claude-v2:1": { + "max_tokens": 200000, + 
"max_output_tokens": 8191, + "input_cost_per_token": 0.000008, + "output_cost_per_token": 0.000024, "litellm_provider": "bedrock", "mode": "chat" }, "anthropic.claude-instant-v1": { "max_tokens": 100000, + "max_output_tokens": 8191, "input_cost_per_token": 0.00000163, "output_cost_per_token": 0.00000551, "litellm_provider": "bedrock", @@ -529,26 +674,80 @@ "litellm_provider": "bedrock", "mode": "chat" }, + "meta.llama2-70b-chat-v1": { + "max_tokens": 4096, + "input_cost_per_token": 0.00000195, + "output_cost_per_token": 0.00000256, + "litellm_provider": "bedrock", + "mode": "chat" + }, + "sagemaker/meta-textgeneration-llama-2-7b": { + "max_tokens": 4096, + "input_cost_per_token": 0.000, + "output_cost_per_token": 0.000, + "litellm_provider": "sagemaker", + "mode": "completion" + }, + "sagemaker/meta-textgeneration-llama-2-7b-f": { + "max_tokens": 4096, + "input_cost_per_token": 0.000, + "output_cost_per_token": 0.000, + "litellm_provider": "sagemaker", + "mode": "chat" + }, + "sagemaker/meta-textgeneration-llama-2-13b": { + "max_tokens": 4096, + "input_cost_per_token": 0.000, + "output_cost_per_token": 0.000, + "litellm_provider": "sagemaker", + "mode": "completion" + }, + "sagemaker/meta-textgeneration-llama-2-13b-f": { + "max_tokens": 4096, + "input_cost_per_token": 0.000, + "output_cost_per_token": 0.000, + "litellm_provider": "sagemaker", + "mode": "chat" + }, + "sagemaker/meta-textgeneration-llama-2-70b": { + "max_tokens": 4096, + "input_cost_per_token": 0.000, + "output_cost_per_token": 0.000, + "litellm_provider": "sagemaker", + "mode": "completion" + }, + "sagemaker/meta-textgeneration-llama-2-70b-b-f": { + "max_tokens": 4096, + "input_cost_per_token": 0.000, + "output_cost_per_token": 0.000, + "litellm_provider": "sagemaker", + "mode": "chat" + }, "together-ai-up-to-3b": { "input_cost_per_token": 0.0000001, - "output_cost_per_token": 0.0000001 + "output_cost_per_token": 0.0000001, + "litellm_provider": "together_ai" }, "together-ai-3.1b-7b": { "input_cost_per_token": 0.0000002, - "output_cost_per_token": 0.0000002 + "output_cost_per_token": 0.0000002, + "litellm_provider": "together_ai" }, "together-ai-7.1b-20b": { "max_tokens": 1000, "input_cost_per_token": 0.0000004, - "output_cost_per_token": 0.0000004 + "output_cost_per_token": 0.0000004, + "litellm_provider": "together_ai" }, "together-ai-20.1b-40b": { - "input_cost_per_token": 0.000001, - "output_cost_per_token": 0.000001 + "input_cost_per_token": 0.0000008, + "output_cost_per_token": 0.0000008, + "litellm_provider": "together_ai" }, "together-ai-40.1b-70b": { - "input_cost_per_token": 0.000003, - "output_cost_per_token": 0.000003 + "input_cost_per_token": 0.0000009, + "output_cost_per_token": 0.0000009, + "litellm_provider": "together_ai" }, "ollama/llama2": { "max_tokens": 4096, @@ -578,10 +777,38 @@ "litellm_provider": "ollama", "mode": "completion" }, + "ollama/mistral": { + "max_tokens": 8192, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "ollama", + "mode": "completion" + }, + "ollama/codellama": { + "max_tokens": 4096, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "ollama", + "mode": "completion" + }, + "ollama/orca-mini": { + "max_tokens": 4096, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "ollama", + "mode": "completion" + }, + "ollama/vicuna": { + "max_tokens": 2048, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "ollama", + "mode": "completion" + }, 
"deepinfra/meta-llama/Llama-2-70b-chat-hf": { - "max_tokens": 6144, - "input_cost_per_token": 0.000001875, - "output_cost_per_token": 0.000001875, + "max_tokens": 4096, + "input_cost_per_token": 0.000000700, + "output_cost_per_token": 0.000000950, "litellm_provider": "deepinfra", "mode": "chat" }, @@ -619,5 +846,103 @@ "output_cost_per_token": 0.00000095, "litellm_provider": "deepinfra", "mode": "chat" + }, + "perplexity/pplx-7b-chat": { + "max_tokens": 8192, + "input_cost_per_token": 0.0000000, + "output_cost_per_token": 0.000000, + "litellm_provider": "perplexity", + "mode": "chat" + }, + "perplexity/pplx-70b-chat": { + "max_tokens": 4096, + "input_cost_per_token": 0.0000000, + "output_cost_per_token": 0.000000, + "litellm_provider": "perplexity", + "mode": "chat" + }, + "perplexity/pplx-7b-online": { + "max_tokens": 4096, + "input_cost_per_token": 0.0000000, + "output_cost_per_token": 0.0005, + "litellm_provider": "perplexity", + "mode": "chat" + }, + "perplexity/pplx-70b-online": { + "max_tokens": 4096, + "input_cost_per_token": 0.0000000, + "output_cost_per_token": 0.0005, + "litellm_provider": "perplexity", + "mode": "chat" + }, + "perplexity/llama-2-13b-chat": { + "max_tokens": 4096, + "input_cost_per_token": 0.0000000, + "output_cost_per_token": 0.000000, + "litellm_provider": "perplexity", + "mode": "chat" + }, + "perplexity/llama-2-70b-chat": { + "max_tokens": 4096, + "input_cost_per_token": 0.0000000, + "output_cost_per_token": 0.000000, + "litellm_provider": "perplexity", + "mode": "chat" + }, + "perplexity/mistral-7b-instruct": { + "max_tokens": 4096, + "input_cost_per_token": 0.0000000, + "output_cost_per_token": 0.000000, + "litellm_provider": "perplexity", + "mode": "chat" + }, + "perplexity/replit-code-v1.5-3b": { + "max_tokens": 4096, + "input_cost_per_token": 0.0000000, + "output_cost_per_token": 0.000000, + "litellm_provider": "perplexity", + "mode": "chat" + }, + "anyscale/mistralai/Mistral-7B-Instruct-v0.1": { + "max_tokens": 16384, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.00000015, + "litellm_provider": "anyscale", + "mode": "chat" + }, + "anyscale/HuggingFaceH4/zephyr-7b-beta": { + "max_tokens": 16384, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.00000015, + "litellm_provider": "anyscale", + "mode": "chat" + }, + "anyscale/meta-llama/Llama-2-7b-chat-hf": { + "max_tokens": 4096, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.00000015, + "litellm_provider": "anyscale", + "mode": "chat" + }, + "anyscale/meta-llama/Llama-2-13b-chat-hf": { + "max_tokens": 4096, + "input_cost_per_token": 0.00000025, + "output_cost_per_token": 0.00000025, + "litellm_provider": "anyscale", + "mode": "chat" + }, + "anyscale/meta-llama/Llama-2-70b-chat-hf": { + "max_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000001, + "litellm_provider": "anyscale", + "mode": "chat" + }, + "anyscale/codellama/CodeLlama-34b-Instruct-hf": { + "max_tokens": 16384, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000001, + "litellm_provider": "anyscale", + "mode": "chat" } } diff --git a/litellm/proxy/_experimental/post_call_rules.py b/litellm/proxy/_experimental/post_call_rules.py new file mode 100644 index 000000000..12caa5513 --- /dev/null +++ b/litellm/proxy/_experimental/post_call_rules.py @@ -0,0 +1,4 @@ +def my_custom_rule(input): # receives the model response + # if len(input) < 5: # trigger fallback if the model response is too short + return False + return True \ No newline at end of 
file diff --git a/litellm/proxy/_types.py b/litellm/proxy/_types.py index 68709f34d..cb04f32a5 100644 --- a/litellm/proxy/_types.py +++ b/litellm/proxy/_types.py @@ -2,8 +2,21 @@ from pydantic import BaseModel, Extra, Field, root_validator from typing import Optional, List, Union, Dict, Literal from datetime import datetime import uuid, json + +class LiteLLMBase(BaseModel): + """ + Implements default functions, all pydantic objects should have. + """ + def json(self, **kwargs): + try: + return self.model_dump() # noqa + except: + # if using pydantic v1 + return self.dict() + + ######### Request Class Definition ###### -class ProxyChatCompletionRequest(BaseModel): +class ProxyChatCompletionRequest(LiteLLMBase): model: str messages: List[Dict[str, str]] temperature: Optional[float] = None @@ -38,16 +51,16 @@ class ProxyChatCompletionRequest(BaseModel): class Config: extra='allow' # allow params not defined here, these fall in litellm.completion(**kwargs) -class ModelInfoDelete(BaseModel): +class ModelInfoDelete(LiteLLMBase): id: Optional[str] -class ModelInfo(BaseModel): +class ModelInfo(LiteLLMBase): id: Optional[str] mode: Optional[Literal['embedding', 'chat', 'completion']] - input_cost_per_token: Optional[float] - output_cost_per_token: Optional[float] - max_tokens: Optional[int] + input_cost_per_token: Optional[float] = 0.0 + output_cost_per_token: Optional[float] = 0.0 + max_tokens: Optional[int] = 2048 # assume 2048 if not set # for azure models we need users to specify the base model, one azure you can call deployments - azure/my-random-model # we look up the base model in model_prices_and_context_window.json @@ -65,38 +78,41 @@ class ModelInfo(BaseModel): class Config: extra = Extra.allow # Allow extra fields protected_namespaces = () + - # @root_validator(pre=True) - # def set_model_info(cls, values): - # if values.get("id") is None: - # values.update({"id": str(uuid.uuid4())}) - # if values.get("mode") is None: - # values.update({"mode": str(uuid.uuid4())}) - # return values + @root_validator(pre=True) + def set_model_info(cls, values): + if values.get("id") is None: + values.update({"id": str(uuid.uuid4())}) + if values.get("mode") is None: + values.update({"mode": None}) + if values.get("input_cost_per_token") is None: + values.update({"input_cost_per_token": None}) + if values.get("output_cost_per_token") is None: + values.update({"output_cost_per_token": None}) + if values.get("max_tokens") is None: + values.update({"max_tokens": None}) + if values.get("base_model") is None: + values.update({"base_model": None}) + return values -class ModelParams(BaseModel): +class ModelParams(LiteLLMBase): model_name: str litellm_params: dict - model_info: Optional[ModelInfo]=None + model_info: ModelInfo - # def __init__(self, model_name: str, litellm_params: dict, model_info: Optional[ModelInfo] = None): - # self.model_name = model_name - # self.litellm_params = litellm_params - # self.model_info = model_info if model_info else ModelInfo() - # super.__init__(model_name=self.model_name, litellm_params=self.litellm_params, model_info=self.model_info) - class Config: protected_namespaces = () - # @root_validator(pre=True) - # def set_model_info(cls, values): - # if values.get("model_info") is None: - # values.update({"model_info": ModelInfo()}) - # return values + @root_validator(pre=True) + def set_model_info(cls, values): + if values.get("model_info") is None: + values.update({"model_info": ModelInfo()}) + return values -class GenerateKeyRequest(BaseModel): +class 
GenerateKeyRequest(LiteLLMBase): duration: Optional[str] = "1h" models: Optional[list] = [] aliases: Optional[dict] = {} @@ -105,26 +121,32 @@ class GenerateKeyRequest(BaseModel): user_id: Optional[str] = None max_parallel_requests: Optional[int] = None - def json(self, **kwargs): - try: - return self.model_dump() # noqa - except: - # if using pydantic v1 - return self.dict() +class UpdateKeyRequest(LiteLLMBase): + key: str + duration: Optional[str] = None + models: Optional[list] = None + aliases: Optional[dict] = None + config: Optional[dict] = None + spend: Optional[float] = None + user_id: Optional[str] = None + max_parallel_requests: Optional[int] = None -class GenerateKeyResponse(BaseModel): +class GenerateKeyResponse(LiteLLMBase): key: str expires: datetime user_id: str -class _DeleteKeyObject(BaseModel): + + + +class _DeleteKeyObject(LiteLLMBase): key: str -class DeleteKeyRequest(BaseModel): +class DeleteKeyRequest(LiteLLMBase): keys: List[_DeleteKeyObject] -class UserAPIKeyAuth(BaseModel): # the expected response object for user api key auth +class UserAPIKeyAuth(LiteLLMBase): # the expected response object for user api key auth """ Return the row in the db """ @@ -137,7 +159,7 @@ class UserAPIKeyAuth(BaseModel): # the expected response object for user api key max_parallel_requests: Optional[int] = None duration: str = "1h" -class ConfigGeneralSettings(BaseModel): +class ConfigGeneralSettings(LiteLLMBase): """ Documents all the fields supported by `general_settings` in config.yaml """ @@ -153,10 +175,12 @@ class ConfigGeneralSettings(BaseModel): health_check_interval: int = Field(300, description="background health check interval in seconds") -class ConfigYAML(BaseModel): +class ConfigYAML(LiteLLMBase): """ Documents all the fields supported by the config.yaml """ model_list: Optional[List[ModelParams]] = Field(None, description="List of supported models on the server, with model-specific configs") litellm_settings: Optional[dict] = Field(None, description="litellm Module settings. 
See __init__.py for all, example litellm.drop_params=True, litellm.set_verbose=True, litellm.api_base, litellm.cache") general_settings: Optional[ConfigGeneralSettings] = None + class Config: + protected_namespaces = () diff --git a/litellm/proxy/custom_callbacks.py b/litellm/proxy/custom_callbacks.py index c04916344..dfcd55520 100644 --- a/litellm/proxy/custom_callbacks.py +++ b/litellm/proxy/custom_callbacks.py @@ -1,3 +1,11 @@ +import sys, os, traceback + +# this file is to test litellm/proxy + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path + from litellm.integrations.custom_logger import CustomLogger import litellm import inspect @@ -36,9 +44,12 @@ class MyCustomHandler(CustomLogger): def log_success_event(self, kwargs, response_obj, start_time, end_time): print_verbose("On Success!") + async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): print_verbose(f"On Async Success!") + response_cost = litellm.completion_cost(completion_response=response_obj) + assert response_cost > 0.0 return async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time): diff --git a/litellm/proxy/health_check.py b/litellm/proxy/health_check.py index db07ae20a..f8e56c059 100644 --- a/litellm/proxy/health_check.py +++ b/litellm/proxy/health_check.py @@ -69,7 +69,6 @@ async def _perform_health_check(model_list: list): for model in model_list: litellm_params = model["litellm_params"] model_info = model.get("model_info", {}) - litellm_params["model"] = litellm.utils.remove_model_id(litellm_params["model"]) litellm_params["messages"] = _get_random_llm_message() prepped_params.append(litellm_params) diff --git a/litellm/proxy/hooks/parallel_request_limiter.py b/litellm/proxy/hooks/parallel_request_limiter.py index 4a321d009..d42a5739a 100644 --- a/litellm/proxy/hooks/parallel_request_limiter.py +++ b/litellm/proxy/hooks/parallel_request_limiter.py @@ -1,6 +1,7 @@ from typing import Optional import litellm from litellm.caching import DualCache +from litellm.proxy._types import UserAPIKeyAuth from litellm.integrations.custom_logger import CustomLogger from fastapi import HTTPException @@ -14,24 +15,28 @@ class MaxParallelRequestsHandler(CustomLogger): print(print_statement) # noqa - async def max_parallel_request_allow_request(self, max_parallel_requests: Optional[int], api_key: Optional[str], user_api_key_cache: DualCache): + async def async_pre_call_hook(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: str): + self.print_verbose(f"Inside Max Parallel Request Pre-Call Hook") + api_key = user_api_key_dict.api_key + max_parallel_requests = user_api_key_dict.max_parallel_requests + if api_key is None: return if max_parallel_requests is None: return - self.user_api_key_cache = user_api_key_cache # save the api key cache for updating the value + self.user_api_key_cache = cache # save the api key cache for updating the value # CHECK IF REQUEST ALLOWED request_count_api_key = f"{api_key}_request_count" - current = user_api_key_cache.get_cache(key=request_count_api_key) + current = cache.get_cache(key=request_count_api_key) self.print_verbose(f"current: {current}") if current is None: - user_api_key_cache.set_cache(request_count_api_key, 1) + cache.set_cache(request_count_api_key, 1) elif int(current) < max_parallel_requests: # Increase count for this token - user_api_key_cache.set_cache(request_count_api_key, int(current) + 1) + cache.set_cache(request_count_api_key, int(current) + 1) 
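# (annotation, not part of the diff) the per-key counter lives in the DualCache under
# f"{api_key}_request_count"; the reworked failure hook further below decrements it on errors,
# except when the error is the 429 "Max parallel request limit reached" raised just after this branch.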
else: raise HTTPException(status_code=429, detail="Max parallel request limit reached.") @@ -55,16 +60,24 @@ class MaxParallelRequestsHandler(CustomLogger): except Exception as e: self.print_verbose(e) # noqa - async def async_log_failure_call(self, api_key, user_api_key_cache): + async def async_log_failure_call(self, user_api_key_dict: UserAPIKeyAuth, original_exception: Exception): try: + self.print_verbose(f"Inside Max Parallel Request Failure Hook") + api_key = user_api_key_dict.api_key if api_key is None: return - request_count_api_key = f"{api_key}_request_count" - # Decrease count for this token - current = self.user_api_key_cache.get_cache(key=request_count_api_key) or 1 - new_val = current - 1 - self.print_verbose(f"updated_value in failure call: {new_val}") - self.user_api_key_cache.set_cache(request_count_api_key, new_val) + ## decrement call count if call failed + if (hasattr(original_exception, "status_code") + and original_exception.status_code == 429 + and "Max parallel request limit reached" in str(original_exception)): + pass # ignore failed calls due to max limit being reached + else: + request_count_api_key = f"{api_key}_request_count" + # Decrease count for this token + current = self.user_api_key_cache.get_cache(key=request_count_api_key) or 1 + new_val = current - 1 + self.print_verbose(f"updated_value in failure call: {new_val}") + self.user_api_key_cache.set_cache(request_count_api_key, new_val) except Exception as e: self.print_verbose(f"An exception occurred - {str(e)}") # noqa \ No newline at end of file diff --git a/litellm/proxy/proxy_cli.py b/litellm/proxy/proxy_cli.py index 9a03bca52..761319d15 100644 --- a/litellm/proxy/proxy_cli.py +++ b/litellm/proxy/proxy_cli.py @@ -3,6 +3,7 @@ import subprocess, traceback, json import os, sys import random, appdirs from datetime import datetime +import importlib from dotenv import load_dotenv import operator sys.path.append(os.getcwd()) @@ -76,13 +77,14 @@ def is_port_in_use(port): @click.option('--config', '-c', default=None, help='Path to the proxy configuration file (e.g. config.yaml). Usage `litellm --config config.yaml`') @click.option('--max_budget', default=None, type=float, help='Set max budget for API calls - works for hosted models like OpenAI, TogetherAI, Anthropic, etc.`') @click.option('--telemetry', default=True, type=bool, help='Helps us know if people are using this feature. Turn this off by doing `--telemetry False`') +@click.option('--version', '-v', default=False, is_flag=True, type=bool, help='Print LiteLLM version') @click.option('--logs', flag_value=False, type=int, help='Gets the "n" most recent logs. 
By default gets most recent log.') @click.option('--health', flag_value=True, help='Make a chat/completions request to all llms in config.yaml') @click.option('--test', flag_value=True, help='proxy chat completions url to make a test request to') @click.option('--test_async', default=False, is_flag=True, help='Calls async endpoints /queue/requests and /queue/response') @click.option('--num_requests', default=10, type=int, help='Number of requests to hit async endpoint with') @click.option('--local', is_flag=True, default=False, help='for local debugging') -def run_server(host, port, api_base, api_version, model, alias, add_key, headers, save, debug, temperature, max_tokens, request_timeout, drop_params, add_function_to_prompt, config, max_budget, telemetry, logs, test, local, num_workers, test_async, num_requests, use_queue, health): +def run_server(host, port, api_base, api_version, model, alias, add_key, headers, save, debug, temperature, max_tokens, request_timeout, drop_params, add_function_to_prompt, config, max_budget, telemetry, logs, test, local, num_workers, test_async, num_requests, use_queue, health, version): global feature_telemetry args = locals() if local: @@ -113,6 +115,10 @@ def run_server(host, port, api_base, api_version, model, alias, add_key, headers except: raise Exception("LiteLLM: No logs saved!") return + if version == True: + pkg_version = importlib.metadata.version("litellm") + click.echo(f'\nLiteLLM: Current Version = {pkg_version}\n') + return if model and "ollama" in model and api_base is None: run_ollama_serve() if test_async is True: diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index 1c096aac7..0180d232e 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -11,8 +11,10 @@ model_list: output_cost_per_token: 0.00003 max_tokens: 4096 base_model: gpt-3.5-turbo - - - model_name: openai-gpt-3.5 + - model_name: BEDROCK_GROUP + litellm_params: + model: bedrock/cohere.command-text-v14 + - model_name: Azure OpenAI GPT-4 Canada-East (External) litellm_params: model: gpt-3.5-turbo api_key: os.environ/OPENAI_API_KEY @@ -41,11 +43,12 @@ model_list: mode: completion litellm_settings: + # cache: True # setting callback class # callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance] - model_group_alias_map: {"gpt-4": "openai-gpt-3.5"} # all requests with gpt-4 model_name, get sent to openai-gpt-3.5 - general_settings: + +environment_variables: # otel: True # OpenTelemetry Logger # master_key: sk-1234 # [OPTIONAL] Only use this if you to require all calls to contain this key (Authorization: Bearer sk-1234) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index a82723051..e942d4d41 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -195,8 +195,10 @@ prisma_client: Optional[PrismaClient] = None user_api_key_cache = DualCache() user_custom_auth = None use_background_health_checks = None +use_queue = False health_check_interval = None health_check_results = {} +queue: List = [] ### INITIALIZE GLOBAL LOGGING OBJECT ### proxy_logging_obj = ProxyLogging(user_api_key_cache=user_api_key_cache) ### REDIS QUEUE ### @@ -252,51 +254,58 @@ async def user_api_key_auth(request: Request, api_key: str = fastapi.Security(ap if api_key is None: # only require api key if master key is set raise Exception(f"No api key passed in.") - route = request.url.path + route: str = request.url.path # note: never string compare api keys, 
this is vulenerable to a time attack. Use secrets.compare_digest instead is_master_key_valid = secrets.compare_digest(api_key, master_key) if is_master_key_valid: return UserAPIKeyAuth(api_key=master_key) - if (route == "/key/generate" or route == "/key/delete" or route == "/key/info") and not is_master_key_valid: - raise Exception(f"If master key is set, only master key can be used to generate, delete or get info for new keys") + if route.startswith("/key/") and not is_master_key_valid: + raise Exception(f"If master key is set, only master key can be used to generate, delete, update or get info for new keys") - if prisma_client: - ## check for cache hit (In-Memory Cache) - valid_token = user_api_key_cache.get_cache(key=api_key) - print(f"valid_token from cache: {valid_token}") - if valid_token is None: - ## check db - valid_token = await prisma_client.get_data(token=api_key, expires=datetime.utcnow()) - user_api_key_cache.set_cache(key=api_key, value=valid_token, ttl=60) - elif valid_token is not None: - print(f"API Key Cache Hit!") - if valid_token: - litellm.model_alias_map = valid_token.aliases - config = valid_token.config - if config != {}: - model_list = config.get("model_list", []) - llm_model_list = model_list - print("\n new llm router model list", llm_model_list) - if len(valid_token.models) == 0: # assume an empty model list means all models are allowed to be called - api_key = valid_token.token - valid_token_dict = _get_pydantic_json_dict(valid_token) - valid_token_dict.pop("token", None) - return UserAPIKeyAuth(api_key=api_key, **valid_token_dict) - else: - data = await request.json() - model = data.get("model", None) - if model in litellm.model_alias_map: - model = litellm.model_alias_map[model] - if model and model not in valid_token.models: - raise Exception(f"Token not allowed to access model") + if prisma_client is None: # if both master key + user key submitted, and user key != master key, and no db connected, raise an error + raise Exception("No connected db.") + + ## check for cache hit (In-Memory Cache) + valid_token = user_api_key_cache.get_cache(key=api_key) + print(f"valid_token from cache: {valid_token}") + if valid_token is None: + ## check db + print(f"api key: {api_key}") + valid_token = await prisma_client.get_data(token=api_key, expires=datetime.utcnow()) + print(f"valid token from prisma: {valid_token}") + user_api_key_cache.set_cache(key=api_key, value=valid_token, ttl=60) + elif valid_token is not None: + print(f"API Key Cache Hit!") + if valid_token: + litellm.model_alias_map = valid_token.aliases + config = valid_token.config + if config != {}: + model_list = config.get("model_list", []) + llm_model_list = model_list + print("\n new llm router model list", llm_model_list) + if len(valid_token.models) == 0: # assume an empty model list means all models are allowed to be called api_key = valid_token.token valid_token_dict = _get_pydantic_json_dict(valid_token) valid_token_dict.pop("token", None) return UserAPIKeyAuth(api_key=api_key, **valid_token_dict) else: - raise Exception(f"Invalid token") + try: + data = await request.json() + except json.JSONDecodeError: + data = {} # Provide a default value, such as an empty dictionary + model = data.get("model", None) + if model in litellm.model_alias_map: + model = litellm.model_alias_map[model] + if model and model not in valid_token.models: + raise Exception(f"Token not allowed to access model") + api_key = valid_token.token + valid_token_dict = _get_pydantic_json_dict(valid_token) + 
valid_token_dict.pop("token", None) + return UserAPIKeyAuth(api_key=api_key, **valid_token_dict) + else: + raise Exception(f"Invalid token") except Exception as e: print(f"An exception occurred - {traceback.format_exc()}") if isinstance(e, HTTPException): @@ -310,24 +319,12 @@ async def user_api_key_auth(request: Request, api_key: str = fastapi.Security(ap def prisma_setup(database_url: Optional[str]): global prisma_client, proxy_logging_obj, user_api_key_cache - proxy_logging_obj._init_litellm_callbacks() if database_url is not None: try: prisma_client = PrismaClient(database_url=database_url, proxy_logging_obj=proxy_logging_obj) except Exception as e: print("Error when initializing prisma, Ensure you run pip install prisma", e) -def celery_setup(use_queue: bool): - global celery_fn, celery_app_conn, async_result - if use_queue: - from litellm.proxy.queue.celery_worker import start_worker - from litellm.proxy.queue.celery_app import celery_app, process_job - from celery.result import AsyncResult - start_worker(os.getcwd()) - celery_fn = process_job - async_result = AsyncResult - celery_app_conn = celery_app - def load_from_azure_key_vault(use_azure_key_vault: bool = False): if use_azure_key_vault is False: return @@ -380,30 +377,14 @@ async def track_cost_callback( if "complete_streaming_response" in kwargs: # for tracking streaming cost we pass the "messages" and the output_text to litellm.completion_cost completion_response=kwargs["complete_streaming_response"] - input_text = kwargs["messages"] - output_text = completion_response["choices"][0]["message"]["content"] - response_cost = litellm.completion_cost( - model = kwargs["model"], - messages = input_text, - completion=output_text - ) + response_cost = litellm.completion_cost(completion_response=completion_response) print("streaming response_cost", response_cost) user_api_key = kwargs["litellm_params"]["metadata"].get("user_api_key", None) - print(f"user_api_key - {user_api_key}; prisma_client - {prisma_client}") if user_api_key and prisma_client: await update_prisma_database(token=user_api_key, response_cost=response_cost) elif kwargs["stream"] == False: # for non streaming responses - input_text = kwargs.get("messages", "") - print(f"type of input_text: {type(input_text)}") - if isinstance(input_text, list): - response_cost = litellm.completion_cost(completion_response=completion_response, messages=input_text) - elif isinstance(input_text, str): - response_cost = litellm.completion_cost(completion_response=completion_response, prompt=input_text) - print(f"received completion response: {completion_response}") - - print(f"regular response_cost: {response_cost}") + response_cost = litellm.completion_cost(completion_response=completion_response) user_api_key = kwargs["litellm_params"]["metadata"].get("user_api_key", None) - print(f"user_api_key - {user_api_key}; prisma_client - {prisma_client}") if user_api_key and prisma_client: await update_prisma_database(token=user_api_key, response_cost=response_cost) except Exception as e: @@ -459,7 +440,7 @@ async def _run_background_health_check(): await asyncio.sleep(health_check_interval) def load_router_config(router: Optional[litellm.Router], config_file_path: str): - global master_key, user_config_file_path, otel_logging, user_custom_auth, user_custom_auth_path, use_background_health_checks, health_check_interval + global master_key, user_config_file_path, otel_logging, user_custom_auth, user_custom_auth_path, use_background_health_checks, health_check_interval, use_queue config = {} try: 
if os.path.exists(config_file_path): @@ -504,6 +485,18 @@ def load_router_config(router: Optional[litellm.Router], config_file_path: str): cache_port = litellm.get_secret("REDIS_PORT", None) cache_password = litellm.get_secret("REDIS_PASSWORD", None) + cache_params = { + "type": cache_type, + "host": cache_host, + "port": cache_port, + "password": cache_password + } + + if "cache_params" in litellm_settings: + cache_params_in_config = litellm_settings["cache_params"] + # overwrie cache_params with cache_params_in_config + cache_params.update(cache_params_in_config) + # Assuming cache_type, cache_host, cache_port, and cache_password are strings print(f"{blue_color_code}Cache Type:{reset_color_code} {cache_type}") print(f"{blue_color_code}Cache Host:{reset_color_code} {cache_host}") @@ -513,15 +506,15 @@ def load_router_config(router: Optional[litellm.Router], config_file_path: str): ## to pass a complete url, or set ssl=True, etc. just set it as `os.environ[REDIS_URL] = `, _redis.py checks for REDIS specific environment variables litellm.cache = Cache( - type=cache_type, - host=cache_host, - port=cache_port, - password=cache_password + **cache_params ) print(f"{blue_color_code}Set Cache on LiteLLM Proxy: {litellm.cache.cache}{reset_color_code} {cache_password}") elif key == "callbacks": litellm.callbacks = [get_instance_fn(value=value, config_file_path=config_file_path)] print_verbose(f"{blue_color_code} Initialized Callbacks - {litellm.callbacks} {reset_color_code}") + elif key == "post_call_rules": + litellm.post_call_rules = [get_instance_fn(value=value, config_file_path=config_file_path)] + print(f"litellm.post_call_rules: {litellm.post_call_rules}") elif key == "success_callback": litellm.success_callback = [] @@ -533,10 +526,6 @@ def load_router_config(router: Optional[litellm.Router], config_file_path: str): # these are litellm callbacks - "langfuse", "sentry", "wandb" else: litellm.success_callback.append(callback) - if callback == "traceloop": - from traceloop.sdk import Traceloop - print_verbose(f"{blue_color_code} Initializing Traceloop SDK - \nRunning:`Traceloop.init(app_name='Litellm-Server', disable_batch=True)`") - Traceloop.init(app_name="Litellm-Server", disable_batch=True) print_verbose(f"{blue_color_code} Initialized Success Callbacks - {litellm.success_callback} {reset_color_code}") elif key == "failure_callback": litellm.failure_callback = [] @@ -550,6 +539,10 @@ def load_router_config(router: Optional[litellm.Router], config_file_path: str): else: litellm.failure_callback.append(callback) print_verbose(f"{blue_color_code} Initialized Success Callbacks - {litellm.failure_callback} {reset_color_code}") + elif key == "cache_params": + # this is set in the cache branch + # see usage here: https://docs.litellm.ai/docs/proxy/caching + pass else: setattr(litellm, key, value) @@ -572,7 +565,6 @@ def load_router_config(router: Optional[litellm.Router], config_file_path: str): cost_tracking() ### START REDIS QUEUE ### use_queue = general_settings.get("use_queue", False) - celery_setup(use_queue=use_queue) ### MASTER KEY ### master_key = general_settings.get("master_key", None) if master_key and master_key.startswith("os.environ/"): @@ -683,6 +675,8 @@ async def generate_key_helper_fn(duration: Optional[str], models: list, aliases: raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR) return {"token": token, "expires": new_verification_token.expires, "user_id": user_id} + + async def delete_verification_token(tokens: List): global prisma_client try: @@ -761,8 
+755,6 @@ def initialize( if max_budget: # litellm-specific param litellm.max_budget = max_budget dynamic_config["general"]["max_budget"] = max_budget - if use_queue: - celery_setup(use_queue=use_queue) if experimental: pass user_telemetry = telemetry @@ -798,48 +790,12 @@ def data_generator(response): async def async_data_generator(response, user_api_key_dict): print_verbose("inside generator") async for chunk in response: - # try: - # await proxy_logging_obj.pre_call_hook(user_api_key_dict=user_api_key_dict, data=None, call_type="completion") - # except Exception as e: - # print(f"An exception occurred - {str(e)}") - print_verbose(f"returned chunk: {chunk}") try: yield f"data: {json.dumps(chunk.dict())}\n\n" except: yield f"data: {json.dumps(chunk)}\n\n" -def litellm_completion(*args, **kwargs): - global user_temperature, user_request_timeout, user_max_tokens, user_api_base - call_type = kwargs.pop("call_type") - # override with user settings, these are params passed via cli - if user_temperature: - kwargs["temperature"] = user_temperature - if user_request_timeout: - kwargs["request_timeout"] = user_request_timeout - if user_max_tokens: - kwargs["max_tokens"] = user_max_tokens - if user_api_base: - kwargs["api_base"] = user_api_base - ## ROUTE TO CORRECT ENDPOINT ## - router_model_names = [m["model_name"] for m in llm_model_list] if llm_model_list is not None else [] - try: - if llm_router is not None and kwargs["model"] in router_model_names: # model in router model list - if call_type == "chat_completion": - response = llm_router.completion(*args, **kwargs) - elif call_type == "text_completion": - response = llm_router.text_completion(*args, **kwargs) - else: - if call_type == "chat_completion": - response = litellm.completion(*args, **kwargs) - elif call_type == "text_completion": - response = litellm.text_completion(*args, **kwargs) - except Exception as e: - raise e - if 'stream' in kwargs and kwargs['stream'] == True: # use generate_responses to stream responses - return StreamingResponse(data_generator(response), media_type='text/event-stream') - return response - def get_litellm_model_info(model: dict = {}): model_info = model.get("model_info", {}) model_to_lookup = model.get("litellm_params", {}).get("model", None) @@ -870,6 +826,8 @@ async def startup_event(): initialize(**worker_config) + proxy_logging_obj._init_litellm_callbacks() # INITIALIZE LITELLM CALLBACKS ON SERVER STARTUP <- do this to catch any logging errors on startup, not when calls are being made + if use_background_health_checks: asyncio.create_task(_run_background_health_check()) # start the background health check coroutine. 
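
The cost-tracking hook above now hands the whole response object to `litellm.completion_cost` instead of re-assembling the prompt and completion text by hand. A minimal sketch of that calling pattern, assuming `OPENAI_API_KEY` is set and using `gpt-3.5-turbo` purely as an illustrative model:

```python
import litellm

# completion_cost() reads the model name and token usage off the response object,
# so the caller no longer needs to pass the input messages and output text separately.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hi"}],
)
response_cost = litellm.completion_cost(completion_response=response)
print(f"response_cost: {response_cost}")
```
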
@@ -881,16 +839,6 @@ async def startup_event(): # add master key to db await generate_key_helper_fn(duration=None, models=[], aliases={}, config={}, spend=0, token=master_key) -@router.on_event("shutdown") -async def shutdown_event(): - global prisma_client, master_key, user_custom_auth - if prisma_client: - print("Disconnecting from Prisma") - await prisma_client.disconnect() - - ## RESET CUSTOM VARIABLES ## - master_key = None - user_custom_auth = None #### API ENDPOINTS #### @router.get("/v1/models", dependencies=[Depends(user_api_key_auth)]) @@ -929,7 +877,8 @@ def model_list(): @router.post("/v1/completions", dependencies=[Depends(user_api_key_auth)]) @router.post("/completions", dependencies=[Depends(user_api_key_auth)]) @router.post("/engines/{model:path}/completions", dependencies=[Depends(user_api_key_auth)]) -async def completion(request: Request, model: Optional[str] = None, user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth)): +async def completion(request: Request, model: Optional[str] = None, user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), background_tasks: BackgroundTasks = BackgroundTasks()): + global user_temperature, user_request_timeout, user_max_tokens, user_api_base try: body = await request.body() body_str = body.decode() @@ -938,7 +887,7 @@ async def completion(request: Request, model: Optional[str] = None, user_api_key except: data = json.loads(body_str) - data["user"] = user_api_key_dict.user_id + data["user"] = data.get("user", user_api_key_dict.user_id) data["model"] = ( general_settings.get("completion_model", None) # server default or user_model # model name passed via cli args @@ -947,17 +896,44 @@ async def completion(request: Request, model: Optional[str] = None, user_api_key ) if user_model: data["model"] = user_model - data["call_type"] = "text_completion" if "metadata" in data: data["metadata"]["user_api_key"] = user_api_key_dict.api_key else: data["metadata"] = {"user_api_key": user_api_key_dict.api_key} - return litellm_completion( - **data - ) + # override with user settings, these are params passed via cli + if user_temperature: + data["temperature"] = user_temperature + if user_request_timeout: + data["request_timeout"] = user_request_timeout + if user_max_tokens: + data["max_tokens"] = user_max_tokens + if user_api_base: + data["api_base"] = user_api_base + + ### CALL HOOKS ### - modify incoming data before calling the model + data = await proxy_logging_obj.pre_call_hook(user_api_key_dict=user_api_key_dict, data=data, call_type="completion") + + ### ROUTE THE REQUEST ### + router_model_names = [m["model_name"] for m in llm_model_list] if llm_model_list is not None else [] + if llm_router is not None and data["model"] in router_model_names: # model in router model list + response = await llm_router.atext_completion(**data) + elif llm_router is not None and data["model"] in llm_router.deployment_names: # model in router deployments, calling a specific deployment on the router + response = await llm_router.atext_completion(**data, specific_deployment = True) + elif llm_router is not None and llm_router.model_group_alias is not None and data["model"] in llm_router.model_group_alias: # model set in model_group_alias + response = await llm_router.atext_completion(**data) + else: # router is not set + response = await litellm.atext_completion(**data) + + print(f"final response: {response}") + if 'stream' in data and data['stream'] == True: # use generate_responses to stream responses + return 
StreamingResponse(async_data_generator(user_api_key_dict=user_api_key_dict, response=response), media_type='text/event-stream') + + background_tasks.add_task(log_input_output, request, response) # background task for logging to OTEL + return response except Exception as e: print(f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`") + traceback.print_exc() error_traceback = traceback.format_exc() error_msg = f"{str(e)}\n\n{error_traceback}" try: @@ -995,7 +971,7 @@ async def chat_completion(request: Request, model: Optional[str] = None, user_ap ) # users can pass in 'user' param to /chat/completions. Don't override it - if data.get("user", None) is None: + if data.get("user", None) is None and user_api_key_dict.user_id is not None: # if users are using user_api_key_auth, set `user` in `data` data["user"] = user_api_key_dict.user_id @@ -1027,7 +1003,7 @@ async def chat_completion(request: Request, model: Optional[str] = None, user_ap response = await llm_router.acompletion(**data) elif llm_router is not None and data["model"] in llm_router.deployment_names: # model in router deployments, calling a specific deployment on the router response = await llm_router.acompletion(**data, specific_deployment = True) - elif llm_router is not None and litellm.model_group_alias_map is not None and data["model"] in litellm.model_group_alias_map: # model set in model_group_alias_map + elif llm_router is not None and llm_router.model_group_alias is not None and data["model"] in llm_router.model_group_alias: # model set in model_group_alias response = await llm_router.acompletion(**data) else: # router is not set response = await litellm.acompletion(**data) @@ -1088,7 +1064,9 @@ async def embeddings(request: Request, user_api_key_dict: UserAPIKeyAuth = Depen "body": copy.copy(data) # use copy instead of deepcopy } - data["user"] = user_api_key_dict.user_id + if data.get("user", None) is None and user_api_key_dict.user_id is not None: + data["user"] = user_api_key_dict.user_id + data["model"] = ( general_settings.get("embedding_model", None) # server default or user_model # model name passed via cli args @@ -1098,10 +1076,11 @@ async def embeddings(request: Request, user_api_key_dict: UserAPIKeyAuth = Depen data["model"] = user_model if "metadata" in data: data["metadata"]["user_api_key"] = user_api_key_dict.api_key + data["metadata"]["headers"] = dict(request.headers) else: data["metadata"] = {"user_api_key": user_api_key_dict.api_key} + data["metadata"]["headers"] = dict(request.headers) router_model_names = [m["model_name"] for m in llm_model_list] if llm_model_list is not None else [] - print(f"received data: {data['input']}") if "input" in data and isinstance(data['input'], list) and isinstance(data['input'][0], list) and isinstance(data['input'][0][0], int): # check if array of tokens passed in # check if non-openai/azure model called - e.g. 
for langchain integration if llm_model_list is not None and data["model"] in router_model_names: @@ -1119,12 +1098,13 @@ async def embeddings(request: Request, user_api_key_dict: UserAPIKeyAuth = Depen ### CALL HOOKS ### - modify incoming data / reject request before calling the model data = await proxy_logging_obj.pre_call_hook(user_api_key_dict=user_api_key_dict, data=data, call_type="embeddings") - ## ROUTE TO CORRECT ENDPOINT ## if llm_router is not None and data["model"] in router_model_names: # model in router model list response = await llm_router.aembedding(**data) elif llm_router is not None and data["model"] in llm_router.deployment_names: # model in router deployments, calling a specific deployment on the router response = await llm_router.aembedding(**data, specific_deployment = True) + elif llm_router is not None and llm_router.model_group_alias is not None and data["model"] in llm_router.model_group_alias: # model set in model_group_alias + response = await llm_router.aembedding(**data) # ensure this goes the llm_router, router will do the correct alias mapping else: response = await litellm.aembedding(**data) background_tasks.add_task(log_input_output, request, response) # background task for logging to OTEL @@ -1133,7 +1113,19 @@ async def embeddings(request: Request, user_api_key_dict: UserAPIKeyAuth = Depen except Exception as e: await proxy_logging_obj.post_call_failure_hook(user_api_key_dict=user_api_key_dict, original_exception=e) traceback.print_exc() - raise e + if isinstance(e, HTTPException): + raise e + else: + error_traceback = traceback.format_exc() + error_msg = f"{str(e)}\n\n{error_traceback}" + try: + status = e.status_code # type: ignore + except: + status = 500 + raise HTTPException( + status_code=status, + detail=error_msg + ) #### KEY MANAGEMENT #### @@ -1162,6 +1154,30 @@ async def generate_key_fn(request: Request, data: GenerateKeyRequest, Authorizat response = await generate_key_helper_fn(**data_json) return GenerateKeyResponse(key=response["token"], expires=response["expires"], user_id=response["user_id"]) +@router.post("/key/update", tags=["key management"], dependencies=[Depends(user_api_key_auth)]) +async def update_key_fn(request: Request, data: UpdateKeyRequest): + """ + Update an existing key + """ + global prisma_client + try: + data_json: dict = data.json() + key = data_json.pop("key") + # get the row from db + if prisma_client is None: + raise Exception("Not connected to DB!") + + non_default_values = {k: v for k, v in data_json.items() if v is not None} + print(f"non_default_values: {non_default_values}") + response = await prisma_client.update_data(token=key, data={**non_default_values, "token": key}) + return {"key": key, **non_default_values} + # update based on remaining passed in values + except Exception as e: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail={"error": str(e)}, + ) + @router.post("/key/delete", tags=["key management"], dependencies=[Depends(user_api_key_auth)]) async def delete_key_fn(request: Request, data: DeleteKeyRequest): try: @@ -1207,10 +1223,12 @@ async def add_new_model(model_params: ModelParams): print_verbose(f"Loaded config: {config}") # Add the new model to the config + model_info = model_params.model_info.json() + model_info = {k: v for k, v in model_info.items() if v is not None} config['model_list'].append({ 'model_name': model_params.model_name, 'litellm_params': model_params.litellm_params, - 'model_info': model_params.model_info + 'model_info': model_info }) # Save the 
updated config @@ -1228,7 +1246,7 @@ async def add_new_model(model_params: ModelParams): raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}") #### [BETA] - This is a beta endpoint, format might change based on user feedback https://github.com/BerriAI/litellm/issues/933. If you need a stable endpoint use /model/info -@router.get("/v1/model/info", description="Provides more info about each model in /models, including config.yaml descriptions (except api key and api base)", tags=["model management"], dependencies=[Depends(user_api_key_auth)]) +@router.get("/model/info", description="Provides more info about each model in /models, including config.yaml descriptions (except api key and api base)", tags=["model management"], dependencies=[Depends(user_api_key_auth)]) async def model_info_v1(request: Request): global llm_model_list, general_settings, user_config_file_path # Load existing config @@ -1256,7 +1274,7 @@ async def model_info_v1(request: Request): #### [BETA] - This is a beta endpoint, format might change based on user feedback. - https://github.com/BerriAI/litellm/issues/933 -@router.get("/model/info", description="Provides more info about each model in /models, including config.yaml descriptions (except api key and api base)", tags=["model management"], dependencies=[Depends(user_api_key_auth)]) +@router.get("/v1/model/info", description="Provides more info about each model in /models, including config.yaml descriptions (except api key and api base)", tags=["model management"], dependencies=[Depends(user_api_key_auth)]) async def model_info(request: Request): global llm_model_list, general_settings, user_config_file_path # Load existing config @@ -1341,47 +1359,108 @@ async def delete_model(model_info: ModelInfoDelete): raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}") #### EXPERIMENTAL QUEUING #### -@router.post("/queue/request", dependencies=[Depends(user_api_key_auth)]) -async def async_queue_request(request: Request): - global celery_fn, llm_model_list - if celery_fn is not None: - body = await request.body() - body_str = body.decode() - try: - data = ast.literal_eval(body_str) - except: - data = json.loads(body_str) +async def _litellm_chat_completions_worker(data, user_api_key_dict): + """ + worker to make litellm completions calls + """ + while True: + try: + ### CALL HOOKS ### - modify incoming data before calling the model + data = await proxy_logging_obj.pre_call_hook(user_api_key_dict=user_api_key_dict, data=data, call_type="completion") + + print(f"_litellm_chat_completions_worker started") + ### ROUTE THE REQUEST ### + router_model_names = [m["model_name"] for m in llm_model_list] if llm_model_list is not None else [] + if llm_router is not None and data["model"] in router_model_names: # model in router model list + response = await llm_router.acompletion(**data) + elif llm_router is not None and data["model"] in llm_router.deployment_names: # model in router deployments, calling a specific deployment on the router + response = await llm_router.acompletion(**data, specific_deployment = True) + elif llm_router is not None and llm_router.model_group_alias is not None and data["model"] in llm_router.model_group_alias: # model set in model_group_alias + response = await llm_router.acompletion(**data) + else: # router is not set + response = await litellm.acompletion(**data) + + print(f"final response: {response}") + return response + except HTTPException as e: + print(f"EXCEPTION RAISED IN 
_litellm_chat_completions_worker - {e.status_code}; {e.detail}") + if e.status_code == 429 and "Max parallel request limit reached" in e.detail: + print(f"Max parallel request limit reached!") + timeout = litellm._calculate_retry_after(remaining_retries=3, max_retries=3, min_timeout=1) + await asyncio.sleep(timeout) + else: + raise e + + +@router.post("/queue/chat/completions", tags=["experimental"], dependencies=[Depends(user_api_key_auth)]) +async def async_queue_request(request: Request, model: Optional[str] = None, user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), background_tasks: BackgroundTasks = BackgroundTasks()): + global general_settings, user_debug, proxy_logging_obj + """ + v2 attempt at a background worker to handle queuing. + + Just supports /chat/completion calls currently. + + Now using a FastAPI background task + /chat/completions compatible endpoint + """ + try: + data = {} + data = await request.json() # type: ignore + + # Include original request and headers in the data + data["proxy_server_request"] = { + "url": str(request.url), + "method": request.method, + "headers": dict(request.headers), + "body": copy.copy(data) # use copy instead of deepcopy + } + + print_verbose(f"receiving data: {data}") data["model"] = ( general_settings.get("completion_model", None) # server default or user_model # model name passed via cli args + or model # for azure deployments or data["model"] # default passed in http request ) - data["llm_model_list"] = llm_model_list - print(f"data: {data}") - job = celery_fn.apply_async(kwargs=data) - return {"id": job.id, "url": f"/queue/response/{job.id}", "eta": 5, "status": "queued"} - else: + + # users can pass in 'user' param to /chat/completions. Don't override it + if data.get("user", None) is None and user_api_key_dict.user_id is not None: + # if users are using user_api_key_auth, set `user` in `data` + data["user"] = user_api_key_dict.user_id + + if "metadata" in data: + print(f'received metadata: {data["metadata"]}') + data["metadata"]["user_api_key"] = user_api_key_dict.api_key + data["metadata"]["headers"] = dict(request.headers) + else: + data["metadata"] = {"user_api_key": user_api_key_dict.api_key} + data["metadata"]["headers"] = dict(request.headers) + + global user_temperature, user_request_timeout, user_max_tokens, user_api_base + # override with user settings, these are params passed via cli + if user_temperature: + data["temperature"] = user_temperature + if user_request_timeout: + data["request_timeout"] = user_request_timeout + if user_max_tokens: + data["max_tokens"] = user_max_tokens + if user_api_base: + data["api_base"] = user_api_base + + response = await asyncio.wait_for(_litellm_chat_completions_worker(data=data, user_api_key_dict=user_api_key_dict), timeout=litellm.request_timeout) + + if 'stream' in data and data['stream'] == True: # use generate_responses to stream responses + return StreamingResponse(async_data_generator(user_api_key_dict=user_api_key_dict, response=response), media_type='text/event-stream') + + background_tasks.add_task(log_input_output, request, response) # background task for logging to OTEL + return response + except Exception as e: + await proxy_logging_obj.post_call_failure_hook(user_api_key_dict=user_api_key_dict, original_exception=e) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, - detail={"error": "Queue not initialized"}, + detail={"error": str(e)}, ) - -@router.get("/queue/response/{task_id}", dependencies=[Depends(user_api_key_auth)]) -async def 
async_queue_response(request: Request, task_id: str): - global celery_app_conn, async_result - try: - if celery_app_conn is not None and async_result is not None: - job = async_result(task_id, app=celery_app_conn) - if job.ready(): - return {"status": "finished", "result": job.result} - else: - return {'status': 'queued'} - else: - raise Exception() - except Exception as e: - return {"status": "finished", "result": str(e)} - - + + @router.get("/ollama_logs", dependencies=[Depends(user_api_key_auth)]) async def retrieve_server_log(request: Request): filepath = os.path.expanduser("~/.ollama/logs/server.log") @@ -1411,8 +1490,18 @@ async def config_yaml_endpoint(config_info: ConfigYAML): return {"hello": "world"} -@router.get("/test") +@router.get("/test", tags=["health"]) async def test_endpoint(request: Request): + """ + A test endpoint that pings the proxy server to check if it's healthy. + + Parameters: + request (Request): The incoming request. + + Returns: + dict: A dictionary containing the route of the request URL. + """ + # ping the proxy server to check if its healthy return {"route": request.url.path} @router.get("/health", tags=["health"], dependencies=[Depends(user_api_key_auth)]) @@ -1470,4 +1559,27 @@ async def get_routes(): return {"routes": routes} +@router.on_event("shutdown") +async def shutdown_event(): + global prisma_client, master_key, user_custom_auth + if prisma_client: + print("Disconnecting from Prisma") + await prisma_client.disconnect() + + ## RESET CUSTOM VARIABLES ## + cleanup_router_config_variables() + +def cleanup_router_config_variables(): + global master_key, user_config_file_path, otel_logging, user_custom_auth, user_custom_auth_path, use_background_health_checks, health_check_interval + + # Set all variables to None + master_key = None + user_config_file_path = None + otel_logging = None + user_custom_auth = None + user_custom_auth_path = None + use_background_health_checks = None + health_check_interval = None + + app.include_router(router) diff --git a/litellm/proxy/tests/test_proxy_exception_mapping.py b/litellm/proxy/tests/test_proxy_exception_mapping.py deleted file mode 100644 index 7fb5bedbe..000000000 --- a/litellm/proxy/tests/test_proxy_exception_mapping.py +++ /dev/null @@ -1,23 +0,0 @@ -import openai -client = openai.OpenAI( - api_key="anything", - # base_url="http://0.0.0.0:8000", -) - -try: - # request sent to model set on litellm proxy, `litellm --model` - response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [ - { - "role": "user", - "content": "this is a test request, write a short poem" - }, - ]) - - print(response) -# except openai.APITimeoutError: -# print("Got openai Timeout Exception. Good job. The proxy mapped to OpenAI exceptions") -except Exception as e: - print("\n the proxy did not map to OpenAI exception. 
Instead got", e) - print(e.type) # type: ignore - print(e.message) # type: ignore - print(e.code) # type: ignore \ No newline at end of file diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index e972eff4d..938508bcc 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -1,13 +1,13 @@ from typing import Optional, List, Any, Literal -import os, subprocess, hashlib, importlib, asyncio +import os, subprocess, hashlib, importlib, asyncio, copy import litellm, backoff from litellm.proxy._types import UserAPIKeyAuth from litellm.caching import DualCache from litellm.proxy.hooks.parallel_request_limiter import MaxParallelRequestsHandler - +from litellm.integrations.custom_logger import CustomLogger def print_verbose(print_statement): if litellm.set_verbose: - print(print_statement) # noqa + print(f"LiteLLM Proxy: {print_statement}") # noqa ### LOGGING ### class ProxyLogging: """ @@ -26,7 +26,7 @@ class ProxyLogging: pass def _init_litellm_callbacks(self): - + print_verbose(f"INITIALIZING LITELLM CALLBACKS!") litellm.callbacks.append(self.max_parallel_request_limiter) for callback in litellm.callbacks: if callback not in litellm.input_callback: @@ -64,18 +64,14 @@ class ProxyLogging: 1. /chat/completions 2. /embeddings """ - try: - self.call_details["data"] = data - self.call_details["call_type"] = call_type + try: + for callback in litellm.callbacks: + if isinstance(callback, CustomLogger) and 'async_pre_call_hook' in vars(callback.__class__): + response = await callback.async_pre_call_hook(user_api_key_dict=user_api_key_dict, cache=self.call_details["user_api_key_cache"], data=data, call_type=call_type) + if response is not None: + data = response - ## check if max parallel requests set - if user_api_key_dict.max_parallel_requests is not None: - ## if set, check if request allowed - await self.max_parallel_request_limiter.max_parallel_request_allow_request( - max_parallel_requests=user_api_key_dict.max_parallel_requests, - api_key=user_api_key_dict.api_key, - user_api_key_cache=self.call_details["user_api_key_cache"]) - + print_verbose(f'final data being sent to {call_type} call: {data}') return data except Exception as e: raise e @@ -103,17 +99,13 @@ class ProxyLogging: 1. /chat/completions 2. 
/embeddings """ - # check if max parallel requests set - if user_api_key_dict.max_parallel_requests is not None: - ## decrement call count if call failed - if (hasattr(original_exception, "status_code") - and original_exception.status_code == 429 - and "Max parallel request limit reached" in str(original_exception)): - pass # ignore failed calls due to max limit being reached - else: - await self.max_parallel_request_limiter.async_log_failure_call( - api_key=user_api_key_dict.api_key, - user_api_key_cache=self.call_details["user_api_key_cache"]) + + for callback in litellm.callbacks: + try: + if isinstance(callback, CustomLogger): + await callback.async_post_call_failure_hook(user_api_key_dict=user_api_key_dict, original_exception=original_exception) + except Exception as e: + raise e return @@ -165,19 +157,20 @@ class PrismaClient: async def get_data(self, token: str, expires: Optional[Any]=None): try: # check if plain text or hash + hashed_token = token if token.startswith("sk-"): - token = self.hash_token(token=token) + hashed_token = self.hash_token(token=token) if expires: response = await self.db.litellm_verificationtoken.find_first( where={ - "token": token, + "token": hashed_token, "expires": {"gte": expires} # Check if the token is not expired } ) else: response = await self.db.litellm_verificationtoken.find_unique( where={ - "token": token + "token": hashed_token } ) return response @@ -200,18 +193,18 @@ class PrismaClient: try: token = data["token"] hashed_token = self.hash_token(token=token) - data["token"] = hashed_token + db_data = copy.deepcopy(data) + db_data["token"] = hashed_token new_verification_token = await self.db.litellm_verificationtoken.upsert( # type: ignore where={ 'token': hashed_token, }, data={ - "create": {**data}, #type: ignore + "create": {**db_data}, #type: ignore "update": {} # don't do anything if it already exists } ) - return new_verification_token except Exception as e: asyncio.create_task(self.proxy_logging_obj.failure_handler(original_exception=e)) @@ -235,15 +228,16 @@ class PrismaClient: if token.startswith("sk-"): token = self.hash_token(token=token) - data["token"] = token + db_data = copy.deepcopy(data) + db_data["token"] = token response = await self.db.litellm_verificationtoken.update( where={ "token": token }, - data={**data} # type: ignore + data={**db_data} # type: ignore ) print_verbose("\033[91m" + f"DB write succeeded {response}" + "\033[0m") - return {"token": token, "data": data} + return {"token": token, "data": db_data} except Exception as e: asyncio.create_task(self.proxy_logging_obj.failure_handler(original_exception=e)) print_verbose("\033[91m" + f"DB write failed: {e}" + "\033[0m") diff --git a/litellm/router.py b/litellm/router.py index 1721a381b..1b8dc1172 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -7,6 +7,7 @@ # # Thank you ! We ❤️ you! 
- Krrish & Ishaan +import copy from datetime import datetime from typing import Dict, List, Optional, Union, Literal, Any import random, threading, time, traceback, uuid @@ -17,6 +18,7 @@ import inspect, concurrent from openai import AsyncOpenAI from collections import defaultdict from litellm.router_strategy.least_busy import LeastBusyLoggingHandler +import copy class Router: """ Example usage: @@ -68,6 +70,7 @@ class Router: redis_password: Optional[str] = None, cache_responses: Optional[bool] = False, cache_kwargs: dict = {}, # additional kwargs to pass to RedisCache (see caching.py) + caching_groups: Optional[List[tuple]] = None, # if you want to cache across model groups ## RELIABILITY ## num_retries: int = 0, timeout: Optional[float] = None, @@ -76,11 +79,13 @@ class Router: fallbacks: List = [], allowed_fails: Optional[int] = None, context_window_fallbacks: List = [], + model_group_alias: Optional[dict] = {}, routing_strategy: Literal["simple-shuffle", "least-busy", "usage-based-routing", "latency-based-routing"] = "simple-shuffle") -> None: self.set_verbose = set_verbose self.deployment_names: List = [] # names of models under litellm_params. ex. azure/chatgpt-v-2 if model_list: + model_list = copy.deepcopy(model_list) self.set_model_list(model_list) self.healthy_deployments: List = self.model_list self.deployment_latency_map = {} @@ -99,6 +104,7 @@ class Router: self.fail_calls: defaultdict = defaultdict(int) # dict to store fail_calls made to each model self.success_calls: defaultdict = defaultdict(int) # dict to store success_calls made to each model self.previous_models: List = [] # list to store failed calls (passed in as metadata to next call) + self.model_group_alias: dict = model_group_alias or {} # dict to store aliases for router, ex. {"gpt-4": "gpt-3.5-turbo"}, all requests with gpt-4 -> get routed to gpt-3.5-turbo group # make Router.chat.completions.create compatible for openai.chat.completions.create self.chat = litellm.Chat(params=default_litellm_params) @@ -107,9 +113,10 @@ class Router: self.default_litellm_params = default_litellm_params self.default_litellm_params.setdefault("timeout", timeout) self.default_litellm_params.setdefault("max_retries", 0) + self.default_litellm_params.setdefault("metadata", {}).update({"caching_groups": caching_groups}) ### CACHING ### - cache_type = "local" # default to an in-memory cache + cache_type: Literal["local", "redis"] = "local" # default to an in-memory cache redis_cache = None cache_config = {} if redis_url is not None or (redis_host is not None and redis_port is not None and redis_password is not None): @@ -133,7 +140,7 @@ class Router: if cache_responses: if litellm.cache is None: # the cache can be initialized on the proxy server. We should not overwrite it - litellm.cache = litellm.Cache(type=cache_type, **cache_config) + litellm.cache = litellm.Cache(type=cache_type, **cache_config) # type: ignore self.cache_responses = cache_responses self.cache = DualCache(redis_cache=redis_cache, in_memory_cache=InMemoryCache()) # use a dual cache (Redis+In-Memory) for tracking cooldowns, usage, etc. 
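
The new `model_group_alias` router parameter (replacing the global `litellm.model_group_alias_map` referenced elsewhere in this diff) maps an incoming model name onto an existing model group at routing time. A minimal sketch of how a caller might use it; the group name `openai-gpt-3.5` and the env-var key reference are illustrative placeholders:

```python
from litellm import Router

router = Router(
    model_list=[
        {
            "model_name": "openai-gpt-3.5",  # the real deployment group
            "litellm_params": {
                "model": "gpt-3.5-turbo",
                "api_key": "os.environ/OPENAI_API_KEY",
            },
        }
    ],
    # requests that ask for "gpt-4" get routed to the "openai-gpt-3.5" group
    model_group_alias={"gpt-4": "openai-gpt-3.5"},
)

response = router.completion(
    model="gpt-4",
    messages=[{"role": "user", "content": "hi"}],
)
print(response)
```
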
### ROUTING SETUP ### @@ -198,19 +205,10 @@ class Router: data = deployment["litellm_params"].copy() kwargs["model_info"] = deployment.get("model_info", {}) for k, v in self.default_litellm_params.items(): - if k not in data: # prioritize model-specific params > default router params - data[k] = v - - ########## remove -ModelID-XXXX from model ############## - original_model_string = data["model"] - # Find the index of "ModelID" in the string - self.print_verbose(f"completion model: {original_model_string}") - index_of_model_id = original_model_string.find("-ModelID") - # Remove everything after "-ModelID" if it exists - if index_of_model_id != -1: - data["model"] = original_model_string[:index_of_model_id] - else: - data["model"] = original_model_string + if k not in kwargs: # prioritize model-specific params > default router params + kwargs[k] = v + elif k == "metadata": + kwargs[k].update(v) model_client = self._get_client(deployment=deployment, kwargs=kwargs) return litellm.completion(**{**data, "messages": messages, "caching": self.cache_responses, "client": model_client, **kwargs}) except Exception as e: @@ -241,31 +239,25 @@ class Router: **kwargs): try: self.print_verbose(f"Inside _acompletion()- model: {model}; kwargs: {kwargs}") - original_model_string = None # set a default for this variable deployment = self.get_available_deployment(model=model, messages=messages, specific_deployment=kwargs.pop("specific_deployment", None)) kwargs.setdefault("metadata", {}).update({"deployment": deployment["litellm_params"]["model"]}) kwargs["model_info"] = deployment.get("model_info", {}) data = deployment["litellm_params"].copy() + model_name = data["model"] for k, v in self.default_litellm_params.items(): - if k not in data: # prioritize model-specific params > default router params - data[k] = v - ########## remove -ModelID-XXXX from model ############## - original_model_string = data["model"] - # Find the index of "ModelID" in the string - index_of_model_id = original_model_string.find("-ModelID") - # Remove everything after "-ModelID" if it exists - if index_of_model_id != -1: - data["model"] = original_model_string[:index_of_model_id] - else: - data["model"] = original_model_string + if k not in kwargs: # prioritize model-specific params > default router params + kwargs[k] = v + elif k == "metadata": + kwargs[k].update(v) + model_client = self._get_client(deployment=deployment, kwargs=kwargs, client_type="async") - self.total_calls[original_model_string] +=1 + self.total_calls[model_name] +=1 response = await litellm.acompletion(**{**data, "messages": messages, "caching": self.cache_responses, "client": model_client, **kwargs}) - self.success_calls[original_model_string] +=1 + self.success_calls[model_name] +=1 return response except Exception as e: - if original_model_string is not None: - self.fail_calls[original_model_string] +=1 + if model_name is not None: + self.fail_calls[model_name] +=1 raise e def text_completion(self, @@ -283,8 +275,43 @@ class Router: data = deployment["litellm_params"].copy() for k, v in self.default_litellm_params.items(): - if k not in data: # prioritize model-specific params > default router params - data[k] = v + if k not in kwargs: # prioritize model-specific params > default router params + kwargs[k] = v + elif k == "metadata": + kwargs[k].update(v) + + # call via litellm.completion() + return litellm.text_completion(**{**data, "prompt": prompt, "caching": self.cache_responses, **kwargs}) # type: ignore + except Exception as e: + if self.num_retries > 
0: + kwargs["model"] = model + kwargs["messages"] = messages + kwargs["original_exception"] = e + kwargs["original_function"] = self.completion + return self.function_with_retries(**kwargs) + else: + raise e + + async def atext_completion(self, + model: str, + prompt: str, + is_retry: Optional[bool] = False, + is_fallback: Optional[bool] = False, + is_async: Optional[bool] = False, + **kwargs): + try: + kwargs.setdefault("metadata", {}).update({"model_group": model}) + messages=[{"role": "user", "content": prompt}] + # pick the one that is available (lowest TPM/RPM) + deployment = self.get_available_deployment(model=model, messages=messages, specific_deployment=kwargs.pop("specific_deployment", None)) + + data = deployment["litellm_params"].copy() + for k, v in self.default_litellm_params.items(): + if k not in kwargs: # prioritize model-specific params > default router params + kwargs[k] = v + elif k == "metadata": + kwargs[k].update(v) + ########## remove -ModelID-XXXX from model ############## original_model_string = data["model"] # Find the index of "ModelID" in the string @@ -294,8 +321,9 @@ class Router: data["model"] = original_model_string[:index_of_model_id] else: data["model"] = original_model_string - # call via litellm.completion() - return litellm.text_completion(**{**data, "prompt": prompt, "caching": self.cache_responses, **kwargs}) # type: ignore + # call via litellm.atext_completion() + response = await litellm.atext_completion(**{**data, "prompt": prompt, "caching": self.cache_responses, **kwargs}) # type: ignore + return response except Exception as e: if self.num_retries > 0: kwargs["model"] = model @@ -313,21 +341,14 @@ class Router: **kwargs) -> Union[List[float], None]: # pick the one that is available (lowest TPM/RPM) deployment = self.get_available_deployment(model=model, input=input, specific_deployment=kwargs.pop("specific_deployment", None)) - kwargs.setdefault("metadata", {}).update({"deployment": deployment["litellm_params"]["model"]}) - kwargs["model_info"] = deployment.get("model_info", {}) + kwargs.setdefault("model_info", {}) + kwargs.setdefault("metadata", {}).update({"model_group": model, "deployment": deployment["litellm_params"]["model"]}) # [TODO]: move to using async_function_with_fallbacks data = deployment["litellm_params"].copy() for k, v in self.default_litellm_params.items(): - if k not in data: # prioritize model-specific params > default router params - data[k] = v - ########## remove -ModelID-XXXX from model ############## - original_model_string = data["model"] - # Find the index of "ModelID" in the string - index_of_model_id = original_model_string.find("-ModelID") - # Remove everything after "-ModelID" if it exists - if index_of_model_id != -1: - data["model"] = original_model_string[:index_of_model_id] - else: - data["model"] = original_model_string + if k not in kwargs: # prioritize model-specific params > default router params + kwargs[k] = v + elif k == "metadata": + kwargs[k].update(v) model_client = self._get_client(deployment=deployment, kwargs=kwargs) # call via litellm.embedding() return litellm.embedding(**{**data, "input": input, "caching": self.cache_responses, "client": model_client, **kwargs}) @@ -339,21 +360,15 @@ class Router: **kwargs) -> Union[List[float], None]: # pick the one that is available (lowest TPM/RPM) deployment = self.get_available_deployment(model=model, input=input, specific_deployment=kwargs.pop("specific_deployment", None)) - kwargs.setdefault("metadata", {}).update({"deployment": 
deployment["litellm_params"]["model"]}) + kwargs.setdefault("metadata", {}).update({"model_group": model, "deployment": deployment["litellm_params"]["model"]}) data = deployment["litellm_params"].copy() kwargs["model_info"] = deployment.get("model_info", {}) for k, v in self.default_litellm_params.items(): - if k not in data: # prioritize model-specific params > default router params - data[k] = v - ########## remove -ModelID-XXXX from model ############## - original_model_string = data["model"] - # Find the index of "ModelID" in the string - index_of_model_id = original_model_string.find("-ModelID") - # Remove everything after "-ModelID" if it exists - if index_of_model_id != -1: - data["model"] = original_model_string[:index_of_model_id] - else: - data["model"] = original_model_string + if k not in kwargs: # prioritize model-specific params > default router params + kwargs[k] = v + elif k == "metadata": + kwargs[k].update(v) + model_client = self._get_client(deployment=deployment, kwargs=kwargs, client_type="async") return await litellm.aembedding(**{**data, "input": input, "caching": self.cache_responses, "client": model_client, **kwargs}) @@ -371,7 +386,7 @@ class Router: self.print_verbose(f'Async Response: {response}') return response except Exception as e: - self.print_verbose(f"An exception occurs: {e}") + self.print_verbose(f"An exception occurs: {e}\n\n Traceback{traceback.format_exc()}") original_exception = e try: self.print_verbose(f"Trying to fallback b/w models") @@ -637,9 +652,10 @@ class Router: model_name = kwargs.get('model', None) # i.e. gpt35turbo custom_llm_provider = kwargs.get("litellm_params", {}).get('custom_llm_provider', None) # i.e. azure metadata = kwargs.get("litellm_params", {}).get('metadata', None) + deployment_id = kwargs.get("litellm_params", {}).get("model_info").get("id") + self._set_cooldown_deployments(deployment_id) # setting deployment_id in cooldown deployments if metadata: deployment = metadata.get("deployment", None) - self._set_cooldown_deployments(deployment) deployment_exceptions = self.model_exception_map.get(deployment, []) deployment_exceptions.append(exception_str) self.model_exception_map[deployment] = deployment_exceptions @@ -877,7 +893,7 @@ class Router: return chosen_item def set_model_list(self, model_list: list): - self.model_list = model_list + self.model_list = copy.deepcopy(model_list) # we add api_base/api_key each model so load balancing between azure/gpt on api_base1 and api_base2 works import os for model in self.model_list: @@ -889,23 +905,26 @@ class Router: model["model_info"] = model_info #### for OpenAI / Azure we need to initalize the Client for High Traffic ######## custom_llm_provider = litellm_params.get("custom_llm_provider") - if custom_llm_provider is None: - custom_llm_provider = model_name.split("/",1)[0] + custom_llm_provider = custom_llm_provider or model_name.split("/",1)[0] or "" + default_api_base = None + default_api_key = None + if custom_llm_provider in litellm.openai_compatible_providers: + _, custom_llm_provider, api_key, api_base = litellm.get_llm_provider(model=model_name) + default_api_base = api_base + default_api_key = api_key if ( model_name in litellm.open_ai_chat_completion_models - or custom_llm_provider == "custom_openai" - or custom_llm_provider == "deepinfra" - or custom_llm_provider == "perplexity" - or custom_llm_provider == "anyscale" - or custom_llm_provider == "openai" + or custom_llm_provider in litellm.openai_compatible_providers or custom_llm_provider == "azure" + or 
custom_llm_provider == "custom_openai" + or custom_llm_provider == "openai" or "ft:gpt-3.5-turbo" in model_name or model_name in litellm.open_ai_embedding_models ): # glorified / complicated reading of configs # user can pass vars directly or they can pas os.environ/AZURE_API_KEY, in which case we will read the env # we do this here because we init clients for Azure, OpenAI and we need to set the right key - api_key = litellm_params.get("api_key") + api_key = litellm_params.get("api_key") or default_api_key if api_key and api_key.startswith("os.environ/"): api_key_env_name = api_key.replace("os.environ/", "") api_key = litellm.get_secret(api_key_env_name) @@ -913,7 +932,7 @@ class Router: api_base = litellm_params.get("api_base") base_url = litellm_params.get("base_url") - api_base = api_base or base_url # allow users to pass in `api_base` or `base_url` for azure + api_base = api_base or base_url or default_api_base # allow users to pass in `api_base` or `base_url` for azure if api_base and api_base.startswith("os.environ/"): api_base_env_name = api_base.replace("os.environ/", "") api_base = litellm.get_secret(api_base_env_name) @@ -1049,12 +1068,6 @@ class Router: ############ End of initializing Clients for OpenAI/Azure ################### self.deployment_names.append(model["litellm_params"]["model"]) - model_id = "" - for key in model["litellm_params"]: - if key != "api_key" and key != "metadata": - model_id+= str(model["litellm_params"][key]) - model["litellm_params"]["model"] += "-ModelID-" + model_id - self.print_verbose(f"\n Initialized Model List {self.model_list}") ############ Users can either pass tpm/rpm as a litellm_param or a router param ########### @@ -1115,38 +1128,41 @@ class Router: if specific_deployment == True: # users can also specify a specific deployment name. At this point we should check if they are just trying to call a specific deployment for deployment in self.model_list: - cleaned_model = litellm.utils.remove_model_id(deployment.get("litellm_params").get("model")) - if cleaned_model == model: + deployment_model = deployment.get("litellm_params").get("model") + if deployment_model == model: # User Passed a specific deployment name on their config.yaml, example azure/chat-gpt-v-2 # return the first deployment where the `model` matches the specificed deployment name return deployment raise ValueError(f"LiteLLM Router: Trying to call specific deployment, but Model:{model} does not exist in Model List: {self.model_list}") # check if aliases set on litellm model alias map - if model in litellm.model_group_alias_map: - self.print_verbose(f"Using a model alias. Got Request for {model}, sending requests to {litellm.model_group_alias_map.get(model)}") - model = litellm.model_group_alias_map[model] + if model in self.model_group_alias: + self.print_verbose(f"Using a model alias. 
Got Request for {model}, sending requests to {self.model_group_alias.get(model)}") + model = self.model_group_alias[model] ## get healthy deployments ### get all deployments - ### filter out the deployments currently cooling down healthy_deployments = [m for m in self.model_list if m["model_name"] == model] if len(healthy_deployments) == 0: # check if the user sent in a deployment name instead healthy_deployments = [m for m in self.model_list if m["litellm_params"]["model"] == model] self.print_verbose(f"initial list of deployments: {healthy_deployments}") + + # filter out the deployments currently cooling down deployments_to_remove = [] - cooldown_deployments = self._get_cooldown_deployments() + # cooldown_deployments is a list of model_id's cooling down, cooldown_deployments = ["16700539-b3cd-42f4-b426-6a12a1bb706a", "16700539-b3cd-42f4-b426-7899"] + cooldown_deployments = self._get_cooldown_deployments() self.print_verbose(f"cooldown deployments: {cooldown_deployments}") - ### FIND UNHEALTHY DEPLOYMENTS + # Find deployments in model_list whose model_id is cooling down for deployment in healthy_deployments: - deployment_name = deployment["litellm_params"]["model"] - if deployment_name in cooldown_deployments: + deployment_id = deployment["model_info"]["id"] + if deployment_id in cooldown_deployments: deployments_to_remove.append(deployment) - ### FILTER OUT UNHEALTHY DEPLOYMENTS + # remove unhealthy deployments from healthy deployments for deployment in deployments_to_remove: healthy_deployments.remove(deployment) + self.print_verbose(f"healthy deployments: length {len(healthy_deployments)} {healthy_deployments}") if len(healthy_deployments) == 0: raise ValueError("No models available") @@ -1222,11 +1238,14 @@ class Router: raise ValueError("No models available.") def flush_cache(self): + litellm.cache = None self.cache.flush_cache() def reset(self): ## clean up on close litellm.success_callback = [] + litellm.__async_success_callback = [] litellm.failure_callback = [] + litellm._async_failure_callback = [] self.flush_cache() \ No newline at end of file diff --git a/litellm/tests/conftest.py b/litellm/tests/conftest.py new file mode 100644 index 000000000..411da8023 --- /dev/null +++ b/litellm/tests/conftest.py @@ -0,0 +1,34 @@ +# conftest.py + +import pytest, sys, os +import importlib +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import litellm + + +@pytest.fixture(scope="function", autouse=True) +def setup_and_teardown(): + """ + This fixture reloads litellm before every function. To speed up testing by removing callbacks being chained. 
+ """ + curr_dir = os.getcwd() # Get the current working directory + sys.path.insert(0, os.path.abspath("../..")) # Adds the project directory to the system path + import litellm + importlib.reload(litellm) + print(litellm) + # from litellm import Router, completion, aembedding, acompletion, embedding + yield + +def pytest_collection_modifyitems(config, items): + # Separate tests in 'test_amazing_proxy_custom_logger.py' and other tests + custom_logger_tests = [item for item in items if 'custom_logger' in item.parent.name] + other_tests = [item for item in items if 'custom_logger' not in item.parent.name] + + # Sort tests based on their names + custom_logger_tests.sort(key=lambda x: x.name) + other_tests.sort(key=lambda x: x.name) + + # Reorder the items list + items[:] = custom_logger_tests + other_tests \ No newline at end of file diff --git a/litellm/tests/example_config_yaml/cache_no_params.yaml b/litellm/tests/example_config_yaml/cache_no_params.yaml new file mode 100644 index 000000000..20ed919dd --- /dev/null +++ b/litellm/tests/example_config_yaml/cache_no_params.yaml @@ -0,0 +1,7 @@ +model_list: + - model_name: "openai-model" + litellm_params: + model: "gpt-3.5-turbo" + +litellm_settings: + cache: True diff --git a/litellm/tests/example_config_yaml/cache_with_params.yaml b/litellm/tests/example_config_yaml/cache_with_params.yaml new file mode 100644 index 000000000..372151d0c --- /dev/null +++ b/litellm/tests/example_config_yaml/cache_with_params.yaml @@ -0,0 +1,10 @@ +model_list: + - model_name: "openai-model" + litellm_params: + model: "gpt-3.5-turbo" + +litellm_settings: + cache: True + cache_params: + supported_call_types: ["embedding", "aembedding"] + host: "localhost" \ No newline at end of file diff --git a/litellm/tests/langfuse.log b/litellm/tests/langfuse.log new file mode 100644 index 000000000..58cdb5267 --- /dev/null +++ b/litellm/tests/langfuse.log @@ -0,0 +1,4 @@ +uploading batch of 2 items +successfully uploaded batch of 2 items +uploading batch of 2 items +successfully uploaded batch of 2 items diff --git a/litellm/tests/test_acooldowns_router.py b/litellm/tests/test_acooldowns_router.py index afab5944c..d1a33d10b 100644 --- a/litellm/tests/test_acooldowns_router.py +++ b/litellm/tests/test_acooldowns_router.py @@ -118,6 +118,7 @@ def test_cooldown_same_model_name(): "api_key": os.getenv("AZURE_API_KEY"), "api_version": os.getenv("AZURE_API_VERSION"), "api_base": "BAD_API_BASE", + "tpm": 90 }, }, { @@ -126,7 +127,8 @@ def test_cooldown_same_model_name(): "model": "azure/chatgpt-v-2", "api_key": os.getenv("AZURE_API_KEY"), "api_version": os.getenv("AZURE_API_VERSION"), - "api_base": os.getenv("AZURE_API_BASE") + "api_base": os.getenv("AZURE_API_BASE"), + "tpm": 0.000001 }, }, ] @@ -151,13 +153,14 @@ def test_cooldown_same_model_name(): ] ) print(router.model_list) - litellm_model_names = [] + model_ids = [] for model in router.model_list: - litellm_model_names.append(model["litellm_params"]["model"]) - print("\n litellm model names ", litellm_model_names) + model_ids.append(model["model_info"]["id"]) + print("\n litellm model ids ", model_ids) # example litellm_model_names ['azure/chatgpt-v-2-ModelID-64321', 'azure/chatgpt-v-2-ModelID-63960'] - assert litellm_model_names[0] != litellm_model_names[1] # ensure both models have a uuid added, and they have different names + assert model_ids[0] != model_ids[1] # ensure both models have a uuid added, and they have different names + print("\ngot response\n", response) except Exception as e: pytest.fail(f"Got unexpected 
exception on router! - {e}") diff --git a/litellm/tests/test_amazing_vertex_completion.py b/litellm/tests/test_amazing_vertex_completion.py index 8b6821caa..6506f0a41 100644 --- a/litellm/tests/test_amazing_vertex_completion.py +++ b/litellm/tests/test_amazing_vertex_completion.py @@ -9,9 +9,9 @@ import os, io sys.path.insert( 0, os.path.abspath("../..") ) # Adds the parent directory to the system path -import pytest +import pytest, asyncio import litellm -from litellm import embedding, completion, completion_cost, Timeout +from litellm import embedding, completion, completion_cost, Timeout, acompletion from litellm import RateLimitError import json import os @@ -63,6 +63,27 @@ def load_vertex_ai_credentials(): # Export the temporary file as GOOGLE_APPLICATION_CREDENTIALS os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.abspath(temp_file.name) +@pytest.mark.asyncio +async def get_response(): + load_vertex_ai_credentials() + prompt = '\ndef count_nums(arr):\n """\n Write a function count_nums which takes an array of integers and returns\n the number of elements which has a sum of digits > 0.\n If a number is negative, then its first signed digit will be negative:\n e.g. -123 has signed digits -1, 2, and 3.\n >>> count_nums([]) == 0\n >>> count_nums([-1, 11, -11]) == 1\n >>> count_nums([1, 1, 2]) == 3\n """\n' + try: + response = await acompletion( + model="gemini-pro", + messages=[ + { + "role": "system", + "content": "Complete the given code with no more explanation. Remember that there is a 4-space indent before the first line of your generated code.", + }, + {"role": "user", "content": prompt}, + ], + ) + return response + except litellm.UnprocessableEntityError as e: + pass + except Exception as e: + pytest.fail(f"An error occurred - {str(e)}") + def test_vertex_ai(): import random @@ -72,14 +93,15 @@ def test_vertex_ai(): litellm.set_verbose=False litellm.vertex_project = "hardy-device-386718" - test_models = random.sample(test_models, 4) + test_models = random.sample(test_models, 1) + test_models += litellm.vertex_language_models # always test gemini-pro for model in test_models: try: - if model in ["code-gecko@001", "code-gecko@latest", "code-bison@001", "text-bison@001"]: + if model in ["code-gecko", "code-gecko@001", "code-gecko@002", "code-gecko@latest", "code-bison@001", "text-bison@001"]: # our account does not have access to this model continue print("making request", model) - response = completion(model=model, messages=[{'role': 'user', 'content': 'hi'}]) + response = completion(model=model, messages=[{'role': 'user', 'content': 'hi'}], temperature=0.7) print("\nModel Response", response) print(response) assert type(response.choices[0].message.content) == str @@ -94,11 +116,12 @@ def test_vertex_ai_stream(): litellm.vertex_project = "hardy-device-386718" import random - test_models = litellm.vertex_chat_models + litellm.vertex_code_chat_models + litellm.vertex_text_models + litellm.vertex_code_text_models - test_models = random.sample(test_models, 4) + test_models = litellm.vertex_chat_models + litellm.vertex_code_chat_models + litellm.vertex_text_models + litellm.vertex_code_text_models + test_models = random.sample(test_models, 1) + test_models += litellm.vertex_language_models # always test gemini-pro for model in test_models: try: - if model in ["code-gecko@001", "code-gecko@latest", "code-bison@001", "text-bison@001"]: + if model in ["code-gecko", "code-gecko@001", "code-gecko@002", "code-gecko@latest", "code-bison@001", "text-bison@001"]: # our account does not 
have access to this model continue print("making request", model) @@ -115,3 +138,199 @@ def test_vertex_ai_stream(): except Exception as e: pytest.fail(f"Error occurred: {e}") # test_vertex_ai_stream() + +@pytest.mark.asyncio +async def test_async_vertexai_response(): + import random + load_vertex_ai_credentials() + test_models = litellm.vertex_chat_models + litellm.vertex_code_chat_models + litellm.vertex_text_models + litellm.vertex_code_text_models + test_models = random.sample(test_models, 1) + test_models += litellm.vertex_language_models # always test gemini-pro + for model in test_models: + print(f'model being tested in async call: {model}') + if model in ["code-gecko", "code-gecko@001", "code-gecko@002", "code-gecko@latest", "code-bison@001", "text-bison@001"]: + # our account does not have access to this model + continue + try: + user_message = "Hello, how are you?" + messages = [{"content": user_message, "role": "user"}] + response = await acompletion(model=model, messages=messages, temperature=0.7, timeout=5) + print(f"response: {response}") + except litellm.Timeout as e: + pass + except Exception as e: + pytest.fail(f"An exception occurred: {e}") + +# asyncio.run(test_async_vertexai_response()) + +@pytest.mark.asyncio +async def test_async_vertexai_streaming_response(): + import random + load_vertex_ai_credentials() + test_models = litellm.vertex_chat_models + litellm.vertex_code_chat_models + litellm.vertex_text_models + litellm.vertex_code_text_models + test_models = random.sample(test_models, 1) + test_models += litellm.vertex_language_models # always test gemini-pro + for model in test_models: + if model in ["code-gecko", "code-gecko@001", "code-gecko@002", "code-gecko@latest", "code-bison@001", "text-bison@001"]: + # our account does not have access to this model + continue + try: + user_message = "Hello, how are you?" + messages = [{"content": user_message, "role": "user"}] + response = await acompletion(model="gemini-pro", messages=messages, temperature=0.7, timeout=5, stream=True) + print(f"response: {response}") + complete_response = "" + async for chunk in response: + print(f"chunk: {chunk}") + complete_response += chunk.choices[0].delta.content + print(f"complete_response: {complete_response}") + assert len(complete_response) > 0 + except litellm.Timeout as e: + pass + except Exception as e: + print(e) + pytest.fail(f"An exception occurred: {e}") + +# asyncio.run(test_async_vertexai_streaming_response()) + +def test_gemini_pro_vision(): + try: + load_vertex_ai_credentials() + litellm.set_verbose = True + litellm.num_retries=0 + resp = litellm.completion( + model = "vertex_ai/gemini-pro-vision", + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "Whats in this image?" + }, + { + "type": "image_url", + "image_url": { + "url": "gs://cloud-samples-data/generative-ai/image/boats.jpeg" + } + } + ] + } + ], + ) + print(resp) + except Exception as e: + import traceback + traceback.print_exc() + raise e +# test_gemini_pro_vision() + + +# Extra gemini Vision tests for completion + stream, async, async + stream +# if we run into issues with gemini, we will also add these to our ci/cd pipeline +# def test_gemini_pro_vision_stream(): +# try: +# litellm.set_verbose = False +# litellm.num_retries=0 +# print("streaming response from gemini-pro-vision") +# resp = litellm.completion( +# model = "vertex_ai/gemini-pro-vision", +# messages=[ +# { +# "role": "user", +# "content": [ +# { +# "type": "text", +# "text": "Whats in this image?" 
+# }, +# { +# "type": "image_url", +# "image_url": { +# "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" +# } +# } +# ] +# } +# ], +# stream=True +# ) +# print(resp) +# for chunk in resp: +# print(chunk) +# except Exception as e: +# import traceback +# traceback.print_exc() +# raise e +# test_gemini_pro_vision_stream() + +# def test_gemini_pro_vision_async(): +# try: +# litellm.set_verbose = True +# litellm.num_retries=0 +# async def test(): +# resp = await litellm.acompletion( +# model = "vertex_ai/gemini-pro-vision", +# messages=[ +# { +# "role": "user", +# "content": [ +# { +# "type": "text", +# "text": "Whats in this image?" +# }, +# { +# "type": "image_url", +# "image_url": { +# "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" +# } +# } +# ] +# } +# ], +# ) +# print("async response gemini pro vision") +# print(resp) +# asyncio.run(test()) +# except Exception as e: +# import traceback +# traceback.print_exc() +# raise e +# test_gemini_pro_vision_async() + + +# def test_gemini_pro_vision_async_stream(): +# try: +# litellm.set_verbose = True +# litellm.num_retries=0 +# async def test(): +# resp = await litellm.acompletion( +# model = "vertex_ai/gemini-pro-vision", +# messages=[ +# { +# "role": "user", +# "content": [ +# { +# "type": "text", +# "text": "Whats in this image?" +# }, +# { +# "type": "image_url", +# "image_url": { +# "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" +# } +# } +# ] +# } +# ], +# stream=True +# ) +# print("async response gemini pro vision") +# print(resp) +# for chunk in resp: +# print(chunk) +# asyncio.run(test()) +# except Exception as e: +# import traceback +# traceback.print_exc() +# raise e +# test_gemini_pro_vision_async() \ No newline at end of file diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py index cce923b6c..24b7f37a8 100644 --- a/litellm/tests/test_caching.py +++ b/litellm/tests/test_caching.py @@ -29,16 +29,19 @@ def generate_random_word(length=4): messages = [{"role": "user", "content": "who is ishaan 5222"}] def test_caching_v2(): # test in memory cache try: + litellm.set_verbose=True litellm.cache = Cache() response1 = completion(model="gpt-3.5-turbo", messages=messages, caching=True) response2 = completion(model="gpt-3.5-turbo", messages=messages, caching=True) print(f"response1: {response1}") print(f"response2: {response2}") litellm.cache = None # disable cache + litellm.success_callback = [] + litellm._async_success_callback = [] if response2['choices'][0]['message']['content'] != response1['choices'][0]['message']['content']: print(f"response1: {response1}") print(f"response2: {response2}") - pytest.fail(f"Error occurred: {e}") + pytest.fail(f"Error occurred:") except Exception as e: print(f"error occurred: {traceback.format_exc()}") pytest.fail(f"Error occurred: {e}") @@ -58,6 +61,8 @@ def test_caching_with_models_v2(): print(f"response2: {response2}") print(f"response3: {response3}") litellm.cache = None + litellm.success_callback = [] + litellm._async_success_callback = [] if response3['choices'][0]['message']['content'] == response2['choices'][0]['message']['content']: # if models are different, it should not return cached response print(f"response2: 
{response2}") @@ -91,6 +96,8 @@ def test_embedding_caching(): print(f"Embedding 2 response time: {end_time - start_time} seconds") litellm.cache = None + litellm.success_callback = [] + litellm._async_success_callback = [] assert end_time - start_time <= 0.1 # ensure 2nd response comes in in under 0.1 s if embedding2['data'][0]['embedding'] != embedding1['data'][0]['embedding']: print(f"embedding1: {embedding1}") @@ -145,6 +152,8 @@ def test_embedding_caching_azure(): print(f"Embedding 2 response time: {end_time - start_time} seconds") litellm.cache = None + litellm.success_callback = [] + litellm._async_success_callback = [] assert end_time - start_time <= 0.1 # ensure 2nd response comes in in under 0.1 s if embedding2['data'][0]['embedding'] != embedding1['data'][0]['embedding']: print(f"embedding1: {embedding1}") @@ -175,6 +184,8 @@ def test_redis_cache_completion(): print("\nresponse 3", response3) print("\nresponse 4", response4) litellm.cache = None + litellm.success_callback = [] + litellm._async_success_callback = [] """ 1 & 2 should be exactly the same @@ -226,6 +237,8 @@ def test_redis_cache_completion_stream(): assert response_1_content == response_2_content, f"Response 1 != Response 2. Same params, Response 1{response_1_content} != Response 2{response_2_content}" litellm.success_callback = [] litellm.cache = None + litellm.success_callback = [] + litellm._async_success_callback = [] except Exception as e: print(e) litellm.success_callback = [] @@ -271,11 +284,53 @@ def test_redis_cache_acompletion_stream(): print("\nresponse 2", response_2_content) assert response_1_content == response_2_content, f"Response 1 != Response 2. Same params, Response 1{response_1_content} != Response 2{response_2_content}" litellm.cache = None + litellm.success_callback = [] + litellm._async_success_callback = [] except Exception as e: print(e) raise e # test_redis_cache_acompletion_stream() +def test_redis_cache_acompletion_stream_bedrock(): + import asyncio + try: + litellm.set_verbose = True + random_word = generate_random_word() + messages = [{"role": "user", "content": f"write a one sentence poem about: {random_word}"}] + litellm.cache = Cache(type="redis", host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD']) + print("test for caching, streaming + completion") + response_1_content = "" + response_2_content = "" + + async def call1(): + nonlocal response_1_content + response1 = await litellm.acompletion(model="bedrock/anthropic.claude-v1", messages=messages, max_tokens=40, temperature=1, stream=True) + async for chunk in response1: + print(chunk) + response_1_content += chunk.choices[0].delta.content or "" + print(response_1_content) + asyncio.run(call1()) + time.sleep(0.5) + print("\n\n Response 1 content: ", response_1_content, "\n\n") + + async def call2(): + nonlocal response_2_content + response2 = await litellm.acompletion(model="bedrock/anthropic.claude-v1", messages=messages, max_tokens=40, temperature=1, stream=True) + async for chunk in response2: + print(chunk) + response_2_content += chunk.choices[0].delta.content or "" + print(response_2_content) + asyncio.run(call2()) + print("\nresponse 1", response_1_content) + print("\nresponse 2", response_2_content) + assert response_1_content == response_2_content, f"Response 1 != Response 2. 
Same params, Response 1{response_1_content} != Response 2{response_2_content}" + litellm.cache = None + litellm.success_callback = [] + litellm._async_success_callback = [] + except Exception as e: + print(e) + raise e +# test_redis_cache_acompletion_stream_bedrock() # redis cache with custom keys def custom_get_cache_key(*args, **kwargs): # return key to use for your cache: @@ -312,9 +367,44 @@ def test_custom_redis_cache_with_key(): if response3['choices'][0]['message']['content'] == response2['choices'][0]['message']['content']: pytest.fail(f"Error occurred:") litellm.cache = None + litellm.success_callback = [] + litellm._async_success_callback = [] # test_custom_redis_cache_with_key() +def test_cache_override(): + # test if we can override the cache, when `caching=False` but litellm.cache = Cache() is set + # in this case it should not return cached responses + litellm.cache = Cache() + print("Testing cache override") + litellm.set_verbose=True + + # test embedding + response1 = embedding( + model = "text-embedding-ada-002", + input=[ + "hello who are you" + ], + caching = False + ) + + + start_time = time.time() + + response2 = embedding( + model = "text-embedding-ada-002", + input=[ + "hello who are you" + ], + caching = False + ) + + end_time = time.time() + print(f"Embedding 2 response time: {end_time - start_time} seconds") + + assert end_time - start_time > 0.1 # ensure 2nd response comes in over 0.1s. This should not be cached. +# test_cache_override() + def test_custom_redis_cache_params(): # test if we can init redis with **kwargs @@ -333,6 +423,8 @@ def test_custom_redis_cache_params(): print(litellm.cache.cache.redis_client) litellm.cache = None + litellm.success_callback = [] + litellm._async_success_callback = [] except Exception as e: pytest.fail(f"Error occurred:", e) @@ -340,15 +432,58 @@ def test_custom_redis_cache_params(): def test_get_cache_key(): from litellm.caching import Cache try: + print("Testing get_cache_key") cache_instance = Cache() cache_key = cache_instance.get_cache_key(**{'model': 'gpt-3.5-turbo', 'messages': [{'role': 'user', 'content': 'write a one sentence poem about: 7510'}], 'max_tokens': 40, 'temperature': 0.2, 'stream': True, 'litellm_call_id': 'ffe75e7e-8a07-431f-9a74-71a5b9f35f0b', 'litellm_logging_obj': {}} ) + cache_key_2 = cache_instance.get_cache_key(**{'model': 'gpt-3.5-turbo', 'messages': [{'role': 'user', 'content': 'write a one sentence poem about: 7510'}], 'max_tokens': 40, 'temperature': 0.2, 'stream': True, 'litellm_call_id': 'ffe75e7e-8a07-431f-9a74-71a5b9f35f0b', 'litellm_logging_obj': {}} + ) assert cache_key == "model: gpt-3.5-turbomessages: [{'role': 'user', 'content': 'write a one sentence poem about: 7510'}]temperature: 0.2max_tokens: 40" + assert cache_key == cache_key_2, f"{cache_key} != {cache_key_2}. The same kwargs should have the same cache key across runs" + + embedding_cache_key = cache_instance.get_cache_key( + **{'model': 'azure/azure-embedding-model', 'api_base': 'https://openai-gpt-4-test-v-1.openai.azure.com/', + 'api_key': '', 'api_version': '2023-07-01-preview', + 'timeout': None, 'max_retries': 0, 'input': ['hi who is ishaan'], + 'caching': True, + 'client': "" + } + ) + + print(embedding_cache_key) + + assert embedding_cache_key == "model: azure/azure-embedding-modelinput: ['hi who is ishaan']", f"{embedding_cache_key} != 'model: azure/azure-embedding-modelinput: ['hi who is ishaan']'. 
The same kwargs should have the same cache key across runs" + + # Proxy - embedding cache, test if embedding key, gets model_group and not model + embedding_cache_key_2 = cache_instance.get_cache_key( + **{'model': 'azure/azure-embedding-model', 'api_base': 'https://openai-gpt-4-test-v-1.openai.azure.com/', + 'api_key': '', 'api_version': '2023-07-01-preview', + 'timeout': None, 'max_retries': 0, 'input': ['hi who is ishaan'], + 'caching': True, + 'client': "", + 'proxy_server_request': {'url': 'http://0.0.0.0:8000/embeddings', + 'method': 'POST', + 'headers': + {'host': '0.0.0.0:8000', 'user-agent': 'curl/7.88.1', 'accept': '*/*', 'content-type': 'application/json', + 'content-length': '80'}, + 'body': {'model': 'azure-embedding-model', 'input': ['hi who is ishaan']}}, + 'user': None, + 'metadata': {'user_api_key': None, + 'headers': {'host': '0.0.0.0:8000', 'user-agent': 'curl/7.88.1', 'accept': '*/*', 'content-type': 'application/json', 'content-length': '80'}, + 'model_group': 'EMBEDDING_MODEL_GROUP', + 'deployment': 'azure/azure-embedding-model-ModelID-azure/azure-embedding-modelhttps://openai-gpt-4-test-v-1.openai.azure.com/2023-07-01-preview'}, + 'model_info': {'mode': 'embedding', 'base_model': 'text-embedding-ada-002', 'id': '20b2b515-f151-4dd5-a74f-2231e2f54e29'}, + 'litellm_call_id': '2642e009-b3cd-443d-b5dd-bb7d56123b0e', 'litellm_logging_obj': ''} + ) + + print(embedding_cache_key_2) + assert embedding_cache_key_2 == "model: EMBEDDING_MODEL_GROUPinput: ['hi who is ishaan']" + print("passed!") except Exception as e: traceback.print_exc() pytest.fail(f"Error occurred:", e) -# test_get_cache_key() +test_get_cache_key() # test_custom_redis_cache_params() diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index 3d8b9a1b2..d8babc0ca 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -21,6 +21,13 @@ messages = [{"content": user_message, "role": "user"}] def logger_fn(user_model_dict): print(f"user_model_dict: {user_model_dict}") +@pytest.fixture(autouse=True) +def reset_callbacks(): + print("\npytest fixture - resetting callbacks") + litellm.success_callback = [] + litellm._async_success_callback = [] + litellm.failure_callback = [] + litellm.callbacks = [] def test_completion_custom_provider_model_name(): try: @@ -54,13 +61,32 @@ def test_completion_claude(): print(response) print(response.usage) print(response.usage.completion_tokens) - print(response["usage"]["completion_tokens"]) + print(response["usage"]["completion_tokens"]) # print("new cost tracking") except Exception as e: pytest.fail(f"Error occurred: {e}") # test_completion_claude() +def test_completion_mistral_api(): + try: + litellm.set_verbose=True + response = completion( + model="mistral/mistral-tiny", + messages=[ + { + "role": "user", + "content": "Hey, how's it going?", + } + ], + safe_mode = True + ) + # Add any assertions here to check the response + print(response) + except Exception as e: + pytest.fail(f"Error occurred: {e}") +# test_completion_mistral_api() + def test_completion_claude2_1(): try: print("claude2.1 test request") @@ -287,7 +313,7 @@ def hf_test_completion_tgi(): print(response) except Exception as e: pytest.fail(f"Error occurred: {e}") -hf_test_completion_tgi() +# hf_test_completion_tgi() # ################### Hugging Face Conversational models ######################## # def hf_test_completion_conv(): @@ -611,7 +637,7 @@ def test_completion_azure_key_completion_arg(): os.environ.pop("AZURE_API_KEY", None) try: print("azure 
gpt-3.5 test\n\n") - litellm.set_verbose=False + litellm.set_verbose=True ## Test azure call response = completion( model="azure/chatgpt-v-2", @@ -696,6 +722,7 @@ def test_completion_azure(): print(response) cost = completion_cost(completion_response=response) + assert cost > 0.0 print("Cost for azure completion request", cost) except Exception as e: pytest.fail(f"Error occurred: {e}") @@ -1013,15 +1040,56 @@ def test_completion_together_ai(): # Add any assertions here to check the response print(response) cost = completion_cost(completion_response=response) + assert cost > 0.0 print("Cost for completion call together-computer/llama-2-70b: ", f"${float(cost):.10f}") except Exception as e: pytest.fail(f"Error occurred: {e}") +def test_completion_together_ai_mixtral(): + model_name = "together_ai/DiscoResearch/DiscoLM-mixtral-8x7b-v2" + try: + messages =[ + {"role": "user", "content": "Who are you"}, + {"role": "assistant", "content": "I am your helpful assistant."}, + {"role": "user", "content": "Tell me a joke"}, + ] + response = completion(model=model_name, messages=messages, max_tokens=256, n=1, logger_fn=logger_fn) + # Add any assertions here to check the response + print(response) + cost = completion_cost(completion_response=response) + assert cost > 0.0 + print("Cost for completion call together-computer/llama-2-70b: ", f"${float(cost):.10f}") + except litellm.Timeout as e: + pass + except Exception as e: + pytest.fail(f"Error occurred: {e}") + +test_completion_together_ai_mixtral() + +def test_completion_together_ai_yi_chat(): + model_name = "together_ai/zero-one-ai/Yi-34B-Chat" + try: + messages =[ + {"role": "user", "content": "What llm are you?"}, + ] + response = completion(model=model_name, messages=messages) + # Add any assertions here to check the response + print(response) + cost = completion_cost(completion_response=response) + assert cost > 0.0 + print("Cost for completion call together-computer/llama-2-70b: ", f"${float(cost):.10f}") + except Exception as e: + pytest.fail(f"Error occurred: {e}") +# test_completion_together_ai_yi_chat() + # test_completion_together_ai() def test_customprompt_together_ai(): try: litellm.set_verbose = False litellm.num_retries = 0 + print("in test_customprompt_together_ai") + print(litellm.success_callback) + print(litellm._async_success_callback) response = completion( model="together_ai/mistralai/Mistral-7B-Instruct-v0.1", messages=messages, @@ -1030,7 +1098,6 @@ def test_customprompt_together_ai(): print(response) except litellm.exceptions.Timeout as e: print(f"Timeout Error") - litellm.num_retries = 3 # reset retries pass except Exception as e: print(f"ERROR TYPE {type(e)}") @@ -1065,7 +1132,7 @@ def test_completion_chat_sagemaker(): temperature=0.7, stream=True, ) - # Add any assertions here to check the response + # Add any assertions here to check the response complete_response = "" for chunk in response: complete_response += chunk.choices[0].delta.content or "" diff --git a/litellm/tests/test_config.py b/litellm/tests/test_config.py index 73e719cad..ceecaf181 100644 --- a/litellm/tests/test_config.py +++ b/litellm/tests/test_config.py @@ -47,7 +47,7 @@ def test_config_context_moderation(): print(f"Exception: {e}") pytest.fail(f"An exception occurred: {e}") -# test_config_context_moderation() +# test_config_context_moderation() def test_config_context_default_fallback(): try: diff --git a/litellm/tests/test_configs/custom_callbacks.py b/litellm/tests/test_configs/custom_callbacks.py index 94b89e589..7aa1577f6 100644 --- 
a/litellm/tests/test_configs/custom_callbacks.py +++ b/litellm/tests/test_configs/custom_callbacks.py @@ -2,7 +2,7 @@ from litellm.integrations.custom_logger import CustomLogger import inspect import litellm -class MyCustomHandler(CustomLogger): +class testCustomCallbackProxy(CustomLogger): def __init__(self): self.success: bool = False # type: ignore self.failure: bool = False # type: ignore @@ -55,8 +55,11 @@ class MyCustomHandler(CustomLogger): self.async_success = True print("Value of async success: ", self.async_success) print("\n kwargs: ", kwargs) - if kwargs.get("model") == "azure-embedding-model": + if kwargs.get("model") == "azure-embedding-model" or kwargs.get("model") == "ada": + print("Got an embedding model", kwargs.get("model")) + print("Setting embedding success to True") self.async_success_embedding = True + print("Value of async success embedding: ", self.async_success_embedding) self.async_embedding_kwargs = kwargs self.async_embedding_response = response_obj if kwargs.get("stream") == True: @@ -79,6 +82,9 @@ class MyCustomHandler(CustomLogger): # tokens used in response usage = response_obj["usage"] + print("\n\n in custom callback vars my custom logger, ", vars(my_custom_logger)) + + print( f""" Model: {model}, @@ -104,4 +110,4 @@ class MyCustomHandler(CustomLogger): self.async_completion_kwargs_fail = kwargs -my_custom_logger = MyCustomHandler() \ No newline at end of file +my_custom_logger = testCustomCallbackProxy() \ No newline at end of file diff --git a/litellm/tests/test_configs/test_bad_config.yaml b/litellm/tests/test_configs/test_bad_config.yaml new file mode 100644 index 000000000..9899af2b7 --- /dev/null +++ b/litellm/tests/test_configs/test_bad_config.yaml @@ -0,0 +1,16 @@ +model_list: + - model_name: gpt-3.5-turbo + litellm_params: + api_key: bad-key + model: gpt-3.5-turbo + - model_name: azure-gpt-3.5-turbo + litellm_params: + model: azure/chatgpt-v-2 + api_base: os.environ/AZURE_API_BASE + api_key: bad-key + - model_name: azure-embedding + litellm_params: + model: azure/azure-embedding-model + api_base: os.environ/AZURE_API_BASE + api_key: bad-key + \ No newline at end of file diff --git a/litellm/tests/test_configs/test_config_no_auth.yaml b/litellm/tests/test_configs/test_config_no_auth.yaml index 1af6e6e6d..2fd9ef203 100644 --- a/litellm/tests/test_configs/test_config_no_auth.yaml +++ b/litellm/tests/test_configs/test_config_no_auth.yaml @@ -19,3 +19,63 @@ model_list: model_info: description: this is a test openai model model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 56f1bd94-3b54-4b67-9ea2-7c70e9a3a709 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 4d1ee26c-abca-450c-8744-8e87fd6755e9 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 00e19c0f-b63d-42bb-88e9-016fb0c60764 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 79fc75bf-8e1b-47d5-8d24-9365a854af03 + model_name: test_openai_models +- litellm_params: + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: 2023-07-01-preview + model: azure/azure-embedding-model + model_name: azure-embedding-model +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 
55848c55-4162-40f9-a6e2-9a722b9ef404 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 34339b1e-e030-4bcc-a531-c48559f10ce4 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: f6f74e14-ac64-4403-9365-319e584dcdc5 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 9b1ef341-322c-410a-8992-903987fef439 + model_name: test_openai_models +- model_name: amazon-embeddings + litellm_params: + model: "bedrock/amazon.titan-embed-text-v1" +- model_name: "GPT-J 6B - Sagemaker Text Embedding (Internal)" + litellm_params: + model: "sagemaker/berri-benchmarking-gpt-j-6b-fp16" \ No newline at end of file diff --git a/litellm/tests/test_custom_callback_input.py b/litellm/tests/test_custom_callback_input.py new file mode 100644 index 000000000..d193c53b6 --- /dev/null +++ b/litellm/tests/test_custom_callback_input.py @@ -0,0 +1,631 @@ +### What this tests #### +## This test asserts the type of data passed into each method of the custom callback handler +import sys, os, time, inspect, asyncio, traceback +from datetime import datetime +import pytest +sys.path.insert(0, os.path.abspath('../..')) +from typing import Optional, Literal, List, Union +from litellm import completion, embedding, Cache +import litellm +from litellm.integrations.custom_logger import CustomLogger + +# Test Scenarios (test across completion, streaming, embedding) +## 1: Pre-API-Call +## 2: Post-API-Call +## 3: On LiteLLM Call success +## 4: On LiteLLM Call failure +## 5. Caching + +# Test models +## 1. OpenAI +## 2. Azure OpenAI +## 3. Non-OpenAI/Azure - e.g. Bedrock + +# Test interfaces +## 1. 
litellm.completion() + litellm.embeddings() +## refer to test_custom_callback_input_router.py for the router + proxy tests + +class CompletionCustomHandler(CustomLogger): # https://docs.litellm.ai/docs/observability/custom_callback#callback-class + """ + The set of expected inputs to a custom handler for a + """ + # Class variables or attributes + def __init__(self): + self.errors = [] + self.states: Optional[List[Literal["sync_pre_api_call", "async_pre_api_call", "post_api_call", "sync_stream", "async_stream", "sync_success", "async_success", "sync_failure", "async_failure"]]] = [] + + def log_pre_api_call(self, model, messages, kwargs): + try: + self.states.append("sync_pre_api_call") + ## MODEL + assert isinstance(model, str) + ## MESSAGES + assert isinstance(messages, list) + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + except Exception as e: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + def log_post_api_call(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("post_api_call") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert end_time == None + ## RESPONSE OBJECT + assert response_obj == None + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert isinstance(kwargs['input'], (list, dict, str)) + assert isinstance(kwargs['api_key'], (str, type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) or inspect.iscoroutine(kwargs['original_response']) or inspect.isasyncgen(kwargs['original_response']) + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("async_stream") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert isinstance(response_obj, litellm.ModelResponse) + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) and isinstance(kwargs['messages'][0], dict) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert (isinstance(kwargs['input'], list) and isinstance(kwargs['input'][0], dict)) or isinstance(kwargs['input'], (dict, str)) + assert isinstance(kwargs['api_key'], (str, type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) or inspect.isasyncgen(kwargs['original_response']) or 
inspect.iscoroutine(kwargs['original_response']) + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + def log_success_event(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("sync_success") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert isinstance(response_obj, litellm.ModelResponse) + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) and isinstance(kwargs['messages'][0], dict) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert (isinstance(kwargs['input'], list) and isinstance(kwargs['input'][0], dict)) or isinstance(kwargs['input'], (dict, str)) + assert isinstance(kwargs['api_key'], (str, type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + def log_failure_event(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("sync_failure") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert response_obj == None + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) and isinstance(kwargs['messages'][0], dict) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert (isinstance(kwargs['input'], list) and isinstance(kwargs['input'][0], dict)) or isinstance(kwargs['input'], (dict, str)) + assert isinstance(kwargs['api_key'], (str, type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) or kwargs["original_response"] == None + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def async_log_pre_api_call(self, model, messages, kwargs): + try: + self.states.append("async_pre_api_call") + ## MODEL + assert isinstance(model, str) + ## MESSAGES + assert isinstance(messages, list) and isinstance(messages[0], dict) + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) and isinstance(kwargs['messages'][0], dict) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + except Exception as e: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def 
async_log_success_event(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("async_success") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert isinstance(response_obj, (litellm.ModelResponse, litellm.EmbeddingResponse)) + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert isinstance(kwargs['input'], (list, dict, str)) + assert isinstance(kwargs['api_key'], (str, type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) or inspect.isasyncgen(kwargs['original_response']) or inspect.iscoroutine(kwargs['original_response']) + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + assert kwargs["cache_hit"] is None or isinstance(kwargs["cache_hit"], bool) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("async_failure") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert response_obj == None + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert isinstance(kwargs['input'], (list, str, dict)) + assert isinstance(kwargs['api_key'], (str, type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) or inspect.isasyncgen(kwargs['original_response']) or kwargs['original_response'] == None + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + +# COMPLETION +## Test OpenAI + sync +def test_chat_openai_stream(): + try: + customHandler = CompletionCustomHandler() + litellm.callbacks = [customHandler] + response = litellm.completion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm sync openai" + }]) + ## test streaming + response = litellm.completion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm openai" + }], + stream=True) + for chunk in response: + continue + ## test failure callback + try: + response = litellm.completion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm openai" + }], + api_key="my-bad-key", + stream=True) + for chunk in response: + continue + except: + pass + time.sleep(1) + print(f"customHandler.errors: {customHandler.errors}") + assert len(customHandler.errors) == 0 + litellm.callbacks = [] + except Exception as e: + pytest.fail(f"An exception occurred: {str(e)}") + +# test_chat_openai_stream() + +## Test OpenAI + Async +@pytest.mark.asyncio +async def 
test_async_chat_openai_stream(): + try: + customHandler = CompletionCustomHandler() + litellm.callbacks = [customHandler] + response = await litellm.acompletion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm openai" + }]) + ## test streaming + response = await litellm.acompletion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm openai" + }], + stream=True) + async for chunk in response: + continue + ## test failure callback + try: + response = await litellm.acompletion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm openai" + }], + api_key="my-bad-key", + stream=True) + async for chunk in response: + continue + except: + pass + time.sleep(1) + print(f"customHandler.errors: {customHandler.errors}") + assert len(customHandler.errors) == 0 + litellm.callbacks = [] + except Exception as e: + pytest.fail(f"An exception occurred: {str(e)}") + +# asyncio.run(test_async_chat_openai_stream()) + +## Test Azure + sync +def test_chat_azure_stream(): + try: + customHandler = CompletionCustomHandler() + litellm.callbacks = [customHandler] + response = litellm.completion(model="azure/chatgpt-v-2", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm sync azure" + }]) + # test streaming + response = litellm.completion(model="azure/chatgpt-v-2", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm sync azure" + }], + stream=True) + for chunk in response: + continue + # test failure callback + try: + response = litellm.completion(model="azure/chatgpt-v-2", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm sync azure" + }], + api_key="my-bad-key", + stream=True) + for chunk in response: + continue + except: + pass + time.sleep(1) + print(f"customHandler.errors: {customHandler.errors}") + assert len(customHandler.errors) == 0 + litellm.callbacks = [] + except Exception as e: + pytest.fail(f"An exception occurred: {str(e)}") + +# test_chat_azure_stream() + +## Test Azure + Async +@pytest.mark.asyncio +async def test_async_chat_azure_stream(): + try: + customHandler = CompletionCustomHandler() + litellm.callbacks = [customHandler] + response = await litellm.acompletion(model="azure/chatgpt-v-2", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm async azure" + }]) + ## test streaming + response = await litellm.acompletion(model="azure/chatgpt-v-2", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm async azure" + }], + stream=True) + async for chunk in response: + continue + ## test failure callback + try: + response = await litellm.acompletion(model="azure/chatgpt-v-2", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm async azure" + }], + api_key="my-bad-key", + stream=True) + async for chunk in response: + continue + except: + pass + await asyncio.sleep(1) + print(f"customHandler.errors: {customHandler.errors}") + assert len(customHandler.errors) == 0 + litellm.callbacks = [] + except Exception as e: + pytest.fail(f"An exception occurred: {str(e)}") + +# asyncio.run(test_async_chat_azure_stream()) + +## Test Bedrock + sync +def test_chat_bedrock_stream(): + try: + customHandler = CompletionCustomHandler() + litellm.callbacks = [customHandler] + response = litellm.completion(model="bedrock/anthropic.claude-v1", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm sync bedrock" + }]) + # test streaming + response = litellm.completion(model="bedrock/anthropic.claude-v1", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm sync bedrock" + }], + stream=True) + for chunk in 
response: + continue + # test failure callback + try: + response = litellm.completion(model="bedrock/anthropic.claude-v1", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm sync bedrock" + }], + aws_region_name="my-bad-region", + stream=True) + for chunk in response: + continue + except: + pass + time.sleep(1) + print(f"customHandler.errors: {customHandler.errors}") + assert len(customHandler.errors) == 0 + litellm.callbacks = [] + except Exception as e: + pytest.fail(f"An exception occurred: {str(e)}") + +# test_chat_bedrock_stream() + +## Test Bedrock + Async +@pytest.mark.asyncio +async def test_async_chat_bedrock_stream(): + try: + customHandler = CompletionCustomHandler() + litellm.callbacks = [customHandler] + response = await litellm.acompletion(model="bedrock/anthropic.claude-v1", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm async bedrock" + }]) + # test streaming + response = await litellm.acompletion(model="bedrock/anthropic.claude-v1", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm async bedrock" + }], + stream=True) + print(f"response: {response}") + async for chunk in response: + print(f"chunk: {chunk}") + continue + ## test failure callback + try: + response = await litellm.acompletion(model="bedrock/anthropic.claude-v1", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm async bedrock" + }], + aws_region_name="my-bad-key", + stream=True) + async for chunk in response: + continue + except: + pass + time.sleep(1) + print(f"customHandler.errors: {customHandler.errors}") + assert len(customHandler.errors) == 0 + litellm.callbacks = [] + except Exception as e: + pytest.fail(f"An exception occurred: {str(e)}") + +# asyncio.run(test_async_chat_bedrock_stream()) + +# EMBEDDING +## Test OpenAI + Async +@pytest.mark.asyncio +async def test_async_embedding_openai(): + try: + customHandler_success = CompletionCustomHandler() + customHandler_failure = CompletionCustomHandler() + litellm.callbacks = [customHandler_success] + response = await litellm.aembedding(model="azure/azure-embedding-model", + input=["good morning from litellm"]) + await asyncio.sleep(1) + print(f"customHandler_success.errors: {customHandler_success.errors}") + print(f"customHandler_success.states: {customHandler_success.states}") + assert len(customHandler_success.errors) == 0 + assert len(customHandler_success.states) == 3 # pre, post, success + # test failure callback + litellm.callbacks = [customHandler_failure] + try: + response = await litellm.aembedding(model="text-embedding-ada-002", + input=["good morning from litellm"], + api_key="my-bad-key") + except: + pass + await asyncio.sleep(1) + print(f"customHandler_failure.errors: {customHandler_failure.errors}") + print(f"customHandler_failure.states: {customHandler_failure.states}") + assert len(customHandler_failure.errors) == 0 + assert len(customHandler_failure.states) == 3 # pre, post, failure + except Exception as e: + pytest.fail(f"An exception occurred: {str(e)}") + +# asyncio.run(test_async_embedding_openai()) + +## Test Azure + Async +@pytest.mark.asyncio +async def test_async_embedding_azure(): + try: + customHandler_success = CompletionCustomHandler() + customHandler_failure = CompletionCustomHandler() + litellm.callbacks = [customHandler_success] + response = await litellm.aembedding(model="azure/azure-embedding-model", + input=["good morning from litellm"]) + await asyncio.sleep(1) + print(f"customHandler_success.errors: {customHandler_success.errors}") + print(f"customHandler_success.states: 
{customHandler_success.states}") + assert len(customHandler_success.errors) == 0 + assert len(customHandler_success.states) == 3 # pre, post, success + # test failure callback + litellm.callbacks = [customHandler_failure] + try: + response = await litellm.aembedding(model="azure/azure-embedding-model", + input=["good morning from litellm"], + api_key="my-bad-key") + except: + pass + await asyncio.sleep(1) + print(f"customHandler_failure.errors: {customHandler_failure.errors}") + print(f"customHandler_failure.states: {customHandler_failure.states}") + assert len(customHandler_failure.errors) == 0 + assert len(customHandler_failure.states) == 3 # pre, post, failure + except Exception as e: + pytest.fail(f"An exception occurred: {str(e)}") + +# asyncio.run(test_async_embedding_azure()) + +## Test Bedrock + Async +@pytest.mark.asyncio +async def test_async_embedding_bedrock(): + try: + customHandler_success = CompletionCustomHandler() + customHandler_failure = CompletionCustomHandler() + litellm.callbacks = [customHandler_success] + litellm.set_verbose = True + response = await litellm.aembedding(model="bedrock/cohere.embed-multilingual-v3", + input=["good morning from litellm"], aws_region_name="os.environ/AWS_REGION_NAME_2") + await asyncio.sleep(1) + print(f"customHandler_success.errors: {customHandler_success.errors}") + print(f"customHandler_success.states: {customHandler_success.states}") + assert len(customHandler_success.errors) == 0 + assert len(customHandler_success.states) == 3 # pre, post, success + # test failure callback + litellm.callbacks = [customHandler_failure] + try: + response = await litellm.aembedding(model="bedrock/cohere.embed-multilingual-v3", + input=["good morning from litellm"], + aws_region_name="my-bad-region") + except: + pass + await asyncio.sleep(1) + print(f"customHandler_failure.errors: {customHandler_failure.errors}") + print(f"customHandler_failure.states: {customHandler_failure.states}") + assert len(customHandler_failure.errors) == 0 + assert len(customHandler_failure.states) == 3 # pre, post, failure + except Exception as e: + pytest.fail(f"An exception occurred: {str(e)}") + +# asyncio.run(test_async_embedding_bedrock()) + +# CACHING +## Test Azure - completion, embedding +@pytest.mark.asyncio +async def test_async_completion_azure_caching(): + customHandler_caching = CompletionCustomHandler() + litellm.cache = Cache(type="redis", host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD']) + litellm.callbacks = [customHandler_caching] + unique_time = time.time() + response1 = await litellm.acompletion(model="azure/chatgpt-v-2", + messages=[{ + "role": "user", + "content": f"Hi 👋 - i'm async azure {unique_time}" + }], + caching=True) + await asyncio.sleep(1) + print(f"customHandler_caching.states pre-cache hit: {customHandler_caching.states}") + response2 = await litellm.acompletion(model="azure/chatgpt-v-2", + messages=[{ + "role": "user", + "content": f"Hi 👋 - i'm async azure {unique_time}" + }], + caching=True) + await asyncio.sleep(1) # success callbacks are done in parallel + print(f"customHandler_caching.states post-cache hit: {customHandler_caching.states}") + assert len(customHandler_caching.errors) == 0 + assert len(customHandler_caching.states) == 4 # pre, post, success, success + +@pytest.mark.asyncio +async def test_async_embedding_azure_caching(): + print("Testing custom callback input - Azure Caching") + customHandler_caching = CompletionCustomHandler() + litellm.cache = Cache(type="redis", 
host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD']) + litellm.callbacks = [customHandler_caching] + unique_time = time.time() + response1 = await litellm.aembedding(model="azure/azure-embedding-model", + input=[f"good morning from litellm1 {unique_time}"], + caching=True) + await asyncio.sleep(1) # set cache is async for aembedding() + response2 = await litellm.aembedding(model="azure/azure-embedding-model", + input=[f"good morning from litellm1 {unique_time}"], + caching=True) + await asyncio.sleep(1) # success callbacks are done in parallel + print(customHandler_caching.states) + assert len(customHandler_caching.errors) == 0 + assert len(customHandler_caching.states) == 4 # pre, post, success, success + +# asyncio.run( +# test_async_embedding_azure_caching() +# ) \ No newline at end of file diff --git a/litellm/tests/test_custom_callback_router.py b/litellm/tests/test_custom_callback_router.py new file mode 100644 index 000000000..43d532521 --- /dev/null +++ b/litellm/tests/test_custom_callback_router.py @@ -0,0 +1,488 @@ +### What this tests #### +## This test asserts the type of data passed into each method of the custom callback handler +import sys, os, time, inspect, asyncio, traceback +from datetime import datetime +import pytest +sys.path.insert(0, os.path.abspath('../..')) +from typing import Optional, Literal, List +from litellm import Router, Cache +import litellm +from litellm.integrations.custom_logger import CustomLogger + +# Test Scenarios (test across completion, streaming, embedding) +## 1: Pre-API-Call +## 2: Post-API-Call +## 3: On LiteLLM Call success +## 4: On LiteLLM Call failure +## fallbacks +## retries + +# Test cases +## 1. Simple Azure OpenAI acompletion + streaming call +## 2. Simple Azure OpenAI aembedding call +## 3. Azure OpenAI acompletion + streaming call with retries +## 4. Azure OpenAI aembedding call with retries +## 5. Azure OpenAI acompletion + streaming call with fallbacks +## 6. Azure OpenAI aembedding call with fallbacks + +# Test interfaces +## 1. router.completion() + router.embeddings() +## 2. 
proxy.completions + proxy.embeddings + +class CompletionCustomHandler(CustomLogger): # https://docs.litellm.ai/docs/observability/custom_callback#callback-class + """ + The set of expected inputs to a custom handler for a + """ + # Class variables or attributes + def __init__(self): + self.errors = [] + self.states: Optional[List[Literal["sync_pre_api_call", "async_pre_api_call", "post_api_call", "sync_stream", "async_stream", "sync_success", "async_success", "sync_failure", "async_failure"]]] = [] + + def log_pre_api_call(self, model, messages, kwargs): + try: + print(f'received kwargs in pre-input: {kwargs}') + self.states.append("sync_pre_api_call") + ## MODEL + assert isinstance(model, str) + ## MESSAGES + assert isinstance(messages, list) + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + ### ROUTER-SPECIFIC KWARGS + assert isinstance(kwargs["litellm_params"]["metadata"], dict) + assert isinstance(kwargs["litellm_params"]["metadata"]["model_group"], str) + assert isinstance(kwargs["litellm_params"]["metadata"]["deployment"], str) + assert isinstance(kwargs["litellm_params"]["model_info"], dict) + assert isinstance(kwargs["litellm_params"]["model_info"]["id"], str) + assert isinstance(kwargs["litellm_params"]["proxy_server_request"], (str, type(None))) + assert isinstance(kwargs["litellm_params"]["preset_cache_key"], (str, type(None))) + assert isinstance(kwargs["litellm_params"]["stream_response"], dict) + except Exception as e: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + def log_post_api_call(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("post_api_call") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert end_time == None + ## RESPONSE OBJECT + assert response_obj == None + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert isinstance(kwargs['input'], (list, dict, str)) + assert isinstance(kwargs['api_key'], (str, type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) or inspect.iscoroutine(kwargs['original_response']) or inspect.isasyncgen(kwargs['original_response']) + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + ### ROUTER-SPECIFIC KWARGS + assert isinstance(kwargs["litellm_params"]["metadata"], dict) + assert isinstance(kwargs["litellm_params"]["metadata"]["model_group"], str) + assert isinstance(kwargs["litellm_params"]["metadata"]["deployment"], str) + assert isinstance(kwargs["litellm_params"]["model_info"], dict) + assert isinstance(kwargs["litellm_params"]["model_info"]["id"], str) + assert isinstance(kwargs["litellm_params"]["proxy_server_request"], (str, type(None))) + assert isinstance(kwargs["litellm_params"]["preset_cache_key"], (str, type(None))) + assert 
isinstance(kwargs["litellm_params"]["stream_response"], dict) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("async_stream") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert isinstance(response_obj, litellm.ModelResponse) + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) and isinstance(kwargs['messages'][0], dict) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert (isinstance(kwargs['input'], list) and isinstance(kwargs['input'][0], dict)) or isinstance(kwargs['input'], (dict, str)) + assert isinstance(kwargs['api_key'], (str, type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) or inspect.isasyncgen(kwargs['original_response']) or inspect.iscoroutine(kwargs['original_response']) + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + def log_success_event(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("sync_success") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert isinstance(response_obj, litellm.ModelResponse) + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) and isinstance(kwargs['messages'][0], dict) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert (isinstance(kwargs['input'], list) and isinstance(kwargs['input'][0], dict)) or isinstance(kwargs['input'], (dict, str)) + assert isinstance(kwargs['api_key'], (str, type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + assert kwargs["cache_hit"] is None or isinstance(kwargs["cache_hit"], bool) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + def log_failure_event(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("sync_failure") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert response_obj == None + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) and isinstance(kwargs['messages'][0], dict) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert (isinstance(kwargs['input'], list) 
and isinstance(kwargs['input'][0], dict)) or isinstance(kwargs['input'], (dict, str)) + assert isinstance(kwargs['api_key'], (str, type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) or kwargs["original_response"] == None + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def async_log_pre_api_call(self, model, messages, kwargs): + try: + """ + No-op. + Not implemented yet. + """ + pass + except Exception as e: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("async_success") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert isinstance(response_obj, (litellm.ModelResponse, litellm.EmbeddingResponse)) + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert isinstance(kwargs['input'], (list, dict, str)) + assert isinstance(kwargs['api_key'], (str, type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) or inspect.isasyncgen(kwargs['original_response']) or inspect.iscoroutine(kwargs['original_response']) + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + assert kwargs["cache_hit"] is None or isinstance(kwargs["cache_hit"], bool) + ### ROUTER-SPECIFIC KWARGS + assert isinstance(kwargs["litellm_params"]["metadata"], dict) + assert isinstance(kwargs["litellm_params"]["metadata"]["model_group"], str) + assert isinstance(kwargs["litellm_params"]["metadata"]["deployment"], str) + assert isinstance(kwargs["litellm_params"]["model_info"], dict) + assert isinstance(kwargs["litellm_params"]["model_info"]["id"], str) + assert isinstance(kwargs["litellm_params"]["proxy_server_request"], (str, type(None))) + assert isinstance(kwargs["litellm_params"]["preset_cache_key"], (str, type(None))) + assert isinstance(kwargs["litellm_params"]["stream_response"], dict) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time): + try: + print(f"received original response: {kwargs['original_response']}") + self.states.append("async_failure") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert response_obj == None + ## KWARGS + assert isinstance(kwargs['model'], str) + assert isinstance(kwargs['messages'], list) + assert isinstance(kwargs['optional_params'], dict) + assert isinstance(kwargs['litellm_params'], dict) + assert isinstance(kwargs['start_time'], (datetime, type(None))) + assert isinstance(kwargs['stream'], bool) + assert isinstance(kwargs['user'], (str, type(None))) + assert isinstance(kwargs['input'], (list, str, dict)) + assert isinstance(kwargs['api_key'], (str, 
type(None))) + assert isinstance(kwargs['original_response'], (str, litellm.CustomStreamWrapper)) or inspect.isasyncgen(kwargs['original_response']) or inspect.iscoroutine(kwargs['original_response']) or kwargs['original_response'] == None + assert isinstance(kwargs['additional_args'], (dict, type(None))) + assert isinstance(kwargs['log_event_type'], str) + except: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + +# Simple Azure OpenAI call +## COMPLETION +@pytest.mark.asyncio +async def test_async_chat_azure(): + try: + customHandler_completion_azure_router = CompletionCustomHandler() + customHandler_streaming_azure_router = CompletionCustomHandler() + customHandler_failure = CompletionCustomHandler() + litellm.callbacks = [customHandler_completion_azure_router] + model_list = [ + { + "model_name": "gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + ] + router = Router(model_list=model_list) # type: ignore + response = await router.acompletion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm openai" + }]) + await asyncio.sleep(2) + assert len(customHandler_completion_azure_router.errors) == 0 + assert len(customHandler_completion_azure_router.states) == 3 # pre, post, success + # streaming + litellm.callbacks = [customHandler_streaming_azure_router] + router2 = Router(model_list=model_list) # type: ignore + response = await router2.acompletion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm openai" + }], + stream=True) + async for chunk in response: + print(f"async azure router chunk: {chunk}") + continue + await asyncio.sleep(1) + print(f"customHandler.states: {customHandler_streaming_azure_router.states}") + assert len(customHandler_streaming_azure_router.errors) == 0 + assert len(customHandler_streaming_azure_router.states) >= 4 # pre, post, stream (multiple times), success + # failure + model_list = [ + { + "model_name": "gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": "my-bad-key", + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + ] + litellm.callbacks = [customHandler_failure] + router3 = Router(model_list=model_list) # type: ignore + try: + response = await router3.acompletion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm openai" + }]) + print(f"response in router3 acompletion: {response}") + except: + pass + await asyncio.sleep(1) + print(f"customHandler.states: {customHandler_failure.states}") + assert len(customHandler_failure.errors) == 0 + assert len(customHandler_failure.states) == 3 # pre, post, failure + assert "async_failure" in customHandler_failure.states + except Exception as e: + print(f"Assertion Error: {traceback.format_exc()}") + pytest.fail(f"An exception occurred - {str(e)}") +# asyncio.run(test_async_chat_azure()) +## EMBEDDING +@pytest.mark.asyncio +async def test_async_embedding_azure(): + try: + customHandler = CompletionCustomHandler() + customHandler_failure = CompletionCustomHandler() + litellm.callbacks = [customHandler] + model_list = [ + { + "model_name": 
"azure-embedding-model", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/azure-embedding-model", + "api_key": os.getenv("AZURE_API_KEY"), + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + ] + router = Router(model_list=model_list) # type: ignore + response = await router.aembedding(model="azure-embedding-model", + input=["hello from litellm!"]) + await asyncio.sleep(2) + assert len(customHandler.errors) == 0 + assert len(customHandler.states) == 3 # pre, post, success + # failure + model_list = [ + { + "model_name": "azure-embedding-model", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/azure-embedding-model", + "api_key": "my-bad-key", + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + ] + litellm.callbacks = [customHandler_failure] + router3 = Router(model_list=model_list) # type: ignore + try: + response = await router3.aembedding(model="azure-embedding-model", + input=["hello from litellm!"]) + print(f"response in router3 aembedding: {response}") + except: + pass + await asyncio.sleep(1) + print(f"customHandler.states: {customHandler_failure.states}") + assert len(customHandler_failure.errors) == 0 + assert len(customHandler_failure.states) == 3 # pre, post, failure + assert "async_failure" in customHandler_failure.states + except Exception as e: + print(f"Assertion Error: {traceback.format_exc()}") + pytest.fail(f"An exception occurred - {str(e)}") +# asyncio.run(test_async_embedding_azure()) +# Azure OpenAI call w/ Fallbacks +## COMPLETION +@pytest.mark.asyncio +async def test_async_chat_azure_with_fallbacks(): + try: + customHandler_fallbacks = CompletionCustomHandler() + litellm.callbacks = [customHandler_fallbacks] + # with fallbacks + model_list = [ + { + "model_name": "gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": "my-bad-key", + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { + "model_name": "gpt-3.5-turbo-16k", + "litellm_params": { + "model": "gpt-3.5-turbo-16k", + }, + "tpm": 240000, + "rpm": 1800 + } + ] + router = Router(model_list=model_list, fallbacks=[{"gpt-3.5-turbo": ["gpt-3.5-turbo-16k"]}]) # type: ignore + response = await router.acompletion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "Hi 👋 - i'm openai" + }]) + await asyncio.sleep(2) + print(f"customHandler_fallbacks.states: {customHandler_fallbacks.states}") + assert len(customHandler_fallbacks.errors) == 0 + assert len(customHandler_fallbacks.states) == 6 # pre, post, failure, pre, post, success + litellm.callbacks = [] + except Exception as e: + print(f"Assertion Error: {traceback.format_exc()}") + pytest.fail(f"An exception occurred - {str(e)}") +# asyncio.run(test_async_chat_azure_with_fallbacks()) + +# CACHING +## Test Azure - completion, embedding +@pytest.mark.asyncio +async def test_async_completion_azure_caching(): + customHandler_caching = CompletionCustomHandler() + litellm.cache = Cache(type="redis", host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD']) + litellm.callbacks = [customHandler_caching] + unique_time = time.time() + model_list = [ + { + "model_name": 
"gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { + "model_name": "gpt-3.5-turbo-16k", + "litellm_params": { + "model": "gpt-3.5-turbo-16k", + }, + "tpm": 240000, + "rpm": 1800 + } + ] + router = Router(model_list=model_list) # type: ignore + response1 = await router.acompletion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": f"Hi 👋 - i'm async azure {unique_time}" + }], + caching=True) + await asyncio.sleep(1) + print(f"customHandler_caching.states pre-cache hit: {customHandler_caching.states}") + response2 = await router.acompletion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": f"Hi 👋 - i'm async azure {unique_time}" + }], + caching=True) + await asyncio.sleep(1) # success callbacks are done in parallel + print(f"customHandler_caching.states post-cache hit: {customHandler_caching.states}") + assert len(customHandler_caching.errors) == 0 + assert len(customHandler_caching.states) == 4 # pre, post, success, success diff --git a/litellm/tests/test_custom_logger.py b/litellm/tests/test_custom_logger.py index 2df5e0f76..533168388 100644 --- a/litellm/tests/test_custom_logger.py +++ b/litellm/tests/test_custom_logger.py @@ -1,15 +1,14 @@ ### What this tests #### -import sys, os, time, inspect, asyncio +import sys, os, time, inspect, asyncio, traceback import pytest sys.path.insert(0, os.path.abspath('../..')) from litellm import completion, embedding import litellm from litellm.integrations.custom_logger import CustomLogger - -async_success = False -complete_streaming_response_in_callback = "" + class MyCustomHandler(CustomLogger): + complete_streaming_response_in_callback = "" def __init__(self): self.success: bool = False # type: ignore self.failure: bool = False # type: ignore @@ -27,9 +26,12 @@ class MyCustomHandler(CustomLogger): self.stream_collected_response = None # type: ignore self.sync_stream_collected_response = None # type: ignore + self.user = None # type: ignore + self.data_sent_to_api: dict = {} def log_pre_api_call(self, model, messages, kwargs): print(f"Pre-API Call") + self.data_sent_to_api = kwargs["additional_args"].get("complete_input_dict", {}) def log_post_api_call(self, kwargs, response_obj, start_time, end_time): print(f"Post-API Call") @@ -50,9 +52,8 @@ class MyCustomHandler(CustomLogger): async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): print(f"On Async success") + print(f"received kwargs user: {kwargs['user']}") self.async_success = True - print("Value of async success: ", self.async_success) - print("\n kwargs: ", kwargs) if kwargs.get("model") == "text-embedding-ada-002": self.async_success_embedding = True self.async_embedding_kwargs = kwargs @@ -60,31 +61,32 @@ class MyCustomHandler(CustomLogger): if kwargs.get("stream") == True: self.stream_collected_response = response_obj self.async_completion_kwargs = kwargs + self.user = kwargs.get("user", None) async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time): print(f"On Async Failure") self.async_failure = True - print("Value of async failure: ", self.async_failure) - print("\n kwargs: ", kwargs) if kwargs.get("model") == "text-embedding-ada-002": self.async_failure_embedding = True self.async_embedding_kwargs_fail = kwargs 
self.async_completion_kwargs_fail = kwargs -async def async_test_logging_fn(kwargs, completion_obj, start_time, end_time): - global async_success, complete_streaming_response_in_callback - print(f"ON ASYNC LOGGING") - async_success = True - print("\nKWARGS", kwargs) - complete_streaming_response_in_callback = kwargs.get("complete_streaming_response") +class TmpFunction: + complete_streaming_response_in_callback = "" + async_success: bool = False + async def async_test_logging_fn(self, kwargs, completion_obj, start_time, end_time): + print(f"ON ASYNC LOGGING") + self.async_success = True + print(f'kwargs.get("complete_streaming_response"): {kwargs.get("complete_streaming_response")}') + self.complete_streaming_response_in_callback = kwargs.get("complete_streaming_response") def test_async_chat_openai_stream(): try: - global complete_streaming_response_in_callback + tmp_function = TmpFunction() # litellm.set_verbose = True - litellm.success_callback = [async_test_logging_fn] + litellm.success_callback = [tmp_function.async_test_logging_fn] complete_streaming_response = "" async def call_gpt(): nonlocal complete_streaming_response @@ -98,12 +100,16 @@ def test_async_chat_openai_stream(): complete_streaming_response += chunk["choices"][0]["delta"]["content"] or "" print(complete_streaming_response) asyncio.run(call_gpt()) - assert complete_streaming_response_in_callback["choices"][0]["message"]["content"] == complete_streaming_response - assert async_success == True + complete_streaming_response = complete_streaming_response.strip("'") + response1 = tmp_function.complete_streaming_response_in_callback["choices"][0]["message"]["content"] + response2 = complete_streaming_response + # assert [ord(c) for c in response1] == [ord(c) for c in response2] + assert response1 == response2 + assert tmp_function.async_success == True except Exception as e: print(e) pytest.fail(f"An error occurred - {str(e)}") -test_async_chat_openai_stream() +# test_async_chat_openai_stream() def test_completion_azure_stream_moderation_failure(): try: @@ -205,13 +211,27 @@ def test_azure_completion_stream(): assert response_in_success_handler == complete_streaming_response except Exception as e: pytest.fail(f"Error occurred: {e}") -test_azure_completion_stream() -def test_async_custom_handler(): - try: - customHandler2 = MyCustomHandler() - litellm.callbacks = [customHandler2] - litellm.set_verbose = True +@pytest.mark.asyncio +async def test_async_custom_handler_completion(): + try: + customHandler_success = MyCustomHandler() + customHandler_failure = MyCustomHandler() + # success + assert customHandler_success.async_success == False + litellm.callbacks = [customHandler_success] + response = await litellm.acompletion( + model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "hello from litellm test", + }] + ) + await asyncio.sleep(1) + assert customHandler_success.async_success == True, "async success is not set to True even after success" + assert customHandler_success.async_completion_kwargs.get("model") == "gpt-3.5-turbo" + # failure + litellm.callbacks = [customHandler_failure] messages = [ {"role": "system", "content": "You are a helpful assistant."}, { @@ -219,77 +239,101 @@ def test_async_custom_handler(): "content": "how do i kill someone", }, ] - async def test_1(): - try: - response = await litellm.acompletion( - model="gpt-3.5-turbo", - messages=messages, - api_key="test", - ) - except: - pass - assert customHandler2.async_failure == False - asyncio.run(test_1()) - assert 
customHandler2.async_failure == True, "async failure is not set to True even after failure" - assert customHandler2.async_completion_kwargs_fail.get("model") == "gpt-3.5-turbo" - assert len(str(customHandler2.async_completion_kwargs_fail.get("exception"))) > 10 # exppect APIError("OpenAIException - Error code: 401 - {'error': {'message': 'Incorrect API key provided: test. You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}"), 'traceback_exception': 'Traceback (most recent call last):\n File "/Users/ishaanjaffer/Github/litellm/litellm/llms/openai.py", line 269, in acompletion\n response = await openai_aclient.chat.completions.create(**data)\n File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/openai/resources/chat/completions.py", line 119 - print("Passed setting async failure") - - async def test_2(): + assert customHandler_failure.async_failure == False + try: response = await litellm.acompletion( - model="gpt-3.5-turbo", - messages=[{ - "role": "user", - "content": "hello from litellm test", - }] - ) - print("\n response", response) - assert customHandler2.async_success == False - asyncio.run(test_2()) - assert customHandler2.async_success == True, "async success is not set to True even after success" - assert customHandler2.async_completion_kwargs.get("model") == "gpt-3.5-turbo" + model="gpt-3.5-turbo", + messages=messages, + api_key="my-bad-key", + ) + except: + pass + assert customHandler_failure.async_failure == True, "async failure is not set to True even after failure" + assert customHandler_failure.async_completion_kwargs_fail.get("model") == "gpt-3.5-turbo" + assert len(str(customHandler_failure.async_completion_kwargs_fail.get("exception"))) > 10 # expect APIError("OpenAIException - Error code: 401 - {'error': {'message': 'Incorrect API key provided: test. 
You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}"), 'traceback_exception': 'Traceback (most recent call last):\n File "/Users/ishaanjaffer/Github/litellm/litellm/llms/openai.py", line 269, in acompletion\n response = await openai_aclient.chat.completions.create(**data)\n File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/openai/resources/chat/completions.py", line 119 + litellm.callbacks = [] + print("Passed setting async failure") + except Exception as e: + pytest.fail(f"An exception occurred - {str(e)}") +# asyncio.run(test_async_custom_handler_completion()) - - async def test_3(): - response = await litellm.aembedding( +@pytest.mark.asyncio +async def test_async_custom_handler_embedding(): + try: + customHandler_embedding = MyCustomHandler() + litellm.callbacks = [customHandler_embedding] + # success + assert customHandler_embedding.async_success_embedding == False + response = await litellm.aembedding( model="text-embedding-ada-002", input = ["hello world"], ) - print("\n response", response) - assert customHandler2.async_success_embedding == False - asyncio.run(test_3()) - assert customHandler2.async_success_embedding == True, "async_success_embedding is not set to True even after success" - assert customHandler2.async_embedding_kwargs.get("model") == "text-embedding-ada-002" - assert customHandler2.async_embedding_response["usage"]["prompt_tokens"] ==2 + await asyncio.sleep(1) + assert customHandler_embedding.async_success_embedding == True, "async_success_embedding is not set to True even after success" + assert customHandler_embedding.async_embedding_kwargs.get("model") == "text-embedding-ada-002" + assert customHandler_embedding.async_embedding_response["usage"]["prompt_tokens"] ==2 print("Passed setting async success: Embedding") - - - print("Testing custom failure callback for embedding") - - async def test_4(): - try: - response = await litellm.aembedding( - model="text-embedding-ada-002", - input = ["hello world"], - api_key="test", - ) - except: - pass - - assert customHandler2.async_failure_embedding == False - asyncio.run(test_4()) - assert customHandler2.async_failure_embedding == True, "async failure embedding is not set to True even after failure" - assert customHandler2.async_embedding_kwargs_fail.get("model") == "text-embedding-ada-002" - assert len(str(customHandler2.async_embedding_kwargs_fail.get("exception"))) > 10 # exppect APIError("OpenAIException - Error code: 401 - {'error': {'message': 'Incorrect API key provided: test. 
You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}"), 'traceback_exception': 'Traceback (most recent call last):\n File "/Users/ishaanjaffer/Github/litellm/litellm/llms/openai.py", line 269, in acompletion\n response = await openai_aclient.chat.completions.create(**data)\n File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/openai/resources/chat/completions.py", line 119 - print("Passed setting async failure") - + # failure + assert customHandler_embedding.async_failure_embedding == False + try: + response = await litellm.aembedding( + model="text-embedding-ada-002", + input = ["hello world"], + api_key="my-bad-key", + ) + except: + pass + assert customHandler_embedding.async_failure_embedding == True, "async failure embedding is not set to True even after failure" + assert customHandler_embedding.async_embedding_kwargs_fail.get("model") == "text-embedding-ada-002" + assert len(str(customHandler_embedding.async_embedding_kwargs_fail.get("exception"))) > 10 # expect APIError("OpenAIException - Error code: 401 - {'error': {'message': 'Incorrect API key provided: test. You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}}"), 'traceback_exception': 'Traceback (most recent call last):\n File "/Users/ishaanjaffer/Github/litellm/litellm/llms/openai.py", line 269, in acompletion\n response = await openai_aclient.chat.completions.create(**data)\n File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/openai/resources/chat/completions.py", line 119 except Exception as e: - pytest.fail(f"Error occurred: {e}") -# test_async_custom_handler() + pytest.fail(f"An exception occurred - {str(e)}") +# asyncio.run(test_async_custom_handler_embedding()) + +@pytest.mark.asyncio +async def test_async_custom_handler_embedding_optional_param(): + """ + Tests if the openai optional params for embedding - user + encoding_format, + are logged + """ + customHandler_optional_params = MyCustomHandler() + litellm.callbacks = [customHandler_optional_params] + response = await litellm.aembedding( + model="azure/azure-embedding-model", + input = ["hello world"], + user = "John" + ) + await asyncio.sleep(1) # success callback is async + assert customHandler_optional_params.user == "John" + assert customHandler_optional_params.user == customHandler_optional_params.data_sent_to_api["user"] + +# asyncio.run(test_async_custom_handler_embedding_optional_param()) + +@pytest.mark.asyncio +async def test_async_custom_handler_embedding_optional_param_bedrock(): + """ + Tests if the openai optional params for embedding - user + encoding_format, + are logged + + but makes sure these are not sent to the non-openai/azure endpoint (raises errors). 
+ """ + litellm.drop_params = True + litellm.set_verbose = True + customHandler_optional_params = MyCustomHandler() + litellm.callbacks = [customHandler_optional_params] + response = await litellm.aembedding( + model="bedrock/amazon.titan-embed-text-v1", + input = ["hello world"], + user = "John" + ) + await asyncio.sleep(1) # success callback is async + assert customHandler_optional_params.user == "John" + assert "user" not in customHandler_optional_params.data_sent_to_api + -from litellm import Cache def test_redis_cache_completion_stream(): + from litellm import Cache # Important Test - This tests if we can add to streaming cache, when custom callbacks are set import random try: @@ -316,13 +360,10 @@ def test_redis_cache_completion_stream(): print("\nresponse 2", response_2_content) assert response_1_content == response_2_content, f"Response 1 != Response 2. Same params, Response 1{response_1_content} != Response 2{response_2_content}" litellm.success_callback = [] + litellm._async_success_callback = [] litellm.cache = None except Exception as e: print(e) litellm.success_callback = [] raise e - """ - - 1 & 2 should be exactly the same - """ # test_redis_cache_completion_stream() \ No newline at end of file diff --git a/litellm/tests/test_dynamodb_logs.py b/litellm/tests/test_dynamodb_logs.py new file mode 100644 index 000000000..6e40c9512 --- /dev/null +++ b/litellm/tests/test_dynamodb_logs.py @@ -0,0 +1,120 @@ +import sys +import os +import io, asyncio +# import logging +# logging.basicConfig(level=logging.DEBUG) +sys.path.insert(0, os.path.abspath('../..')) + +from litellm import completion +import litellm +litellm.num_retries = 3 + +import time, random +import pytest + + +def pre_request(): + file_name = f"dynamo.log" + log_file = open(file_name, "a+") + + # Clear the contents of the file by truncating it + log_file.truncate(0) + + # Save the original stdout so that we can restore it later + original_stdout = sys.stdout + # Redirect stdout to the file + sys.stdout = log_file + + return original_stdout, log_file, file_name + + +import re +def verify_log_file(log_file_path): + + with open(log_file_path, 'r') as log_file: + log_content = log_file.read() + print(f"\nVerifying DynamoDB file = {log_file_path}. 
File content=", log_content) + + # Define the pattern to search for in the log file + pattern = r"Response from DynamoDB:{.*?}" + + # Find all matches in the log content + matches = re.findall(pattern, log_content) + + # Print the DynamoDB success log matches + print("DynamoDB Success Log Matches:") + for match in matches: + print(match) + + # Print the total count of lines containing the specified response + print(f"Total occurrences of specified response: {len(matches)}") + + # Count the occurrences of successful responses (status code 200 or 201) + success_count = sum(1 for match in matches if "'HTTPStatusCode': 200" in match or "'HTTPStatusCode': 201" in match) + + # Print the count of successful responses + print(f"Count of successful responses from DynamoDB: {success_count}") + assert success_count == 3 # Expect 3 success logs from dynamoDB + + +def test_dynamo_logging(): + # all dynamodb requests need to be in one test function + # since we are modifying stdout, and pytests runs tests in parallel + try: + # pre + # redirect stdout to log_file + + litellm.success_callback = ["dynamodb"] + litellm.dynamodb_table_name = "litellm-logs-1" + litellm.set_verbose = True + original_stdout, log_file, file_name = pre_request() + + + print("Testing async dynamoDB logging") + async def _test(): + return await litellm.acompletion( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content":"This is a test"}], + max_tokens=100, + temperature=0.7, + user = "ishaan-2" + ) + response = asyncio.run(_test()) + print(f"response: {response}") + + + # streaming + async + async def _test2(): + response = await litellm.acompletion( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content":"This is a test"}], + max_tokens=10, + temperature=0.7, + user = "ishaan-2", + stream=True + ) + async for chunk in response: + pass + asyncio.run(_test2()) + + # aembedding() + async def _test3(): + return await litellm.aembedding( + model="text-embedding-ada-002", + input = ["hi"], + user = "ishaan-2" + ) + response = asyncio.run(_test3()) + time.sleep(1) + except Exception as e: + pytest.fail(f"An exception occurred - {e}") + finally: + # post, close log file and verify + # Reset stdout to the original value + sys.stdout = original_stdout + # Close the file + log_file.close() + verify_log_file(file_name) + print("Passed! 
Testing async dynamoDB logging") + +# test_dynamo_logging_async() diff --git a/litellm/tests/test_embedding.py b/litellm/tests/test_embedding.py index 71e59819f..9a2a5951a 100644 --- a/litellm/tests/test_embedding.py +++ b/litellm/tests/test_embedding.py @@ -164,7 +164,7 @@ def test_bedrock_embedding_titan(): assert all(isinstance(x, float) for x in response['data'][0]['embedding']), "Expected response to be a list of floats" except Exception as e: pytest.fail(f"Error occurred: {e}") -# test_bedrock_embedding_titan() +test_bedrock_embedding_titan() def test_bedrock_embedding_cohere(): try: diff --git a/litellm/tests/test_exceptions.py b/litellm/tests/test_exceptions.py index 67298f019..f6a0ba25b 100644 --- a/litellm/tests/test_exceptions.py +++ b/litellm/tests/test_exceptions.py @@ -21,6 +21,7 @@ from concurrent.futures import ThreadPoolExecutor import pytest litellm.vertex_project = "pathrise-convert-1606954137718" litellm.vertex_location = "us-central1" +litellm.num_retries=0 # litellm.failure_callback = ["sentry"] #### What this tests #### @@ -38,10 +39,11 @@ models = ["command-nightly"] # Test 1: Context Window Errors @pytest.mark.parametrize("model", models) def test_context_window(model): + print("Testing context window error") sample_text = "Say error 50 times" * 1000000 messages = [{"content": sample_text, "role": "user"}] try: - litellm.set_verbose = False + litellm.set_verbose = True response = completion(model=model, messages=messages) print(f"response: {response}") print("FAILED!") @@ -176,7 +178,7 @@ def test_completion_azure_exception(): try: import openai print("azure gpt-3.5 test\n\n") - litellm.set_verbose=False + litellm.set_verbose=True ## Test azure call old_azure_key = os.environ["AZURE_API_KEY"] os.environ["AZURE_API_KEY"] = "good morning" @@ -189,6 +191,7 @@ def test_completion_azure_exception(): } ], ) + os.environ["AZURE_API_KEY"] = old_azure_key print(f"response: {response}") print(response) except openai.AuthenticationError as e: @@ -196,14 +199,14 @@ def test_completion_azure_exception(): print("good job got the correct error for azure when key not set") except Exception as e: pytest.fail(f"Error occurred: {e}") -test_completion_azure_exception() +# test_completion_azure_exception() async def asynctest_completion_azure_exception(): try: import openai import litellm print("azure gpt-3.5 test\n\n") - litellm.set_verbose=False + litellm.set_verbose=True ## Test azure call old_azure_key = os.environ["AZURE_API_KEY"] os.environ["AZURE_API_KEY"] = "good morning" @@ -226,19 +229,75 @@ async def asynctest_completion_azure_exception(): print("Got wrong exception") print("exception", e) pytest.fail(f"Error occurred: {e}") - # import asyncio # asyncio.run( # asynctest_completion_azure_exception() # ) +def asynctest_completion_openai_exception_bad_model(): + try: + import openai + import litellm, asyncio + print("azure exception bad model\n\n") + litellm.set_verbose=True + ## Test azure call + async def test(): + response = await litellm.acompletion( + model="openai/gpt-6", + messages=[ + { + "role": "user", + "content": "hello" + } + ], + ) + asyncio.run(test()) + except openai.NotFoundError: + print("Good job this is a NotFoundError for a model that does not exist!") + print("Passed") + except Exception as e: + print("Raised wrong type of exception", type(e)) + assert isinstance(e, openai.BadRequestError) + pytest.fail(f"Error occurred: {e}") + +# asynctest_completion_openai_exception_bad_model() + + + +def asynctest_completion_azure_exception_bad_model(): + try: + import 
openai + import litellm, asyncio + print("azure exception bad model\n\n") + litellm.set_verbose=True + ## Test azure call + async def test(): + response = await litellm.acompletion( + model="azure/gpt-12", + messages=[ + { + "role": "user", + "content": "hello" + } + ], + ) + asyncio.run(test()) + except openai.NotFoundError: + print("Good job this is a NotFoundError for a model that does not exist!") + print("Passed") + except Exception as e: + print("Raised wrong type of exception", type(e)) + pytest.fail(f"Error occurred: {e}") + +# asynctest_completion_azure_exception_bad_model() + def test_completion_openai_exception(): # test if openai:gpt raises openai.AuthenticationError try: import openai print("openai gpt-3.5 test\n\n") - litellm.set_verbose=False + litellm.set_verbose=True ## Test azure call old_azure_key = os.environ["OPENAI_API_KEY"] os.environ["OPENAI_API_KEY"] = "good morning" @@ -255,11 +314,38 @@ def test_completion_openai_exception(): print(response) except openai.AuthenticationError as e: os.environ["OPENAI_API_KEY"] = old_azure_key - print("good job got the correct error for openai when key not set") + print("OpenAI: good job got the correct error for openai when key not set") except Exception as e: pytest.fail(f"Error occurred: {e}") # test_completion_openai_exception() +def test_completion_mistral_exception(): + # test if mistral/mistral-tiny raises openai.AuthenticationError + try: + import openai + print("Testing mistral ai exception mapping") + litellm.set_verbose=True + ## Test azure call + old_azure_key = os.environ["MISTRAL_API_KEY"] + os.environ["MISTRAL_API_KEY"] = "good morning" + response = completion( + model="mistral/mistral-tiny", + messages=[ + { + "role": "user", + "content": "hello" + } + ], + ) + print(f"response: {response}") + print(response) + except openai.AuthenticationError as e: + os.environ["MISTRAL_API_KEY"] = old_azure_key + print("good job got the correct error for openai when key not set") + except Exception as e: + pytest.fail(f"Error occurred: {e}") +# test_completion_mistral_exception() + @@ -301,4 +387,4 @@ def test_completion_openai_exception(): # counts[result] += 1 # accuracy_score = counts[True]/(counts[True] + counts[False]) -# print(f"accuracy_score: {accuracy_score}") +# print(f"accuracy_score: {accuracy_score}") \ No newline at end of file diff --git a/litellm/tests/test_langfuse.py b/litellm/tests/test_langfuse.py index 017d7e1e2..02abbf656 100644 --- a/litellm/tests/test_langfuse.py +++ b/litellm/tests/test_langfuse.py @@ -9,33 +9,107 @@ from litellm import completion import litellm litellm.num_retries = 3 litellm.success_callback = ["langfuse"] -# litellm.set_verbose = True +os.environ["LANGFUSE_DEBUG"] = "True" import time import pytest +def search_logs(log_file_path): + """ + Searches the given log file for logs containing the "/api/public" string. + + Parameters: + - log_file_path (str): The path to the log file to be searched. + + Returns: + - None + + Raises: + - Exception: If there are any bad logs found in the log file. 
+ """ + import re + print("\n searching logs") + bad_logs = [] + good_logs = [] + all_logs = [] + try: + with open(log_file_path, 'r') as log_file: + lines = log_file.readlines() + print(f"searching logslines: {lines}") + for line in lines: + all_logs.append(line.strip()) + if "/api/public" in line: + print("Found log with /api/public:") + print(line.strip()) + print("\n\n") + match = re.search(r'receive_response_headers.complete return_value=\(b\'HTTP/1.1\', (\d+),', line) + if match: + status_code = int(match.group(1)) + if status_code != 200 and status_code != 201: + print("got a BAD log") + bad_logs.append(line.strip()) + else: + + good_logs.append(line.strip()) + print("\nBad Logs") + print(bad_logs) + if len(bad_logs)>0: + raise Exception(f"bad logs, Bad logs = {bad_logs}") + + print("\nGood Logs") + print(good_logs) + if len(good_logs) <= 0: + raise Exception(f"There were no Good Logs from Langfuse. No logs with /api/public status 200. \nAll logs:{all_logs}") + + except Exception as e: + raise e + +def pre_langfuse_setup(): + """ + Set up the logging for the 'pre_langfuse_setup' function. + """ + # sends logs to langfuse.log + import logging + # Configure the logging to write to a file + logging.basicConfig(filename="langfuse.log", level=logging.DEBUG) + logger = logging.getLogger() + + # Add a FileHandler to the logger + file_handler = logging.FileHandler("langfuse.log", mode='w') + file_handler.setLevel(logging.DEBUG) + logger.addHandler(file_handler) + return + +@pytest.mark.skip(reason="beta test - checking langfuse output") def test_langfuse_logging_async(): try: + pre_langfuse_setup() litellm.set_verbose = True async def _test_langfuse(): return await litellm.acompletion( model="gpt-3.5-turbo", messages=[{"role": "user", "content":"This is a test"}], - max_tokens=1000, + max_tokens=100, temperature=0.7, timeout=5, ) response = asyncio.run(_test_langfuse()) print(f"response: {response}") + + # time.sleep(2) + # # check langfuse.log to see if there was a failed response + # search_logs("langfuse.log") except litellm.Timeout as e: pass except Exception as e: pytest.fail(f"An exception occurred - {e}") -# test_langfuse_logging_async() +test_langfuse_logging_async() +@pytest.mark.skip(reason="beta test - checking langfuse output") def test_langfuse_logging(): try: - # litellm.set_verbose = True + pre_langfuse_setup() + litellm.set_verbose = True response = completion(model="claude-instant-1.2", messages=[{ "role": "user", @@ -43,17 +117,20 @@ def test_langfuse_logging(): }], max_tokens=10, temperature=0.2, - metadata={"langfuse/key": "foo"} ) print(response) + # time.sleep(5) + # # check langfuse.log to see if there was a failed response + # search_logs("langfuse.log") + except litellm.Timeout as e: pass except Exception as e: - print(e) + pytest.fail(f"An exception occurred - {e}") test_langfuse_logging() - +@pytest.mark.skip(reason="beta test - checking langfuse output") def test_langfuse_logging_stream(): try: litellm.set_verbose=True @@ -77,6 +154,7 @@ def test_langfuse_logging_stream(): # test_langfuse_logging_stream() +@pytest.mark.skip(reason="beta test - checking langfuse output") def test_langfuse_logging_custom_generation_name(): try: litellm.set_verbose=True @@ -99,8 +177,8 @@ def test_langfuse_logging_custom_generation_name(): pytest.fail(f"An exception occurred - {e}") print(e) -test_langfuse_logging_custom_generation_name() - +# test_langfuse_logging_custom_generation_name() +@pytest.mark.skip(reason="beta test - checking langfuse output") def 
test_langfuse_logging_function_calling(): function1 = [ { diff --git a/litellm/tests/test_model_alias_map.py b/litellm/tests/test_model_alias_map.py index f4647fe7c..b99a626e3 100644 --- a/litellm/tests/test_model_alias_map.py +++ b/litellm/tests/test_model_alias_map.py @@ -17,10 +17,10 @@ model_alias_map = { "good-model": "anyscale/meta-llama/Llama-2-7b-chat-hf" } -litellm.model_alias_map = model_alias_map def test_model_alias_map(): try: + litellm.model_alias_map = model_alias_map response = completion( "good-model", messages=[{"role": "user", "content": "Hey, how's it going?"}], diff --git a/litellm/tests/test_ollama.py b/litellm/tests/test_ollama.py index b3635b6a9..4a602fe64 100644 --- a/litellm/tests/test_ollama.py +++ b/litellm/tests/test_ollama.py @@ -1,100 +1,37 @@ -##### THESE TESTS CAN ONLY RUN LOCALLY WITH THE OLLAMA SERVER RUNNING ###### -# import aiohttp -# import json -# import asyncio -# import requests -# -# async def get_ollama_response_stream(api_base="http://localhost:11434", model="llama2", prompt="Why is the sky blue?"): -# session = aiohttp.ClientSession() -# url = f'{api_base}/api/generate' -# data = { -# "model": model, -# "prompt": prompt, -# } +import sys, os +import traceback +from dotenv import load_dotenv -# response = "" +load_dotenv() +import os, io -# try: -# async with session.post(url, json=data) as resp: -# async for line in resp.content.iter_any(): -# if line: -# try: -# json_chunk = line.decode("utf-8") -# chunks = json_chunk.split("\n") -# for chunk in chunks: -# if chunk.strip() != "": -# j = json.loads(chunk) -# if "response" in j: -# print(j["response"]) -# yield { -# "role": "assistant", -# "content": j["response"] -# } -# # self.responses.append(j["response"]) -# # yield "blank" -# except Exception as e: -# print(f"Error decoding JSON: {e}") -# finally: -# await session.close() - -# async def get_ollama_response_no_stream(api_base="http://localhost:11434", model="llama2", prompt="Why is the sky blue?"): -# generator = get_ollama_response_stream(api_base="http://localhost:11434", model="llama2", prompt="Why is the sky blue?") -# response = "" -# async for elem in generator: -# print(elem) -# response += elem["content"] -# return response - -# #generator = get_ollama_response_stream() - -# result = asyncio.run(get_ollama_response_no_stream()) -# print(result) - -# # return this generator to the client for streaming requests +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import pytest +import litellm -# async def get_response(): -# global generator -# async for elem in generator: -# print(elem) +## for ollama we can't test making the completion call +from litellm.utils import get_optional_params, get_llm_provider -# asyncio.run(get_response()) +def test_get_ollama_params(): + try: + converted_params = get_optional_params(custom_llm_provider="ollama", model="llama2", max_tokens=20, temperature=0.5, stream=True) + print("Converted params", converted_params) + assert converted_params == {'num_predict': 20, 'stream': True, 'temperature': 0.5}, f"{converted_params} != {'num_predict': 20, 'stream': True, 'temperature': 0.5}" + except Exception as e: + pytest.fail(f"Error occurred: {e}") +# test_get_ollama_params() +def test_get_ollama_model(): + try: + model, custom_llm_provider, _, _ = get_llm_provider("ollama/code-llama-22") + print("Model", "custom_llm_provider", model, custom_llm_provider) + assert custom_llm_provider == "ollama", f"{custom_llm_provider} != ollama" + assert model == "code-llama-22", 
f"{model} != code-llama-22" + except Exception as e: + pytest.fail(f"Error occurred: {e}") -##### latest implementation of making raw http post requests to local ollama server - -# import requests -# import json -# def get_ollama_response_stream(api_base="http://localhost:11434", model="llama2", prompt="Why is the sky blue?"): -# url = f"{api_base}/api/generate" -# data = { -# "model": model, -# "prompt": prompt, -# } -# session = requests.Session() - -# with session.post(url, json=data, stream=True) as resp: -# for line in resp.iter_lines(): -# if line: -# try: -# json_chunk = line.decode("utf-8") -# chunks = json_chunk.split("\n") -# for chunk in chunks: -# if chunk.strip() != "": -# j = json.loads(chunk) -# if "response" in j: -# completion_obj = { -# "role": "assistant", -# "content": "", -# } -# completion_obj["content"] = j["response"] -# yield {"choices": [{"delta": completion_obj}]} -# except Exception as e: -# print(f"Error decoding JSON: {e}") -# session.close() - -# response = get_ollama_response_stream() - -# for chunk in response: -# print(chunk['choices'][0]['delta']) +# test_get_ollama_model() \ No newline at end of file diff --git a/litellm/tests/test_ollama_local.py b/litellm/tests/test_ollama_local.py index 05dd9c646..b5be561ad 100644 --- a/litellm/tests/test_ollama_local.py +++ b/litellm/tests/test_ollama_local.py @@ -16,6 +16,19 @@ # user_message = "respond in 20 words. who are you?" # messages = [{ "content": user_message,"role": "user"}] +# async def test_async_ollama_streaming(): +# try: +# litellm.set_verbose = True +# response = await litellm.acompletion(model="ollama/mistral-openorca", +# messages=[{"role": "user", "content": "Hey, how's it going?"}], +# stream=True) +# async for chunk in response: +# print(chunk) +# except Exception as e: +# print(e) + +# asyncio.run(test_async_ollama_streaming()) + # def test_completion_ollama(): # try: # response = completion( @@ -29,7 +42,7 @@ # except Exception as e: # pytest.fail(f"Error occurred: {e}") -# test_completion_ollama() +# # test_completion_ollama() # def test_completion_ollama_with_api_base(): # try: @@ -42,7 +55,7 @@ # except Exception as e: # pytest.fail(f"Error occurred: {e}") -# test_completion_ollama_with_api_base() +# # test_completion_ollama_with_api_base() # def test_completion_ollama_custom_prompt_template(): @@ -72,7 +85,7 @@ # traceback.print_exc() # pytest.fail(f"Error occurred: {e}") -# test_completion_ollama_custom_prompt_template() +# # test_completion_ollama_custom_prompt_template() # async def test_completion_ollama_async_stream(): # user_message = "what is the weather" @@ -98,8 +111,8 @@ # except Exception as e: # pytest.fail(f"Error occurred: {e}") -# import asyncio -# asyncio.run(test_completion_ollama_async_stream()) +# # import asyncio +# # asyncio.run(test_completion_ollama_async_stream()) @@ -154,8 +167,35 @@ # pass # pytest.fail(f"Error occurred: {e}") -# test_completion_expect_error() +# # test_completion_expect_error() -# if __name__ == "__main__": -# import asyncio -# asyncio.run(main()) + +# def test_ollama_llava(): +# litellm.set_verbose=True +# # same params as gpt-4 vision +# response = completion( +# model = "ollama/llava", +# messages=[ +# { +# "role": "user", +# "content": [ +# { +# "type": "text", +# "text": "What is in this picture" +# }, +# { +# "type": "image_url", +# "image_url": { +# "url": 
"iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZM6pSETsEPGqZOndr2uuuR5rF169a2HoHPdu
rUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC" +# } +# } +# ] +# } +# ], +# ) +# print("Response from ollama/llava") +# print(response) +# test_ollama_llava() + + +# # PROCESSED CHUNK PRE CHUNK CREATOR diff --git a/litellm/tests/test_optional_params.py b/litellm/tests/test_optional_params.py new file mode 100644 index 000000000..0ce917afe --- /dev/null +++ b/litellm/tests/test_optional_params.py @@ -0,0 +1,27 @@ +#### What this tests #### +# This tests if get_optional_params works as expected +import sys, os, time, inspect, asyncio, traceback +import pytest +sys.path.insert(0, os.path.abspath('../..')) +import litellm +from litellm.utils import get_optional_params_embeddings +## get_optional_params_embeddings +### Models: OpenAI, Azure, Bedrock +### Scenarios: w/ optional params + litellm.drop_params = True + +def test_bedrock_optional_params_embeddings(): + litellm.drop_params = True + optional_params = get_optional_params_embeddings(user="John", encoding_format=None, custom_llm_provider="bedrock") + assert len(optional_params) == 0 + +def test_openai_optional_params_embeddings(): + litellm.drop_params = True + optional_params = get_optional_params_embeddings(user="John", encoding_format=None, custom_llm_provider="openai") + assert len(optional_params) == 1 + assert optional_params["user"] == "John" + +def test_azure_optional_params_embeddings(): + litellm.drop_params = True + optional_params = get_optional_params_embeddings(user="John", encoding_format=None, custom_llm_provider="azure") + assert len(optional_params) == 1 + assert optional_params["user"] == "John" diff --git a/litellm/tests/test_proxy_custom_auth.py b/litellm/tests/test_proxy_custom_auth.py index 5708b1c41..c96acb816 100644 --- a/litellm/tests/test_proxy_custom_auth.py +++ b/litellm/tests/test_proxy_custom_auth.py @@ -19,21 +19,23 @@ from litellm import RateLimitError from fastapi.testclient import TestClient from fastapi import FastAPI from litellm.proxy.proxy_server import router, save_worker_config, initialize # Replace with the actual module where your FastAPI router is defined -filepath = os.path.dirname(os.path.abspath(__file__)) -config_fp = f"{filepath}/test_configs/test_config_custom_auth.yaml" -save_worker_config(config=config_fp, model=None, alias=None, api_base=None, 
api_version=None, debug=False, temperature=None, max_tokens=None, request_timeout=600, max_budget=None, telemetry=False, drop_params=True, add_function_to_prompt=False, headers=None, save=False, use_queue=False) -app = FastAPI() -app.include_router(router) # Include your router in the test app -@app.on_event("startup") -async def wrapper_startup_event(): - initialize(config=config_fp, model=None, alias=None, api_base=None, api_version=None, debug=False, temperature=None, max_tokens=None, request_timeout=600, max_budget=None, telemetry=False, drop_params=True, add_function_to_prompt=False, headers=None, save=False, use_queue=False) + # Here you create a fixture that will be used by your tests # Make sure the fixture returns TestClient(app) -@pytest.fixture(autouse=True) +@pytest.fixture(scope="function") def client(): - with TestClient(app) as client: - yield client + from litellm.proxy.proxy_server import cleanup_router_config_variables + cleanup_router_config_variables() + filepath = os.path.dirname(os.path.abspath(__file__)) + config_fp = f"{filepath}/test_configs/test_config_custom_auth.yaml" + # initialize can get run in parallel, it sets specific variables for the fast api app, sinc eit gets run in parallel different tests use the wrong variables + app = FastAPI() + initialize(config=config_fp) + + app.include_router(router) # Include your router in the test app + return TestClient(app) + def test_custom_auth(client): try: diff --git a/litellm/tests/test_amazing_proxy_custom_logger.py b/litellm/tests/test_proxy_custom_logger.py similarity index 65% rename from litellm/tests/test_amazing_proxy_custom_logger.py rename to litellm/tests/test_proxy_custom_logger.py index 0c4463cc7..6ddc9caac 100644 --- a/litellm/tests/test_amazing_proxy_custom_logger.py +++ b/litellm/tests/test_proxy_custom_logger.py @@ -3,7 +3,7 @@ import traceback from dotenv import load_dotenv load_dotenv() -import os, io +import os, io, asyncio # this file is to test litellm/proxy @@ -21,21 +21,24 @@ from fastapi.testclient import TestClient from fastapi import FastAPI from litellm.proxy.proxy_server import router, save_worker_config, initialize # Replace with the actual module where your FastAPI router is defined filepath = os.path.dirname(os.path.abspath(__file__)) -config_fp = f"{filepath}/test_configs/test_custom_logger.yaml" python_file_path = f"{filepath}/test_configs/custom_callbacks.py" -save_worker_config(config=config_fp, model=None, alias=None, api_base=None, api_version=None, debug=False, temperature=None, max_tokens=None, request_timeout=600, max_budget=None, telemetry=False, drop_params=True, add_function_to_prompt=False, headers=None, save=False, use_queue=False) -app = FastAPI() -app.include_router(router) # Include your router in the test app -@app.on_event("startup") -async def wrapper_startup_event(): - initialize(config=config_fp, model=None, alias=None, api_base=None, api_version=None, debug=True, temperature=None, max_tokens=None, request_timeout=600, max_budget=None, telemetry=False, drop_params=True, add_function_to_prompt=False, headers=None, save=False, use_queue=False) -# Here you create a fixture that will be used by your tests -# Make sure the fixture returns TestClient(app) -@pytest.fixture(autouse=True) +# @app.on_event("startup") +# async def wrapper_startup_event(): + # initialize(config=config_fp) + +# Use the app fixture in your client fixture + +@pytest.fixture def client(): - with TestClient(app) as client: - yield client + filepath = os.path.dirname(os.path.abspath(__file__)) + 
config_fp = f"{filepath}/test_configs/test_custom_logger.yaml" + initialize(config=config_fp) + app = FastAPI() + app.include_router(router) # Include your router in the test app + return TestClient(app) + + # Your bearer token token = os.getenv("PROXY_MASTER_KEY") @@ -45,15 +48,76 @@ headers = { } -def test_chat_completion(client): +print("Testing proxy custom logger") + +def test_embedding(client): try: - # Your test data + litellm.set_verbose=False + from litellm.proxy.utils import get_instance_fn + my_custom_logger = get_instance_fn( + value = "custom_callbacks.my_custom_logger", + config_file_path=python_file_path + ) + print("id of initialized custom logger", id(my_custom_logger)) + litellm.callbacks = [my_custom_logger] + # Your test data print("initialized proxy") # import the initialized custom logger print(litellm.callbacks) - assert len(litellm.callbacks) == 1 # assert litellm is initialized with 1 callback - my_custom_logger = litellm.callbacks[0] + # assert len(litellm.callbacks) == 1 # assert litellm is initialized with 1 callback + print("my_custom_logger", my_custom_logger) + assert my_custom_logger.async_success_embedding == False + + test_data = { + "model": "azure-embedding-model", + "input": ["hello"] + } + response = client.post("/embeddings", json=test_data, headers=headers) + print("made request", response.status_code, response.text) + print("vars my custom logger /embeddings", vars(my_custom_logger), "id", id(my_custom_logger)) + assert my_custom_logger.async_success_embedding == True # checks if the status of async_success is True, only the async_log_success_event can set this to true + assert my_custom_logger.async_embedding_kwargs["model"] == "azure-embedding-model" # checks if kwargs passed to async_log_success_event are correct + kwargs = my_custom_logger.async_embedding_kwargs + litellm_params = kwargs.get("litellm_params") + metadata = litellm_params.get("metadata", None) + print("\n\n Metadata in custom logger kwargs", litellm_params.get("metadata")) + assert metadata is not None + assert "user_api_key" in metadata + assert "headers" in metadata + proxy_server_request = litellm_params.get("proxy_server_request") + model_info = litellm_params.get("model_info") + assert proxy_server_request == {'url': 'http://testserver/embeddings', 'method': 'POST', 'headers': {'host': 'testserver', 'accept': '*/*', 'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive', 'user-agent': 'testclient', 'authorization': 'Bearer sk-1234', 'content-length': '54', 'content-type': 'application/json'}, 'body': {'model': 'azure-embedding-model', 'input': ['hello']}} + assert model_info == {'input_cost_per_token': 0.002, 'mode': 'embedding', 'id': 'hello'} + result = response.json() + print(f"Received response: {result}") + print("Passed Embedding custom logger on proxy!") + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. 
Exception {str(e)}") + + +def test_chat_completion(client): + try: + # Your test data + + print("initialized proxy") + litellm.set_verbose=False + from litellm.proxy.utils import get_instance_fn + my_custom_logger = get_instance_fn( + value = "custom_callbacks.my_custom_logger", + config_file_path=python_file_path + ) + + print("id of initialized custom logger", id(my_custom_logger)) + + litellm.callbacks = [my_custom_logger] + # import the initialized custom logger + print(litellm.callbacks) + + # assert len(litellm.callbacks) == 1 # assert litellm is initialized with 1 callback + + print("LiteLLM Callbacks", litellm.callbacks) + print("my_custom_logger", my_custom_logger) assert my_custom_logger.async_success == False test_data = { @@ -61,7 +125,7 @@ def test_chat_completion(client): "messages": [ { "role": "user", - "content": "hi" + "content": "write a litellm poem" }, ], "max_tokens": 10, @@ -70,33 +134,53 @@ def test_chat_completion(client): response = client.post("/chat/completions", json=test_data, headers=headers) print("made request", response.status_code, response.text) + print("LiteLLM Callbacks", litellm.callbacks) + asyncio.sleep(1) # sleep while waiting for callback to run + + print("my_custom_logger in /chat/completions", my_custom_logger, "id", id(my_custom_logger)) + print("vars my custom logger, ", vars(my_custom_logger)) assert my_custom_logger.async_success == True # checks if the status of async_success is True, only the async_log_success_event can set this to true assert my_custom_logger.async_completion_kwargs["model"] == "chatgpt-v-2" # checks if kwargs passed to async_log_success_event are correct print("\n\n Custom Logger Async Completion args", my_custom_logger.async_completion_kwargs) - litellm_params = my_custom_logger.async_completion_kwargs.get("litellm_params") + metadata = litellm_params.get("metadata", None) + print("\n\n Metadata in custom logger kwargs", litellm_params.get("metadata")) + assert metadata is not None + assert "user_api_key" in metadata + assert "headers" in metadata config_model_info = litellm_params.get("model_info") proxy_server_request_object = litellm_params.get("proxy_server_request") assert config_model_info == {'id': 'gm', 'input_cost_per_token': 0.0002, 'mode': 'chat'} - assert proxy_server_request_object == {'url': 'http://testserver/chat/completions', 'method': 'POST', 'headers': {'host': 'testserver', 'accept': '*/*', 'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive', 'user-agent': 'testclient', 'authorization': 'Bearer sk-1234', 'content-length': '105', 'content-type': 'application/json'}, 'body': {'model': 'Azure OpenAI GPT-4 Canada', 'messages': [{'role': 'user', 'content': 'hi'}], 'max_tokens': 10}} + assert proxy_server_request_object == {'url': 'http://testserver/chat/completions', 'method': 'POST', 'headers': {'host': 'testserver', 'accept': '*/*', 'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive', 'user-agent': 'testclient', 'authorization': 'Bearer sk-1234', 'content-length': '123', 'content-type': 'application/json'}, 'body': {'model': 'Azure OpenAI GPT-4 Canada', 'messages': [{'role': 'user', 'content': 'write a litellm poem'}], 'max_tokens': 10}} result = response.json() print(f"Received response: {result}") print("\nPassed /chat/completions with Custom Logger!") except Exception as e: - pytest.fail("LiteLLM Proxy test failed. Exception", e) + pytest.fail(f"LiteLLM Proxy test failed. 
Exception {str(e)}") def test_chat_completion_stream(client): try: # Your test data + litellm.set_verbose=False + from litellm.proxy.utils import get_instance_fn + my_custom_logger = get_instance_fn( + value = "custom_callbacks.my_custom_logger", + config_file_path=python_file_path + ) + + print("id of initialized custom logger", id(my_custom_logger)) + + litellm.callbacks = [my_custom_logger] import json print("initialized proxy") # import the initialized custom logger print(litellm.callbacks) + - assert len(litellm.callbacks) == 1 # assert litellm is initialized with 1 callback - my_custom_logger = litellm.callbacks[0] + print("LiteLLM Callbacks", litellm.callbacks) + print("my_custom_logger", my_custom_logger) assert my_custom_logger.streaming_response_obj == None # no streaming response obj is set pre call @@ -148,37 +232,5 @@ def test_chat_completion_stream(client): assert complete_response == streamed_response["choices"][0]["message"]["content"] except Exception as e: - pytest.fail("LiteLLM Proxy test failed. Exception", e) + pytest.fail(f"LiteLLM Proxy test failed. Exception {str(e)}") - - -def test_embedding(client): - try: - # Your test data - print("initialized proxy") - # import the initialized custom logger - print(litellm.callbacks) - - assert len(litellm.callbacks) == 1 # assert litellm is initialized with 1 callback - my_custom_logger = litellm.callbacks[0] - assert my_custom_logger.async_success_embedding == False - - test_data = { - "model": "azure-embedding-model", - "input": ["hello"] - } - response = client.post("/embeddings", json=test_data, headers=headers) - print("made request", response.status_code, response.text) - assert my_custom_logger.async_success_embedding == True # checks if the status of async_success is True, only the async_log_success_event can set this to true - assert my_custom_logger.async_embedding_kwargs["model"] == "azure-embedding-model" # checks if kwargs passed to async_log_success_event are correct - - kwargs = my_custom_logger.async_embedding_kwargs - litellm_params = kwargs.get("litellm_params") - proxy_server_request = litellm_params.get("proxy_server_request") - model_info = litellm_params.get("model_info") - assert proxy_server_request == {'url': 'http://testserver/embeddings', 'method': 'POST', 'headers': {'host': 'testserver', 'accept': '*/*', 'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive', 'user-agent': 'testclient', 'authorization': 'Bearer sk-1234', 'content-length': '54', 'content-type': 'application/json'}, 'body': {'model': 'azure-embedding-model', 'input': ['hello']}} - assert model_info == {'input_cost_per_token': 0.002, 'mode': 'embedding', 'id': 'hello'} - result = response.json() - print(f"Received response: {result}") - except Exception as e: - pytest.fail("LiteLLM Proxy test failed. 
Exception", e) \ No newline at end of file diff --git a/litellm/tests/test_proxy_exception_mapping.py b/litellm/tests/test_proxy_exception_mapping.py new file mode 100644 index 000000000..5dcb782c4 --- /dev/null +++ b/litellm/tests/test_proxy_exception_mapping.py @@ -0,0 +1,177 @@ +# test that the proxy actually does exception mapping to the OpenAI format + +import sys, os +from dotenv import load_dotenv + +load_dotenv() +import os, io, asyncio +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import pytest +import litellm, openai +from fastapi.testclient import TestClient +from fastapi import FastAPI +from litellm.proxy.proxy_server import router, save_worker_config, initialize # Replace with the actual module where your FastAPI router is defined + +@pytest.fixture +def client(): + filepath = os.path.dirname(os.path.abspath(__file__)) + config_fp = f"{filepath}/test_configs/test_bad_config.yaml" + initialize(config=config_fp) + app = FastAPI() + app.include_router(router) # Include your router in the test app + return TestClient(app) + +# raise openai.AuthenticationError +def test_chat_completion_exception(client): + try: + # Your test data + test_data = { + "model": "gpt-3.5-turbo", + "messages": [ + { + "role": "user", + "content": "hi" + }, + ], + "max_tokens": 10, + } + + response = client.post("/chat/completions", json=test_data) + + # make an openai client to call _make_status_error_from_response + openai_client = openai.OpenAI(api_key="anything") + openai_exception = openai_client._make_status_error_from_response(response=response) + assert isinstance(openai_exception, openai.AuthenticationError) + + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. Exception {str(e)}") + +# raise openai.AuthenticationError +def test_chat_completion_exception_azure(client): + try: + # Your test data + test_data = { + "model": "azure-gpt-3.5-turbo", + "messages": [ + { + "role": "user", + "content": "hi" + }, + ], + "max_tokens": 10, + } + + response = client.post("/chat/completions", json=test_data) + + # make an openai client to call _make_status_error_from_response + openai_client = openai.OpenAI(api_key="anything") + openai_exception = openai_client._make_status_error_from_response(response=response) + assert isinstance(openai_exception, openai.AuthenticationError) + + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. Exception {str(e)}") + + +# raise openai.AuthenticationError +def test_embedding_auth_exception_azure(client): + try: + # Your test data + test_data = { + "model": "azure-embedding", + "input": ["hi"] + } + + response = client.post("/embeddings", json=test_data) + print("Response from proxy=", response) + + # make an openai client to call _make_status_error_from_response + openai_client = openai.OpenAI(api_key="anything") + openai_exception = openai_client._make_status_error_from_response(response=response) + print("Exception raised=", openai_exception) + assert isinstance(openai_exception, openai.AuthenticationError) + + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. 
Exception {str(e)}") + + + + +# raise openai.BadRequestError +# chat/completions openai +def test_exception_openai_bad_model(client): + try: + # Your test data + test_data = { + "model": "azure/GPT-12", + "messages": [ + { + "role": "user", + "content": "hi" + }, + ], + "max_tokens": 10, + } + + response = client.post("/chat/completions", json=test_data) + + # make an openai client to call _make_status_error_from_response + openai_client = openai.OpenAI(api_key="anything") + openai_exception = openai_client._make_status_error_from_response(response=response) + print("Type of exception=", type(openai_exception)) + assert isinstance(openai_exception, openai.NotFoundError) + + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. Exception {str(e)}") + +# chat/completions any model +def test_chat_completion_exception_any_model(client): + try: + # Your test data + test_data = { + "model": "Lite-GPT-12", + "messages": [ + { + "role": "user", + "content": "hi" + }, + ], + "max_tokens": 10, + } + + response = client.post("/chat/completions", json=test_data) + + # make an openai client to call _make_status_error_from_response + openai_client = openai.OpenAI(api_key="anything") + openai_exception = openai_client._make_status_error_from_response(response=response) + print("Exception raised=", openai_exception) + assert isinstance(openai_exception, openai.NotFoundError) + + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. Exception {str(e)}") + + + +# embeddings any model +def test_embedding_exception_any_model(client): + try: + # Your test data + test_data = { + "model": "Lite-GPT-12", + "input": ["hi"] + } + + response = client.post("/embeddings", json=test_data) + print("Response from proxy=", response) + + # make an openai client to call _make_status_error_from_response + openai_client = openai.OpenAI(api_key="anything") + openai_exception = openai_client._make_status_error_from_response(response=response) + print("Exception raised=", openai_exception) + assert isinstance(openai_exception, openai.NotFoundError) + + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. 
Exception {str(e)}") + + diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py index bc22c384f..5e9854f43 100644 --- a/litellm/tests/test_proxy_server.py +++ b/litellm/tests/test_proxy_server.py @@ -24,30 +24,29 @@ logging.basicConfig( from fastapi.testclient import TestClient from fastapi import FastAPI from litellm.proxy.proxy_server import router, save_worker_config, initialize # Replace with the actual module where your FastAPI router is defined -filepath = os.path.dirname(os.path.abspath(__file__)) -config_fp = f"{filepath}/test_configs/test_config_no_auth.yaml" -save_worker_config(config=config_fp, model=None, alias=None, api_base=None, api_version=None, debug=False, temperature=None, max_tokens=None, request_timeout=600, max_budget=None, telemetry=False, drop_params=True, add_function_to_prompt=False, headers=None, save=False, use_queue=False) -app = FastAPI() -app.include_router(router) # Include your router in the test app -@app.on_event("startup") -async def wrapper_startup_event(): - initialize(config=config_fp) # Your bearer token -token = os.getenv("PROXY_MASTER_KEY") +token = "" headers = { "Authorization": f"Bearer {token}" } -# Here you create a fixture that will be used by your tests -# Make sure the fixture returns TestClient(app) -@pytest.fixture(autouse=True) -def client(): - with TestClient(app) as client: - yield client +@pytest.fixture(scope="function") +def client_no_auth(): + # Assuming litellm.proxy.proxy_server is an object + from litellm.proxy.proxy_server import cleanup_router_config_variables + cleanup_router_config_variables() + filepath = os.path.dirname(os.path.abspath(__file__)) + config_fp = f"{filepath}/test_configs/test_config_no_auth.yaml" + # initialize can get run in parallel, it sets specific variables for the fast api app, sinc eit gets run in parallel different tests use the wrong variables + initialize(config=config_fp) + app = FastAPI() + app.include_router(router) # Include your router in the test app -def test_chat_completion(client): + return TestClient(app) + +def test_chat_completion(client_no_auth): global headers try: # Your test data @@ -62,8 +61,8 @@ def test_chat_completion(client): "max_tokens": 10, } - print("testing proxy server") - response = client.post("/v1/chat/completions", json=test_data, headers=headers) + print("testing proxy server with chat completions") + response = client_no_auth.post("/v1/chat/completions", json=test_data) print(f"response - {response.text}") assert response.status_code == 200 result = response.json() @@ -73,7 +72,8 @@ def test_chat_completion(client): # Run the test -def test_chat_completion_azure(client): +def test_chat_completion_azure(client_no_auth): + global headers try: # Your test data @@ -88,8 +88,8 @@ def test_chat_completion_azure(client): "max_tokens": 10, } - print("testing proxy server with Azure Request") - response = client.post("/v1/chat/completions", json=test_data, headers=headers) + print("testing proxy server with Azure Request /chat/completions") + response = client_no_auth.post("/v1/chat/completions", json=test_data) assert response.status_code == 200 result = response.json() @@ -102,15 +102,55 @@ def test_chat_completion_azure(client): # test_chat_completion_azure() -def test_embedding(client): +def test_embedding(client_no_auth): global headers + from litellm.proxy.proxy_server import user_custom_auth + try: test_data = { "model": "azure/azure-embedding-model", "input": ["good morning from litellm"], } - print("testing proxy server with OpenAI 
embedding") - response = client.post("/v1/embeddings", json=test_data, headers=headers) + + response = client_no_auth.post("/v1/embeddings", json=test_data) + + assert response.status_code == 200 + result = response.json() + print(len(result["data"][0]["embedding"])) + assert len(result["data"][0]["embedding"]) > 10 # this usually has len==1536 so + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}") + +def test_bedrock_embedding(client_no_auth): + global headers + from litellm.proxy.proxy_server import user_custom_auth + + try: + test_data = { + "model": "amazon-embeddings", + "input": ["good morning from litellm"], + } + + response = client_no_auth.post("/v1/embeddings", json=test_data) + + assert response.status_code == 200 + result = response.json() + print(len(result["data"][0]["embedding"])) + assert len(result["data"][0]["embedding"]) > 10 # this usually has len==1536 so + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}") + +def test_sagemaker_embedding(client_no_auth): + global headers + from litellm.proxy.proxy_server import user_custom_auth + + try: + test_data = { + "model": "GPT-J 6B - Sagemaker Text Embedding (Internal)", + "input": ["good morning from litellm"], + } + + response = client_no_auth.post("/v1/embeddings", json=test_data) assert response.status_code == 200 result = response.json() @@ -122,8 +162,8 @@ def test_embedding(client): # Run the test # test_embedding() -@pytest.mark.skip(reason="hitting yaml load issues on circle-ci") -def test_add_new_model(client): +# @pytest.mark.skip(reason="hitting yaml load issues on circle-ci") +def test_add_new_model(client_no_auth): global headers try: test_data = { @@ -135,15 +175,15 @@ def test_add_new_model(client): "description": "this is a test openai model" } } - client.post("/model/new", json=test_data, headers=headers) - response = client.get("/model/info", headers=headers) + client_no_auth.post("/model/new", json=test_data, headers=headers) + response = client_no_auth.get("/model/info", headers=headers) assert response.status_code == 200 result = response.json() print(f"response: {result}") model_info = None for m in result["data"]: - if m["id"]["model_name"] == "test_openai_models": - model_info = m["id"]["model_info"] + if m["model_name"] == "test_openai_models": + model_info = m["model_info"] assert model_info["description"] == "this is a test openai model" except Exception as e: pytest.fail(f"LiteLLM Proxy test failed. 
Exception {str(e)}") @@ -164,10 +204,9 @@ class MyCustomHandler(CustomLogger): customHandler = MyCustomHandler() -def test_chat_completion_optional_params(client): +def test_chat_completion_optional_params(client_no_auth): # [PROXY: PROD TEST] - DO NOT DELETE # This tests if all the /chat/completion params are passed to litellm - try: # Your test data litellm.set_verbose=True @@ -185,7 +224,7 @@ def test_chat_completion_optional_params(client): litellm.callbacks = [customHandler] print("testing proxy server: optional params") - response = client.post("/v1/chat/completions", json=test_data, headers=headers) + response = client_no_auth.post("/v1/chat/completions", json=test_data) assert response.status_code == 200 result = response.json() print(f"Received response: {result}") @@ -217,6 +256,29 @@ def test_load_router_config(): print(result) assert len(result[1]) == 2 + # tests for litellm.cache set from config + print("testing reading proxy config for cache") + litellm.cache = None + load_router_config( + router=None, + config_file_path=f"{filepath}/example_config_yaml/cache_no_params.yaml" + ) + assert litellm.cache is not None + assert "redis_client" in vars(litellm.cache.cache) # it should default to redis on proxy + assert litellm.cache.supported_call_types == ['completion', 'acompletion', 'embedding', 'aembedding'] # init with all call types + + print("testing reading proxy config for cache with params") + load_router_config( + router=None, + config_file_path=f"{filepath}/example_config_yaml/cache_with_params.yaml" + ) + assert litellm.cache is not None + print(litellm.cache) + print(litellm.cache.supported_call_types) + print(vars(litellm.cache.cache)) + assert "redis_client" in vars(litellm.cache.cache) # it should default to redis on proxy + assert litellm.cache.supported_call_types == ['embedding', 'aembedding'] # init with all call types + except Exception as e: pytest.fail("Proxy: Got exception reading config", e) -# test_load_router_config() +# test_load_router_config() \ No newline at end of file diff --git a/litellm/tests/test_proxy_server_keys.py b/litellm/tests/test_proxy_server_keys.py index db083c30c..14b239ae1 100644 --- a/litellm/tests/test_proxy_server_keys.py +++ b/litellm/tests/test_proxy_server_keys.py @@ -37,6 +37,8 @@ async def wrapper_startup_event(): # Make sure the fixture returns TestClient(app) @pytest.fixture(autouse=True) def client(): + from litellm.proxy.proxy_server import cleanup_router_config_variables + cleanup_router_config_variables() with TestClient(app) as client: yield client @@ -69,6 +71,38 @@ def test_add_new_key(client): except Exception as e: pytest.fail(f"LiteLLM Proxy test failed. 
Exception: {str(e)}") + +def test_update_new_key(client): + try: + # Your test data + test_data = { + "models": ["gpt-3.5-turbo", "gpt-4", "claude-2", "azure-model"], + "aliases": {"mistral-7b": "gpt-3.5-turbo"}, + "duration": "20m" + } + print("testing proxy server") + # Your bearer token + token = os.getenv("PROXY_MASTER_KEY") + + headers = { + "Authorization": f"Bearer {token}" + } + response = client.post("/key/generate", json=test_data, headers=headers) + print(f"response: {response.text}") + assert response.status_code == 200 + result = response.json() + assert result["key"].startswith("sk-") + def _post_data(): + json_data = {'models': ['bedrock-models'], "key": result["key"]} + response = client.post("/key/update", json=json_data, headers=headers) + print(f"response text: {response.text}") + assert response.status_code == 200 + return response + _post_data() + print(f"Received response: {result}") + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. Exception: {str(e)}") + # # Run the test - only runs via pytest diff --git a/litellm/tests/test_router.py b/litellm/tests/test_router.py index 4ec91ec0d..403c8dc2a 100644 --- a/litellm/tests/test_router.py +++ b/litellm/tests/test_router.py @@ -366,69 +366,12 @@ def test_function_calling(): } ] - router = Router(model_list=model_list, routing_strategy="latency-based-routing") + router = Router(model_list=model_list) response = router.completion(model="gpt-3.5-turbo-0613", messages=messages, functions=functions) router.reset() print(response) -def test_acompletion_on_router(): - # tests acompletion + caching on router - try: - litellm.set_verbose = True - model_list = [ - { - "model_name": "gpt-3.5-turbo", - "litellm_params": { - "model": "gpt-3.5-turbo-0613", - "api_key": os.getenv("OPENAI_API_KEY"), - }, - "tpm": 100000, - "rpm": 10000, - }, - { - "model_name": "gpt-3.5-turbo", - "litellm_params": { - "model": "azure/chatgpt-v-2", - "api_key": os.getenv("AZURE_API_KEY"), - "api_base": os.getenv("AZURE_API_BASE"), - "api_version": os.getenv("AZURE_API_VERSION") - }, - "tpm": 100000, - "rpm": 10000, - } - ] - - messages = [ - {"role": "user", "content": f"write a one sentence poem {time.time()}?"} - ] - start_time = time.time() - router = Router(model_list=model_list, - redis_host=os.environ["REDIS_HOST"], - redis_password=os.environ["REDIS_PASSWORD"], - redis_port=os.environ["REDIS_PORT"], - cache_responses=True, - timeout=30, - routing_strategy="simple-shuffle") - async def get_response(): - print("Testing acompletion + caching on router") - response1 = await router.acompletion(model="gpt-3.5-turbo", messages=messages, temperature=1) - print(f"response1: {response1}") - response2 = await router.acompletion(model="gpt-3.5-turbo", messages=messages, temperature=1) - print(f"response2: {response2}") - assert response1.id == response2.id - assert len(response1.choices[0].message.content) > 0 - assert response1.choices[0].message.content == response2.choices[0].message.content - asyncio.run(get_response()) - router.reset() - except litellm.Timeout as e: - end_time = time.time() - print(f"timeout error occurred: {end_time - start_time}") - pass - except Exception as e: - traceback.print_exc() - pytest.fail(f"Error occurred: {e}") - -test_acompletion_on_router() +# test_acompletion_on_router() def test_function_calling_on_router(): try: @@ -507,7 +450,6 @@ def test_aembedding_on_router(): model="text-embedding-ada-002", input=["good morning from litellm 2"], ) - print("sync embedding response: ", response) router.reset() except 
Exception as e: traceback.print_exc() @@ -591,6 +533,30 @@ def test_bedrock_on_router(): pytest.fail(f"Error occurred: {e}") # test_bedrock_on_router() +# test openai-compatible endpoint +@pytest.mark.asyncio +async def test_mistral_on_router(): + litellm.set_verbose = True + model_list = [ + { + "model_name": "gpt-3.5-turbo", + "litellm_params": { + "model": "mistral/mistral-medium", + }, + }, + ] + router = Router(model_list=model_list) + response = await router.acompletion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": "hello from litellm test", + } + ] + ) + print(response) +asyncio.run(test_mistral_on_router()) def test_openai_completion_on_router(): # [PROD Use Case] - Makes an acompletion call + async acompletion call, and sync acompletion call, sync completion + stream diff --git a/litellm/tests/test_router_caching.py b/litellm/tests/test_router_caching.py new file mode 100644 index 000000000..27191c8d2 --- /dev/null +++ b/litellm/tests/test_router_caching.py @@ -0,0 +1,127 @@ +#### What this tests #### +# This tests caching on the router +import sys, os, time +import traceback, asyncio +import pytest +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import litellm +from litellm import Router + +## Scenarios +## 1. 2 models - openai + azure - 1 model group "gpt-3.5-turbo", +## 2. 2 models - openai, azure - 2 diff model groups, 1 caching group + +@pytest.mark.asyncio +async def test_acompletion_caching_on_router(): + # tests acompletion + caching on router + try: + litellm.set_verbose = True + model_list = [ + { + "model_name": "gpt-3.5-turbo", + "litellm_params": { + "model": "gpt-3.5-turbo-0613", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + "tpm": 100000, + "rpm": 10000, + }, + { + "model_name": "gpt-3.5-turbo", + "litellm_params": { + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_base": os.getenv("AZURE_API_BASE"), + "api_version": os.getenv("AZURE_API_VERSION") + }, + "tpm": 100000, + "rpm": 10000, + } + ] + + messages = [ + {"role": "user", "content": f"write a one sentence poem {time.time()}?"} + ] + start_time = time.time() + router = Router(model_list=model_list, + redis_host=os.environ["REDIS_HOST"], + redis_password=os.environ["REDIS_PASSWORD"], + redis_port=os.environ["REDIS_PORT"], + cache_responses=True, + timeout=30, + routing_strategy="simple-shuffle") + response1 = await router.acompletion(model="gpt-3.5-turbo", messages=messages, temperature=1) + print(f"response1: {response1}") + await asyncio.sleep(1) # add cache is async, async sleep for cache to get set + response2 = await router.acompletion(model="gpt-3.5-turbo", messages=messages, temperature=1) + print(f"response2: {response2}") + assert response1.id == response2.id + assert len(response1.choices[0].message.content) > 0 + assert response1.choices[0].message.content == response2.choices[0].message.content + router.reset() + except litellm.Timeout as e: + end_time = time.time() + print(f"timeout error occurred: {end_time - start_time}") + pass + except Exception as e: + traceback.print_exc() + pytest.fail(f"Error occurred: {e}") + +@pytest.mark.asyncio +async def test_acompletion_caching_on_router_caching_groups(): + # tests acompletion + caching on router + try: + litellm.set_verbose = True + model_list = [ + { + "model_name": "openai-gpt-3.5-turbo", + "litellm_params": { + "model": "gpt-3.5-turbo-0613", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + "tpm": 100000, + "rpm": 10000, + }, + { + 
"model_name": "azure-gpt-3.5-turbo", + "litellm_params": { + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_base": os.getenv("AZURE_API_BASE"), + "api_version": os.getenv("AZURE_API_VERSION") + }, + "tpm": 100000, + "rpm": 10000, + } + ] + + messages = [ + {"role": "user", "content": f"write a one sentence poem {time.time()}?"} + ] + start_time = time.time() + router = Router(model_list=model_list, + redis_host=os.environ["REDIS_HOST"], + redis_password=os.environ["REDIS_PASSWORD"], + redis_port=os.environ["REDIS_PORT"], + cache_responses=True, + timeout=30, + routing_strategy="simple-shuffle", + caching_groups=[("openai-gpt-3.5-turbo", "azure-gpt-3.5-turbo")]) + response1 = await router.acompletion(model="openai-gpt-3.5-turbo", messages=messages, temperature=1) + print(f"response1: {response1}") + await asyncio.sleep(1) # add cache is async, async sleep for cache to get set + response2 = await router.acompletion(model="azure-gpt-3.5-turbo", messages=messages, temperature=1) + print(f"response2: {response2}") + assert response1.id == response2.id + assert len(response1.choices[0].message.content) > 0 + assert response1.choices[0].message.content == response2.choices[0].message.content + router.reset() + except litellm.Timeout as e: + end_time = time.time() + print(f"timeout error occurred: {end_time - start_time}") + pass + except Exception as e: + traceback.print_exc() + pytest.fail(f"Error occurred: {e}") \ No newline at end of file diff --git a/litellm/tests/test_router_fallbacks.py b/litellm/tests/test_router_fallbacks.py index 3779dc09a..22b5f121e 100644 --- a/litellm/tests/test_router_fallbacks.py +++ b/litellm/tests/test_router_fallbacks.py @@ -21,80 +21,89 @@ class MyCustomHandler(CustomLogger): print(f"Pre-API Call") def log_post_api_call(self, kwargs, response_obj, start_time, end_time): - print(f"Post-API Call") + print(f"Post-API Call - response object: {response_obj}; model: {kwargs['model']}") + def log_stream_event(self, kwargs, response_obj, start_time, end_time): print(f"On Stream") + + def async_log_stream_event(self, kwargs, response_obj, start_time, end_time): + print(f"On Stream") def log_success_event(self, kwargs, response_obj, start_time, end_time): print(f"previous_models: {kwargs['litellm_params']['metadata']['previous_models']}") self.previous_models += len(kwargs["litellm_params"]["metadata"]["previous_models"]) # {"previous_models": [{"model": litellm_model_name, "exception_type": AuthenticationError, "exception_string": }]} print(f"self.previous_models: {self.previous_models}") print(f"On Success") + + async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): + print(f"previous_models: {kwargs['litellm_params']['metadata']['previous_models']}") + self.previous_models += len(kwargs["litellm_params"]["metadata"]["previous_models"]) # {"previous_models": [{"model": litellm_model_name, "exception_type": AuthenticationError, "exception_string": }]} + print(f"self.previous_models: {self.previous_models}") + print(f"On Success") def log_failure_event(self, kwargs, response_obj, start_time, end_time): print(f"On Failure") -model_list = [ - { # list of model deployments - "model_name": "azure/gpt-3.5-turbo", # openai model name - "litellm_params": { # params for litellm completion/embedding call - "model": "azure/chatgpt-v-2", - "api_key": "bad-key", - "api_version": os.getenv("AZURE_API_VERSION"), - "api_base": os.getenv("AZURE_API_BASE") - }, - "tpm": 240000, - "rpm": 1800 - }, - { # list of model deployments 
- "model_name": "azure/gpt-3.5-turbo-context-fallback", # openai model name - "litellm_params": { # params for litellm completion/embedding call - "model": "azure/chatgpt-v-2", - "api_key": os.getenv("AZURE_API_KEY"), - "api_version": os.getenv("AZURE_API_VERSION"), - "api_base": os.getenv("AZURE_API_BASE") - }, - "tpm": 240000, - "rpm": 1800 - }, - { - "model_name": "azure/gpt-3.5-turbo", # openai model name - "litellm_params": { # params for litellm completion/embedding call - "model": "azure/chatgpt-functioncalling", - "api_key": "bad-key", - "api_version": os.getenv("AZURE_API_VERSION"), - "api_base": os.getenv("AZURE_API_BASE") - }, - "tpm": 240000, - "rpm": 1800 - }, - { - "model_name": "gpt-3.5-turbo", # openai model name - "litellm_params": { # params for litellm completion/embedding call - "model": "gpt-3.5-turbo", - "api_key": os.getenv("OPENAI_API_KEY"), - }, - "tpm": 1000000, - "rpm": 9000 - }, - { - "model_name": "gpt-3.5-turbo-16k", # openai model name - "litellm_params": { # params for litellm completion/embedding call - "model": "gpt-3.5-turbo-16k", - "api_key": os.getenv("OPENAI_API_KEY"), - }, - "tpm": 1000000, - "rpm": 9000 - } -] - - kwargs = {"model": "azure/gpt-3.5-turbo", "messages": [{"role": "user", "content":"Hey, how's it going?"}]} def test_sync_fallbacks(): try: + model_list = [ + { # list of model deployments + "model_name": "azure/gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": "bad-key", + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { # list of model deployments + "model_name": "azure/gpt-3.5-turbo-context-fallback", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { + "model_name": "azure/gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-functioncalling", + "api_key": "bad-key", + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { + "model_name": "gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "gpt-3.5-turbo", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + "tpm": 1000000, + "rpm": 9000 + }, + { + "model_name": "gpt-3.5-turbo-16k", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "gpt-3.5-turbo-16k", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + "tpm": 1000000, + "rpm": 9000 + } + ] + litellm.set_verbose = True customHandler = MyCustomHandler() litellm.callbacks = [customHandler] @@ -106,62 +115,93 @@ def test_sync_fallbacks(): print(f"response: {response}") time.sleep(0.05) # allow a delay as success_callbacks are on a separate thread assert customHandler.previous_models == 1 # 0 retries, 1 fallback + + print("Passed ! 
Test router_fallbacks: test_sync_fallbacks()") router.reset() except Exception as e: print(e) # test_sync_fallbacks() -def test_async_fallbacks(): +@pytest.mark.asyncio +async def test_async_fallbacks(): litellm.set_verbose = False + model_list = [ + { # list of model deployments + "model_name": "azure/gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": "bad-key", + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { # list of model deployments + "model_name": "azure/gpt-3.5-turbo-context-fallback", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { + "model_name": "azure/gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-functioncalling", + "api_key": "bad-key", + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { + "model_name": "gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "gpt-3.5-turbo", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + "tpm": 1000000, + "rpm": 9000 + }, + { + "model_name": "gpt-3.5-turbo-16k", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "gpt-3.5-turbo-16k", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + "tpm": 1000000, + "rpm": 9000 + } + ] + router = Router(model_list=model_list, fallbacks=[{"azure/gpt-3.5-turbo": ["gpt-3.5-turbo"]}], context_window_fallbacks=[{"azure/gpt-3.5-turbo-context-fallback": ["gpt-3.5-turbo-16k"]}, {"gpt-3.5-turbo": ["gpt-3.5-turbo-16k"]}], set_verbose=False) - async def test_get_response(): - customHandler = MyCustomHandler() - litellm.callbacks = [customHandler] - user_message = "Hello, how are you?" - messages = [{"content": user_message, "role": "user"}] - try: - response = await router.acompletion(**kwargs) - print(f"customHandler.previous_models: {customHandler.previous_models}") - time.sleep(0.05) # allow a delay as success_callbacks are on a separate thread - assert customHandler.previous_models == 1 # 0 retries, 1 fallback - router.reset() - except litellm.Timeout as e: - pass - except Exception as e: - pytest.fail(f"An exception occurred: {e}") - finally: - router.reset() - asyncio.run(test_get_response()) + customHandler = MyCustomHandler() + litellm.callbacks = [customHandler] + user_message = "Hello, how are you?" 
+ messages = [{"content": user_message, "role": "user"}] + try: + response = await router.acompletion(**kwargs) + print(f"customHandler.previous_models: {customHandler.previous_models}") + await asyncio.sleep(0.05) # allow a delay as success_callbacks are on a separate thread + assert customHandler.previous_models == 1 # 0 retries, 1 fallback + router.reset() + except litellm.Timeout as e: + pass + except Exception as e: + pytest.fail(f"An exception occurred: {e}") + finally: + router.reset() # test_async_fallbacks() -## COMMENTING OUT as the context size exceeds both gpt-3.5-turbo and gpt-3.5-turbo-16k, need a better message here -# def test_sync_context_window_fallbacks(): -# try: -# customHandler = MyCustomHandler() -# litellm.callbacks = [customHandler] -# sample_text = "Say error 50 times" * 10000 -# kwargs["model"] = "azure/gpt-3.5-turbo-context-fallback" -# kwargs["messages"] = [{"role": "user", "content": sample_text}] -# router = Router(model_list=model_list, -# fallbacks=[{"azure/gpt-3.5-turbo": ["gpt-3.5-turbo"]}], -# context_window_fallbacks=[{"azure/gpt-3.5-turbo-context-fallback": ["gpt-3.5-turbo-16k"]}, {"gpt-3.5-turbo": ["gpt-3.5-turbo-16k"]}], -# set_verbose=False) -# response = router.completion(**kwargs) -# print(f"response: {response}") -# time.sleep(0.05) # allow a delay as success_callbacks are on a separate thread -# assert customHandler.previous_models == 1 # 0 retries, 1 fallback -# router.reset() -# except Exception as e: -# print(f"An exception occurred - {e}") -# finally: -# router.reset() - -# test_sync_context_window_fallbacks() - def test_dynamic_fallbacks_sync(): """ Allow setting the fallback in the router.completion() call. @@ -169,6 +209,60 @@ def test_dynamic_fallbacks_sync(): try: customHandler = MyCustomHandler() litellm.callbacks = [customHandler] + model_list = [ + { # list of model deployments + "model_name": "azure/gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": "bad-key", + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { # list of model deployments + "model_name": "azure/gpt-3.5-turbo-context-fallback", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { + "model_name": "azure/gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-functioncalling", + "api_key": "bad-key", + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { + "model_name": "gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "gpt-3.5-turbo", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + "tpm": 1000000, + "rpm": 9000 + }, + { + "model_name": "gpt-3.5-turbo-16k", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "gpt-3.5-turbo-16k", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + "tpm": 1000000, + "rpm": 9000 + } + ] + router = Router(model_list=model_list, set_verbose=True) kwargs = {} kwargs["model"] = "azure/gpt-3.5-turbo" @@ -184,26 +278,83 @@ def test_dynamic_fallbacks_sync(): # 
test_dynamic_fallbacks_sync() -def test_dynamic_fallbacks_async(): +@pytest.mark.asyncio +async def test_dynamic_fallbacks_async(): """ Allow setting the fallback in the router.completion() call. """ - async def test_get_response(): - try: - customHandler = MyCustomHandler() - litellm.callbacks = [customHandler] - router = Router(model_list=model_list, set_verbose=True) - kwargs = {} - kwargs["model"] = "azure/gpt-3.5-turbo" - kwargs["messages"] = [{"role": "user", "content": "Hey, how's it going?"}] - kwargs["fallbacks"] = [{"azure/gpt-3.5-turbo": ["gpt-3.5-turbo"]}] - response = await router.acompletion(**kwargs) - print(f"response: {response}") - time.sleep(0.05) # allow a delay as success_callbacks are on a separate thread - assert customHandler.previous_models == 1 # 0 retries, 1 fallback - router.reset() - except Exception as e: - pytest.fail(f"An exception occurred - {e}") - asyncio.run(test_get_response()) + try: + model_list = [ + { # list of model deployments + "model_name": "azure/gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": "bad-key", + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { # list of model deployments + "model_name": "azure/gpt-3.5-turbo-context-fallback", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { + "model_name": "azure/gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-functioncalling", + "api_key": "bad-key", + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE") + }, + "tpm": 240000, + "rpm": 1800 + }, + { + "model_name": "gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "gpt-3.5-turbo", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + "tpm": 1000000, + "rpm": 9000 + }, + { + "model_name": "gpt-3.5-turbo-16k", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "gpt-3.5-turbo-16k", + "api_key": os.getenv("OPENAI_API_KEY"), + }, + "tpm": 1000000, + "rpm": 9000 + } + ] -# test_dynamic_fallbacks_async() \ No newline at end of file + print() + print() + print() + print() + print(f"STARTING DYNAMIC ASYNC") + customHandler = MyCustomHandler() + litellm.callbacks = [customHandler] + router = Router(model_list=model_list, set_verbose=True) + kwargs = {} + kwargs["model"] = "azure/gpt-3.5-turbo" + kwargs["messages"] = [{"role": "user", "content": "Hey, how's it going?"}] + kwargs["fallbacks"] = [{"azure/gpt-3.5-turbo": ["gpt-3.5-turbo"]}] + response = await router.acompletion(**kwargs) + print(f"RESPONSE: {response}") + await asyncio.sleep(0.05) # allow a delay as success_callbacks are on a separate thread + assert customHandler.previous_models == 1 # 0 retries, 1 fallback + router.reset() + except Exception as e: + pytest.fail(f"An exception occurred - {e}") +# asyncio.run(test_dynamic_fallbacks_async()) \ No newline at end of file diff --git a/litellm/tests/test_router_get_deployments.py b/litellm/tests/test_router_get_deployments.py index 3e6bbd2b6..a71fc3823 100644 --- 
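The fallback tests above repeat a large `model_list`; the behaviour they exercise condenses to the sketch below. The `azure/gpt-3.5-turbo` group uses a deliberately bad key, so the router is expected to retry against the `gpt-3.5-turbo` group, whether the fallback map is fixed at `Router()` init or passed per request.

```python
# Condensed sketch of the fallback behaviour exercised by test_sync_fallbacks
# and test_dynamic_fallbacks_sync/async.
import os
from litellm import Router

model_list = [
    {
        "model_name": "azure/gpt-3.5-turbo",
        "litellm_params": {
            "model": "azure/chatgpt-v-2",
            "api_key": "bad-key",  # forces an auth failure -> triggers the fallback
            "api_base": os.getenv("AZURE_API_BASE"),
            "api_version": os.getenv("AZURE_API_VERSION"),
        },
    },
    {
        "model_name": "gpt-3.5-turbo",
        "litellm_params": {
            "model": "gpt-3.5-turbo",
            "api_key": os.getenv("OPENAI_API_KEY"),
        },
    },
]

# fallbacks can be fixed at Router init...
router = Router(model_list=model_list, fallbacks=[{"azure/gpt-3.5-turbo": ["gpt-3.5-turbo"]}])

# ...or passed per request, as test_dynamic_fallbacks_sync does
response = router.completion(
    model="azure/gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    fallbacks=[{"azure/gpt-3.5-turbo": ["gpt-3.5-turbo"]}],
)
print(response)
```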
a/litellm/tests/test_router_get_deployments.py +++ b/litellm/tests/test_router_get_deployments.py @@ -49,7 +49,7 @@ def test_weighted_selection_router(): for _ in range(1000): selected_model = router.get_available_deployment("gpt-3.5-turbo") selected_model_id = selected_model["litellm_params"]["model"] - selected_model_name = litellm.utils.remove_model_id(selected_model_id) + selected_model_name = selected_model_id selection_counts[selected_model_name] +=1 print(selection_counts) @@ -101,7 +101,7 @@ def test_weighted_selection_router_tpm(): for _ in range(1000): selected_model = router.get_available_deployment("gpt-3.5-turbo") selected_model_id = selected_model["litellm_params"]["model"] - selected_model_name = litellm.utils.remove_model_id(selected_model_id) + selected_model_name = selected_model_id selection_counts[selected_model_name] +=1 print(selection_counts) @@ -154,7 +154,7 @@ def test_weighted_selection_router_tpm_as_router_param(): for _ in range(1000): selected_model = router.get_available_deployment("gpt-3.5-turbo") selected_model_id = selected_model["litellm_params"]["model"] - selected_model_name = litellm.utils.remove_model_id(selected_model_id) + selected_model_name = selected_model_id selection_counts[selected_model_name] +=1 print(selection_counts) @@ -210,7 +210,7 @@ def test_weighted_selection_router_rpm_as_router_param(): for _ in range(1000): selected_model = router.get_available_deployment("gpt-3.5-turbo") selected_model_id = selected_model["litellm_params"]["model"] - selected_model_name = litellm.utils.remove_model_id(selected_model_id) + selected_model_name = selected_model_id selection_counts[selected_model_name] +=1 print(selection_counts) @@ -270,7 +270,7 @@ def test_weighted_selection_router_no_rpm_set(): for _ in range(1000): selected_model = router.get_available_deployment("claude-1") selected_model_id = selected_model["litellm_params"]["model"] - selected_model_name = litellm.utils.remove_model_id(selected_model_id) + selected_model_name = selected_model_id selection_counts[selected_model_name] +=1 print(selection_counts) @@ -291,14 +291,13 @@ def test_weighted_selection_router_no_rpm_set(): def test_model_group_aliases(): try: litellm.set_verbose = False - litellm.model_group_alias_map = {"gpt-4": "gpt-3.5-turbo"} model_list = [ { "model_name": "gpt-3.5-turbo", "litellm_params": { "model": "gpt-3.5-turbo-0613", "api_key": os.getenv("OPENAI_API_KEY"), - "rpm": 6, + "tpm": 1, }, }, { @@ -308,29 +307,46 @@ def test_model_group_aliases(): "api_key": os.getenv("AZURE_API_KEY"), "api_base": os.getenv("AZURE_API_BASE"), "api_version": os.getenv("AZURE_API_VERSION"), - "rpm": 1440, + "tpm": 99, }, }, { "model_name": "claude-1", "litellm_params": { "model": "bedrock/claude1.2", - "rpm": 1440, + "tpm": 1, }, } ] router = Router( model_list=model_list, + model_group_alias={"gpt-4": "gpt-3.5-turbo"} # gpt-4 requests sent to gpt-3.5-turbo ) + + # test that gpt-4 requests are sent to gpt-3.5-turbo for _ in range(20): selected_model = router.get_available_deployment("gpt-4") print("\n selected model", selected_model) selected_model_name = selected_model.get("model_name") - if selected_model_name is not "gpt-3.5-turbo": + if selected_model_name != "gpt-3.5-turbo": pytest.fail(f"Selected model {selected_model_name} is not gpt-3.5-turbo") - + + # test that + # call get_available_deployment 1k times, it should pick azure/chatgpt-v-2 about 90% of the time + selection_counts = defaultdict(int) + for _ in range(1000): + selected_model = 
router.get_available_deployment("gpt-3.5-turbo") + selected_model_id = selected_model["litellm_params"]["model"] + selected_model_name = selected_model_id + selection_counts[selected_model_name] +=1 + print(selection_counts) + + total_requests = sum(selection_counts.values()) + + # Assert that 'azure/chatgpt-v-2' has about 90% of the total requests + assert selection_counts['azure/chatgpt-v-2'] / total_requests > 0.89, f"Assertion failed: 'azure/chatgpt-v-2' does not have about 90% of the total requests in the weighted load balancer. Selection counts {selection_counts}" + router.reset() - litellm.model_group_alias_map = {} except Exception as e: traceback.print_exc() pytest.fail(f"Error occurred: {e}") diff --git a/litellm/tests/test_streaming.py b/litellm/tests/test_streaming.py index 38af89631..8e6bd346f 100644 --- a/litellm/tests/test_streaming.py +++ b/litellm/tests/test_streaming.py @@ -58,6 +58,7 @@ def validate_first_format(chunk): for choice in chunk['choices']: assert isinstance(choice['index'], int), "'index' should be an integer." assert isinstance(choice['delta']['role'], str), "'role' should be a string." + assert "messages" not in choice # openai v1.0.0 returns content as None assert (choice['finish_reason'] is None) or isinstance(choice['finish_reason'], str), "'finish_reason' should be None or a string." @@ -230,7 +231,7 @@ def test_completion_cohere_stream_bad_key(): def test_completion_azure_stream(): try: - litellm.set_verbose = True + litellm.set_verbose = False messages = [ {"role": "system", "content": "You are a helpful assistant."}, { @@ -243,14 +244,14 @@ def test_completion_azure_stream(): ) complete_response = "" # Add any assertions here to check the response - for idx, chunk in enumerate(response): - chunk, finished = streaming_format_tests(idx, chunk) - if finished: - break + for idx, init_chunk in enumerate(response): + chunk, finished = streaming_format_tests(idx, init_chunk) complete_response += chunk + if finished: + assert isinstance(init_chunk.choices[0], litellm.utils.StreamingChoices) + break if complete_response.strip() == "": raise Exception("Empty response received") - print(f"completion_response: {complete_response}") except Exception as e: pytest.fail(f"Error occurred: {e}") # test_completion_azure_stream() @@ -334,6 +335,37 @@ def test_completion_palm_stream(): pytest.fail(f"Error occurred: {e}") # test_completion_palm_stream() + +def test_completion_mistral_api_stream(): + try: + litellm.set_verbose = True + print("Testing streaming mistral api response") + response = completion( + model="mistral/mistral-medium", + messages=[ + { + "role": "user", + "content": "Hey, how's it going?", + } + ], + max_tokens=10, + stream=True + ) + complete_response = "" + for idx, chunk in enumerate(response): + print(chunk) + # print(chunk.choices[0].delta) + chunk, finished = streaming_format_tests(idx, chunk) + if finished: + break + complete_response += chunk + if complete_response.strip() == "": + raise Exception("Empty response received") + print(f"completion_response: {complete_response}") + except Exception as e: + pytest.fail(f"Error occurred: {e}") +# test_completion_mistral_api_stream() + def test_completion_deep_infra_stream(): # deep infra currently includes role in the 2nd chunk # waiting for them to make a fix on this @@ -364,6 +396,7 @@ def test_completion_deep_infra_stream(): pytest.fail(f"Error occurred: {e}") # test_completion_deep_infra_stream() +@pytest.mark.skip() def test_completion_nlp_cloud_stream(): try: messages = [ @@ -633,6 +666,47 @@ 
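`test_model_group_aliases` now passes `model_group_alias` to `Router()` instead of mutating a global alias map. A condensed usage sketch of that option:

```python
import os
from litellm import Router

model_list = [
    {
        "model_name": "gpt-3.5-turbo",
        "litellm_params": {
            "model": "gpt-3.5-turbo-0613",
            "api_key": os.getenv("OPENAI_API_KEY"),
        },
    },
]

router = Router(
    model_list=model_list,
    model_group_alias={"gpt-4": "gpt-3.5-turbo"},  # gpt-4 requests served by the gpt-3.5-turbo group
)

deployment = router.get_available_deployment("gpt-4")
assert deployment["model_name"] == "gpt-3.5-turbo"
```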
def test_completion_bedrock_ai21_stream(): # test_completion_bedrock_ai21_stream() +def test_sagemaker_weird_response(): + """ + When the stream ends, flush any remaining holding chunks. + """ + try: + chunk = """[INST] Hey, how's it going? [/INST] + + I'm doing well, thanks for asking! How about you? Is there anything you'd like to chat about or ask? I'm here to help with any questions you might have.""" + + logging_obj = litellm.Logging(model="berri-benchmarking-Llama-2-70b-chat-hf-4", messages=messages, stream=True, litellm_call_id="1234", function_id="function_id", call_type="acompletion", start_time=time.time()) + response = litellm.CustomStreamWrapper(completion_stream=chunk, model="berri-benchmarking-Llama-2-70b-chat-hf-4", custom_llm_provider="sagemaker", logging_obj=logging_obj) + complete_response = "" + for chunk in response: + complete_response += chunk["choices"][0]["delta"]["content"] + assert len(complete_response) > 0 + except Exception as e: + pytest.fail(f"An exception occurred - {str(e)}") +# test_sagemaker_weird_response() + +@pytest.mark.asyncio +async def test_sagemaker_streaming_async(): + try: + messages = [{"role": "user", "content": "Hey, how's it going?"}] + litellm.set_verbose=True + response = await litellm.acompletion( + model="sagemaker/berri-benchmarking-Llama-2-70b-chat-hf-4", + messages=messages, + max_tokens=100, + temperature=0.7, + stream=True, + ) + + # Add any assertions here to check the response + complete_response = "" + async for chunk in response: + complete_response += chunk.choices[0].delta.content or "" + print(f"complete_response: {complete_response}") + assert len(complete_response) > 0 + except Exception as e: + pytest.fail(f"An exception occurred - {str(e)}") + # def test_completion_sagemaker_stream(): # try: # response = completion( diff --git a/litellm/tests/test_text_completion.py b/litellm/tests/test_text_completion.py index 660367651..9257a07f3 100644 --- a/litellm/tests/test_text_completion.py +++ b/litellm/tests/test_text_completion.py @@ -1,4 +1,4 @@ -import sys, os +import sys, os, asyncio import traceback from dotenv import load_dotenv @@ -10,7 +10,7 @@ sys.path.insert( ) # Adds the parent directory to the system path import pytest import litellm -from litellm import embedding, completion, text_completion, completion_cost +from litellm import embedding, completion, text_completion, completion_cost, atext_completion from litellm import RateLimitError @@ -61,7 +61,7 @@ def test_completion_openai_engine(): #print(response.choices[0].text) except Exception as e: pytest.fail(f"Error occurred: {e}") -test_completion_openai_engine() +# test_completion_openai_engine() def test_completion_chatgpt_prompt(): @@ -142,7 +142,7 @@ def test_completion_hf_prompt_array(): prompt=token_prompt, # token prompt is a 2d list, max_tokens=0, temperature=0.0, - echo=True, + # echo=True, # hugging face inference api is currently raising errors for this, looks like they have a regression on their side ) print("\n\n response") @@ -163,8 +163,23 @@ def test_text_completion_stream(): max_tokens=10, ) for chunk in response: - print(chunk) + print(f"chunk: {chunk}") except Exception as e: pytest.fail(f"GOT exception for HF In streaming{e}") -test_text_completion_stream() +# test_text_completion_stream() + +async def test_text_completion_async_stream(): + try: + response = await atext_completion( + model="text-completion-openai/text-davinci-003", + prompt="good morning", + stream=True, + max_tokens=10, + ) + async for chunk in response: + print(f"chunk: 
{chunk}") + except Exception as e: + pytest.fail(f"GOT exception for HF In streaming{e}") + +asyncio.run(test_text_completion_async_stream()) \ No newline at end of file diff --git a/litellm/utils.py b/litellm/utils.py index 2b23d6a78..bdd935119 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -8,6 +8,7 @@ # Thank you users! We ❤️ you! - Krrish & Ishaan import sys, re +import litellm import dotenv, json, traceback, threading import subprocess, os import litellm, openai @@ -19,6 +20,7 @@ import uuid import aiohttp import logging import asyncio, httpx, inspect +from inspect import iscoroutine import copy from tokenizers import Tokenizer from dataclasses import ( @@ -38,12 +40,14 @@ from .integrations.langsmith import LangsmithLogger from .integrations.weights_biases import WeightsBiasesLogger from .integrations.custom_logger import CustomLogger from .integrations.langfuse import LangFuseLogger +from .integrations.dynamodb import DyanmoDBLogger from .integrations.litedebugger import LiteDebugger from openai import OpenAIError as OriginalError from openai._models import BaseModel as OpenAIObject from .exceptions import ( AuthenticationError, BadRequestError, + NotFoundError, RateLimitError, ServiceUnavailableError, OpenAIError, @@ -51,7 +55,8 @@ from .exceptions import ( Timeout, APIConnectionError, APIError, - BudgetExceededError + BudgetExceededError, + UnprocessableEntityError ) from typing import cast, List, Dict, Union, Optional, Literal from .caching import Cache @@ -75,6 +80,7 @@ langsmithLogger = None weightsBiasesLogger = None customLogger = None langFuseLogger = None +dynamoLogger = None llmonitorLogger = None aispendLogger = None berrispendLogger = None @@ -126,7 +132,7 @@ def map_finish_reason(finish_reason: str): # openai supports 5 stop sequences - # cohere mapping - https://docs.cohere.com/reference/generate elif finish_reason == "COMPLETE": return "stop" - elif finish_reason == "MAX_TOKENS": + elif finish_reason == "MAX_TOKENS": # cohere + vertex ai return "length" elif finish_reason == "ERROR_TOXIC": return "content_filter" @@ -135,6 +141,10 @@ def map_finish_reason(finish_reason: str): # openai supports 5 stop sequences - # huggingface mapping https://huggingface.github.io/text-generation-inference/#/Text%20Generation%20Inference/generate_stream elif finish_reason == "eos_token" or finish_reason == "stop_sequence": return "stop" + elif finish_reason == "FINISH_REASON_UNSPECIFIED" or finish_reason == "STOP": # vertex ai - got from running `print(dir(response_obj.candidates[0].finish_reason))`: ['FINISH_REASON_UNSPECIFIED', 'MAX_TOKENS', 'OTHER', 'RECITATION', 'SAFETY', 'STOP',] + return "stop" + elif finish_reason == "SAFETY": # vertex ai + return "content_filter" return finish_reason class FunctionCall(OpenAIObject): @@ -178,6 +188,14 @@ class Message(OpenAIObject): # Allow dictionary-style assignment of attributes setattr(self, key, value) + def json(self, **kwargs): + try: + return self.model_dump() # noqa + except: + # if using pydantic v1 + return self.dict() + + class Delta(OpenAIObject): def __init__(self, content=None, role=None, **params): super(Delta, self).__init__(**params) @@ -352,6 +370,13 @@ class ModelResponse(OpenAIObject): def __setitem__(self, key, value): # Allow dictionary-style assignment of attributes setattr(self, key, value) + + def json(self, **kwargs): + try: + return self.model_dump() # noqa + except: + # if using pydantic v1 + return self.dict() class Embedding(OpenAIObject): embedding: list = [] @@ -417,6 +442,13 @@ class 
EmbeddingResponse(OpenAIObject): def __setitem__(self, key, value): # Allow dictionary-style assignment of attributes setattr(self, key, value) + + def json(self, **kwargs): + try: + return self.model_dump() # noqa + except: + # if using pydantic v1 + return self.dict() class TextChoices(OpenAIObject): def __init__(self, finish_reason=None, index=0, text=None, logprobs=None, **params): @@ -546,8 +578,9 @@ class Logging: self.litellm_call_id = litellm_call_id self.function_id = function_id self.streaming_chunks = [] # for generating complete stream response + self.model_call_details = {} - def update_environment_variables(self, model, user, optional_params, litellm_params): + def update_environment_variables(self, model, user, optional_params, litellm_params, **additional_params): self.optional_params = optional_params self.model = model self.user = user @@ -562,7 +595,9 @@ class Logging: "start_time": self.start_time, "stream": self.stream, "user": user, - **self.optional_params + "call_type": str(self.call_type), + **self.optional_params, + **additional_params } def _pre_call(self, input, api_key, model=None, additional_args={}): @@ -793,7 +828,7 @@ class Logging: ) pass - def _success_handler_helper_fn(self, result=None, start_time=None, end_time=None): + def _success_handler_helper_fn(self, result=None, start_time=None, end_time=None, cache_hit=None): try: if start_time is None: start_time = self.start_time @@ -801,9 +836,7 @@ class Logging: end_time = datetime.datetime.now() self.model_call_details["log_event_type"] = "successful_api_call" self.model_call_details["end_time"] = end_time - - if isinstance(result, OpenAIObject): - result = result.model_dump() + self.model_call_details["cache_hit"] = cache_hit if litellm.max_budget and self.stream: time_diff = (end_time - start_time).total_seconds() @@ -811,36 +844,33 @@ class Logging: litellm._current_cost += litellm.completion_cost(model=self.model, prompt="", completion=result["content"], total_time=float_diff) return start_time, end_time, result - except: - pass + except Exception as e: + print_verbose(f"[Non-Blocking] LiteLLM.Success_Call Error: {str(e)}") - def success_handler(self, result=None, start_time=None, end_time=None, **kwargs): + def success_handler(self, result=None, start_time=None, end_time=None, cache_hit=None, **kwargs): print_verbose( f"Logging Details LiteLLM-Success Call" ) + # print(f"original response in success handler: {self.model_call_details['original_response']}") try: - print_verbose(f"success callbacks: {litellm.success_callback}") + print_verbose(f"success callbacks: {litellm.success_callback}") ## BUILD COMPLETE STREAMED RESPONSE complete_streaming_response = None - if self.stream == True and self.model_call_details.get("litellm_params", {}).get("acompletion", False) == True: - # if it's acompletion == True, chunks are built/appended in async_success_handler + if self.stream and self.model_call_details.get("litellm_params", {}).get("acompletion", False) == False: # only call stream chunk builder if it's not acompletion() if result.choices[0].finish_reason is not None: # if it's the last chunk - complete_streaming_response = litellm.stream_chunk_builder(self.streaming_chunks, messages=self.model_call_details.get("messages", None)) - else: - # this is a completion() call - if self.stream: - print_verbose("success callback - assembling complete streaming response") - if result.choices[0].finish_reason is not None: # if it's the last chunk - print_verbose(f"success callback - Got the very Last chunk. 
Assembling {self.streaming_chunks}") - self.streaming_chunks.append(result) + self.streaming_chunks.append(result) + # print_verbose(f"final set of received chunks: {self.streaming_chunks}") + try: complete_streaming_response = litellm.stream_chunk_builder(self.streaming_chunks, messages=self.model_call_details.get("messages", None)) - print_verbose(f"success callback - complete streamign response{complete_streaming_response}") - else: - self.streaming_chunks.append(result) + except: + complete_streaming_response = None + else: + self.streaming_chunks.append(result) + if complete_streaming_response: self.model_call_details["complete_streaming_response"] = complete_streaming_response - start_time, end_time, result = self._success_handler_helper_fn(start_time=start_time, end_time=end_time, result=result) + start_time, end_time, result = self._success_handler_helper_fn(start_time=start_time, end_time=end_time, result=result, cache_hit=cache_hit) for callback in litellm.success_callback: try: if callback == "lite_debugger": @@ -857,9 +887,6 @@ class Logging: call_type = self.call_type, stream = self.stream, ) - if callback == "api_manager": - print_verbose("reaches api manager for updating model cost") - litellm.apiManager.update_cost(completion_obj=result, user=self.user) if callback == "promptlayer": print_verbose("reaches promptlayer for logging!") promptLayerLogger.log_event( @@ -947,6 +974,7 @@ class Logging: print_verbose=print_verbose, ) if callback == "langfuse": + global langFuseLogger print_verbose("reaches langfuse for logging!") kwargs = {} for k, v in self.model_call_details.items(): @@ -959,7 +987,8 @@ class Logging: else: print_verbose("reaches langfuse for streaming logging!") result = kwargs["complete_streaming_response"] - + if langFuseLogger is None: + langFuseLogger = LangFuseLogger() langFuseLogger.log_event( kwargs=kwargs, response_obj=result, @@ -967,7 +996,7 @@ class Logging: end_time=end_time, print_verbose=print_verbose, ) - if callback == "cache": + if callback == "cache" and litellm.cache is not None: # this only logs streaming once, complete_streaming_response exists i.e when stream ends print_verbose("success_callback: reaches cache for logging!") kwargs = self.model_call_details @@ -992,7 +1021,7 @@ class Logging: end_time=end_time, print_verbose=print_verbose, ) - if isinstance(callback, CustomLogger): # custom logger class + elif isinstance(callback, CustomLogger) and self.model_call_details.get("litellm_params", {}).get("acompletion", False) == False and self.model_call_details.get("litellm_params", {}).get("aembedding", False) == False: # custom logger class print_verbose(f"success callbacks: Running Custom Logger Class") if self.stream and complete_streaming_response is None: callback.log_stream_event( @@ -1037,26 +1066,31 @@ class Logging: ) pass - async def async_success_handler(self, result=None, start_time=None, end_time=None, **kwargs): + async def async_success_handler(self, result=None, start_time=None, end_time=None, cache_hit=None, **kwargs): """ Implementing async callbacks, to handle asyncio event loop issues when custom integrations need to use async functions. 
""" print_verbose(f"Async success callbacks: {litellm._async_success_callback}") - ## BUILD COMPLETE STREAMED RESPONSE complete_streaming_response = None if self.stream: if result.choices[0].finish_reason is not None: # if it's the last chunk self.streaming_chunks.append(result) - complete_streaming_response = litellm.stream_chunk_builder(self.streaming_chunks, messages=self.model_call_details.get("messages", None)) + # print_verbose(f"final set of received chunks: {self.streaming_chunks}") + try: + complete_streaming_response = litellm.stream_chunk_builder(self.streaming_chunks, messages=self.model_call_details.get("messages", None)) + except Exception as e: + print_verbose(f"Error occurred building stream chunk: {traceback.format_exc()}") + complete_streaming_response = None else: self.streaming_chunks.append(result) if complete_streaming_response: + print_verbose("Async success callbacks: Got a complete streaming response") self.model_call_details["complete_streaming_response"] = complete_streaming_response - start_time, end_time, result = self._success_handler_helper_fn(start_time=start_time, end_time=end_time, result=result) + start_time, end_time, result = self._success_handler_helper_fn(start_time=start_time, end_time=end_time, result=result, cache_hit=cache_hit) for callback in litellm._async_success_callback: try: - if callback == "cache": + if callback == "cache" and litellm.cache is not None: # set_cache once complete streaming response is built print_verbose("async success_callback: reaches cache for logging!") kwargs = self.model_call_details @@ -1079,6 +1113,13 @@ class Logging: start_time=start_time, end_time=end_time, ) + else: + await callback.async_log_stream_event( # [TODO]: move this to being an async log stream event function + kwargs=self.model_call_details, + response_obj=result, + start_time=start_time, + end_time=end_time + ) else: await callback.async_log_success_event( kwargs=self.model_call_details, @@ -1096,29 +1137,82 @@ class Logging: print_verbose=print_verbose, callback_func=callback ) + if callback == "dynamodb": + global dynamoLogger + if dynamoLogger is None: + dynamoLogger = DyanmoDBLogger() + if self.stream: + if "complete_streaming_response" in self.model_call_details: + print_verbose("DynamoDB Logger: Got Stream Event - Completed Stream Response") + await dynamoLogger._async_log_event( + kwargs=self.model_call_details, + response_obj=self.model_call_details["complete_streaming_response"], + start_time=start_time, + end_time=end_time, + print_verbose=print_verbose + ) + else: + print_verbose("DynamoDB Logger: Got Stream Event - No complete stream response as yet") + else: + await dynamoLogger._async_log_event( + kwargs=self.model_call_details, + response_obj=result, + start_time=start_time, + end_time=end_time, + print_verbose=print_verbose, + ) + if callback == "langfuse": + global langFuseLogger + print_verbose("reaches langfuse for logging!") + kwargs = {} + for k, v in self.model_call_details.items(): + if k != "original_response": # copy.deepcopy raises errors as this could be a coroutine + kwargs[k] = v + # this only logs streaming once, complete_streaming_response exists i.e when stream ends + if self.stream: + if "complete_streaming_response" not in kwargs: + return + else: + print_verbose("reaches langfuse for streaming logging!") + result = kwargs["complete_streaming_response"] + if langFuseLogger is None: + langFuseLogger = LangFuseLogger() + await langFuseLogger._async_log_event( + kwargs=kwargs, + response_obj=result, + 
start_time=start_time, + end_time=end_time, + print_verbose=print_verbose, + ) except: print_verbose( f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}" ) + pass + + def _failure_handler_helper_fn(self, exception, traceback_exception, start_time=None, end_time=None): + if start_time is None: + start_time = self.start_time + if end_time is None: + end_time = datetime.datetime.now() + + # on some exceptions, model_call_details is not always initialized, this ensures that we still log those exceptions + if not hasattr(self, "model_call_details"): + self.model_call_details = {} + + self.model_call_details["log_event_type"] = "failed_api_call" + self.model_call_details["exception"] = exception + self.model_call_details["traceback_exception"] = traceback_exception + self.model_call_details["end_time"] = end_time + self.model_call_details.setdefault("original_response", None) + return start_time, end_time def failure_handler(self, exception, traceback_exception, start_time=None, end_time=None): print_verbose( f"Logging Details LiteLLM-Failure Call" ) try: - if start_time is None: - start_time = self.start_time - if end_time is None: - end_time = datetime.datetime.now() - - # on some exceptions, model_call_details is not always initialized, this ensures that we still log those exceptions - if not hasattr(self, "model_call_details"): - self.model_call_details = {} - - self.model_call_details["log_event_type"] = "failed_api_call" - self.model_call_details["exception"] = exception - self.model_call_details["traceback_exception"] = traceback_exception - self.model_call_details["end_time"] = end_time + start_time, end_time = self._failure_handler_helper_fn(exception=exception, traceback_exception=traceback_exception, start_time=start_time, end_time=end_time) result = None # result sent to all loggers, init this to None incase it's not created for callback in litellm.failure_callback: try: @@ -1184,7 +1278,7 @@ class Logging: print_verbose=print_verbose, callback_func=callback ) - elif isinstance(callback, CustomLogger): # custom logger class + elif isinstance(callback, CustomLogger) and self.model_call_details.get("litellm_params", {}).get("acompletion", False) == False and self.model_call_details.get("litellm_params", {}).get("aembedding", False) == False: # custom logger class callback.log_failure_event( start_time=start_time, end_time=end_time, @@ -1210,16 +1304,8 @@ class Logging: """ Implementing async callbacks, to handle asyncio event loop issues when custom integrations need to use async functions. 
""" - # on some exceptions, model_call_details is not always initialized, this ensures that we still log those exceptions - if not hasattr(self, "model_call_details"): - self.model_call_details = {} - - self.model_call_details["log_event_type"] = "failed_api_call" - self.model_call_details["exception"] = exception - self.model_call_details["traceback_exception"] = traceback_exception - self.model_call_details["end_time"] = end_time - result = {} # result sent to all loggers, init this to None incase it's not created - + start_time, end_time = self._failure_handler_helper_fn(exception=exception, traceback_exception=traceback_exception, start_time=start_time, end_time=end_time) + result = None # result sent to all loggers, init this to None incase it's not created for callback in litellm._async_failure_callback: try: if isinstance(callback, CustomLogger): # custom logger class @@ -1238,7 +1324,7 @@ class Logging: print_verbose=print_verbose, callback_func=callback ) - except: + except Exception as e: print_verbose( f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}" ) @@ -1378,6 +1464,15 @@ def client(original_function): if inspect.iscoroutinefunction(callback): litellm._async_success_callback.append(callback) removed_async_items.append(index) + elif callback == "dynamodb": + # dynamo is an async callback, it's used for the proxy and needs to be async + # we only support async dynamo db logging for acompletion/aembedding since that's used on proxy + litellm._async_success_callback.append(callback) + removed_async_items.append(index) + elif callback == "langfuse" and inspect.iscoroutinefunction(original_function): + # use async success callback for langfuse if this is litellm.acompletion(). Streaming logging does not work otherwise + litellm._async_success_callback.append(callback) + removed_async_items.append(index) # Pop the async items from success_callback in reverse order to avoid index issues for index in reversed(removed_async_items): @@ -1407,6 +1502,7 @@ def client(original_function): model = args[0] if len(args) > 0 else kwargs["model"] call_type = original_function.__name__ if call_type == CallTypes.completion.value or call_type == CallTypes.acompletion.value: + messages = None if len(args) > 1: messages = args[1] elif kwargs.get("messages", None): @@ -1477,16 +1573,15 @@ def client(original_function): raise BudgetExceededError(current_cost=litellm._current_cost, max_budget=litellm.max_budget) # [OPTIONAL] CHECK CACHE - # remove this after deprecating litellm.caching - if (litellm.caching or litellm.caching_with_models) and litellm.cache is None: - litellm.cache = Cache() - print_verbose(f"kwargs[caching]: {kwargs.get('caching', False)}; litellm.cache: {litellm.cache}") # if caching is false, don't run this if (kwargs.get("caching", None) is None and litellm.cache is not None) or kwargs.get("caching", False) == True: # allow users to control returning cached responses from the completion function # checking cache - if (litellm.cache != None or litellm.caching or litellm.caching_with_models): + print_verbose(f"INSIDE CHECKING CACHE") + if litellm.cache is not None and str(original_function.__name__) in litellm.cache.supported_call_types: print_verbose(f"Checking Cache") + preset_cache_key = litellm.cache.get_cache_key(*args, **kwargs) + kwargs["preset_cache_key"] = preset_cache_key # for streaming calls, we need to pass the preset_cache_key cached_result = litellm.cache.get_cache(*args, **kwargs) if cached_result != None: if 
"detail" in cached_result: @@ -1522,17 +1617,12 @@ def client(original_function): post_call_processing(original_response=result, model=model) # [OPTIONAL] ADD TO CACHE - if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object + if litellm.cache is not None and str(original_function.__name__) in litellm.cache.supported_call_types: litellm.cache.add_cache(result, *args, **kwargs) # LOG SUCCESS - handle streaming success logging in the _next_ object, remove `handle_success` once it's deprecated print_verbose(f"Wrapper: Completed Call, calling success_handler") threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start() - # threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start() - my_thread = threading.Thread( - target=handle_success, args=(args, kwargs, result, start_time, end_time) - ) # don't interrupt execution of main thread - my_thread.start() # RETURN RESULT result._response_ms = (end_time - start_time).total_seconds() * 1000 # return response latency in ms like openai return result @@ -1605,7 +1695,7 @@ def client(original_function): if (kwargs.get("caching", None) is None and litellm.cache is not None) or kwargs.get("caching", False) == True: # allow users to control returning cached responses from the completion function # checking cache print_verbose(f"INSIDE CHECKING CACHE") - if litellm.cache is not None: + if litellm.cache is not None and str(original_function.__name__) in litellm.cache.supported_call_types: print_verbose(f"Checking Cache") cached_result = litellm.cache.get_cache(*args, **kwargs) if cached_result != None: @@ -1613,13 +1703,22 @@ def client(original_function): call_type = original_function.__name__ if call_type == CallTypes.acompletion.value and isinstance(cached_result, dict): if kwargs.get("stream", False) == True: - return convert_to_streaming_response_async( + cached_result = convert_to_streaming_response_async( response_object=cached_result, ) else: - return convert_to_model_response_object(response_object=cached_result, model_response_object=ModelResponse()) - else: - return cached_result + cached_result = convert_to_model_response_object(response_object=cached_result, model_response_object=ModelResponse()) + elif call_type == CallTypes.aembedding.value and isinstance(cached_result, dict): + cached_result = convert_to_model_response_object(response_object=cached_result, model_response_object=EmbeddingResponse(), response_type="embedding") + # LOG SUCCESS + cache_hit = True + end_time = datetime.datetime.now() + model, custom_llm_provider, dynamic_api_key, api_base = litellm.get_llm_provider(model=model, custom_llm_provider=kwargs.get('custom_llm_provider', None), api_base=kwargs.get('api_base', None), api_key=kwargs.get('api_key', None)) + print_verbose(f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}") + logging_obj.update_environment_variables(model=model, user=kwargs.get('user', None), optional_params={}, litellm_params={"logger_fn": kwargs.get('logger_fn', None), "acompletion": True, "metadata": kwargs.get("metadata", {}), "model_info": kwargs.get("model_info", {}), "proxy_server_request": kwargs.get("proxy_server_request", None), "preset_cache_key": kwargs.get("preset_cache_key", None), "stream_response": kwargs.get("stream_response", {})}, input=kwargs.get('messages', ""), api_key=kwargs.get('api_key', None), original_response=str(cached_result), additional_args=None, 
stream=kwargs.get('stream', False)) + asyncio.create_task(logging_obj.async_success_handler(cached_result, start_time, end_time, cache_hit)) + threading.Thread(target=logging_obj.success_handler, args=(cached_result, start_time, end_time, cache_hit)).start() + return cached_result # MODEL CALL result = await original_function(*args, **kwargs) end_time = datetime.datetime.now() @@ -1636,10 +1735,13 @@ def client(original_function): post_call_processing(original_response=result, model=model) # [OPTIONAL] ADD TO CACHE - if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object - litellm.cache.add_cache(result, *args, **kwargs) + if litellm.cache is not None and str(original_function.__name__) in litellm.cache.supported_call_types: + if isinstance(result, litellm.ModelResponse) or isinstance(result, litellm.EmbeddingResponse): + asyncio.create_task(litellm.cache._async_add_cache(result.json(), *args, **kwargs)) + else: + asyncio.create_task(litellm.cache._async_add_cache(result, *args, **kwargs)) # LOG SUCCESS - handle streaming success logging in the _next_ object - print_verbose(f"Async Wrapper: Completed Call, calling async_success_handler") + print_verbose(f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}") asyncio.create_task(logging_obj.async_success_handler(result, start_time, end_time)) threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start() # RETURN RESULT @@ -1647,6 +1749,19 @@ def client(original_function): result._response_ms = (end_time - start_time).total_seconds() * 1000 # return response latency in ms like openai return result except Exception as e: + traceback_exception = traceback.format_exc() + crash_reporting(*args, **kwargs, exception=traceback_exception) + end_time = datetime.datetime.now() + if logging_obj: + try: + logging_obj.failure_handler(e, traceback_exception, start_time, end_time) # DO NOT MAKE THREADED - router retry fallback relies on this! 
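The cache-hit branch above rebuilds `ModelResponse`/`EmbeddingResponse` objects from cached dicts and now logs them with `cache_hit=True`, while fresh results are written back asynchronously via `_async_add_cache`. A minimal sketch of exercising that path from user code, assuming an in-memory `litellm.caching.Cache`, valid API credentials, and that `acompletion` is in the cache's default `supported_call_types` (model name is illustrative):

```python
import asyncio

import litellm
from litellm.caching import Cache

litellm.cache = Cache()  # default supported_call_types is assumed to include acompletion

async def main():
    messages = [{"role": "user", "content": "Hey, how's it going?"}]
    # First call goes to the API; the result is stored via cache._async_add_cache
    first = await litellm.acompletion(model="gpt-3.5-turbo", messages=messages, caching=True)
    # An identical second call should be served from cache and logged with cache_hit=True
    second = await litellm.acompletion(model="gpt-3.5-turbo", messages=messages, caching=True)
    print(first.choices[0].message.content == second.choices[0].message.content)

asyncio.run(main())
```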
+ except Exception as e: + raise e + try: + await logging_obj.async_failure_handler(e, traceback_exception, start_time, end_time) + except Exception as e: + raise e + call_type = original_function.__name__ if call_type == CallTypes.acompletion.value: num_retries = ( @@ -1656,27 +1771,24 @@ def client(original_function): ) litellm.num_retries = None # set retries to None to prevent infinite loops context_window_fallback_dict = kwargs.get("context_window_fallback_dict", {}) - + if num_retries: - kwargs["num_retries"] = num_retries - kwargs["original_function"] = original_function - if (isinstance(e, openai.RateLimitError)): # rate limiting specific error - kwargs["retry_strategy"] = "exponential_backoff_retry" - elif (isinstance(e, openai.APIError)): # generic api error - kwargs["retry_strategy"] = "constant_retry" - return await litellm.acompletion_with_retries(*args, **kwargs) + try: + kwargs["num_retries"] = num_retries + kwargs["original_function"] = original_function + if (isinstance(e, openai.RateLimitError)): # rate limiting specific error + kwargs["retry_strategy"] = "exponential_backoff_retry" + elif (isinstance(e, openai.APIError)): # generic api error + kwargs["retry_strategy"] = "constant_retry" + return await litellm.acompletion_with_retries(*args, **kwargs) + except: + pass elif isinstance(e, litellm.exceptions.ContextWindowExceededError) and context_window_fallback_dict and model in context_window_fallback_dict: if len(args) > 0: args[0] = context_window_fallback_dict[model] else: kwargs["model"] = context_window_fallback_dict[model] return await original_function(*args, **kwargs) - traceback_exception = traceback.format_exc() - crash_reporting(*args, **kwargs, exception=traceback_exception) - end_time = datetime.datetime.now() - if logging_obj: - logging_obj.failure_handler(e, traceback_exception, start_time, end_time) # DO NOT MAKE THREADED - router retry fallback relies on this! 
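The retry logic above now only runs when `num_retries` is set, picking exponential backoff for rate-limit errors and constant retry for generic API errors, while `context_window_fallback_dict` swaps in a larger model on `ContextWindowExceededError`. A rough usage sketch under those assumptions (retry count and fallback mapping are illustrative):

```python
import asyncio

import litellm

async def main():
    response = await litellm.acompletion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Summarize the attached report."}],
        num_retries=2,  # RateLimitError -> exponential backoff, APIError -> constant retry
        context_window_fallback_dict={"gpt-3.5-turbo": "gpt-3.5-turbo-16k"},
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```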
- asyncio.create_task(logging_obj.async_failure_handler(e, traceback_exception, start_time, end_time)) raise e is_coroutine = inspect.iscoroutinefunction(original_function) @@ -2056,7 +2168,6 @@ def register_model(model_cost: Union[str, dict]): return model_cost def get_litellm_params( - return_async=False, api_key=None, force_timeout=600, azure=False, @@ -2074,10 +2185,10 @@ def get_litellm_params( model_info=None, proxy_server_request=None, acompletion=None, + preset_cache_key = None ): litellm_params = { "acompletion": acompletion, - "return_async": return_async, "api_key": api_key, "force_timeout": force_timeout, "logger_fn": logger_fn, @@ -2090,12 +2201,46 @@ def get_litellm_params( "metadata": metadata, "model_info": model_info, "proxy_server_request": proxy_server_request, + "preset_cache_key": preset_cache_key, "stream_response": {} # litellm_call_id: ModelResponse Dict } return litellm_params +def get_optional_params_embeddings( + # 2 optional params + user=None, + encoding_format=None, + custom_llm_provider="", + **kwargs +): + # retrieve all parameters passed to the function + passed_params = locals() + custom_llm_provider = passed_params.pop("custom_llm_provider", None) + special_params = passed_params.pop("kwargs") + for k, v in special_params.items(): + passed_params[k] = v + + default_params = { + "user": None, + "encoding_format": None + } + + non_default_params = {k: v for k, v in passed_params.items() if (k in default_params and v != default_params[k])} + ## raise exception if non-default value passed for non-openai/azure embedding calls + if custom_llm_provider != "openai" and custom_llm_provider != "azure": + if len(non_default_params.keys()) > 0: + if litellm.drop_params is True: # drop the unsupported non-default values + keys = list(non_default_params.keys()) + for k in keys: + non_default_params.pop(k, None) + return non_default_params + raise UnsupportedParamsError(status_code=500, message=f"Setting user/encoding format is not supported by {custom_llm_provider}. 
To drop it from the call, set `litellm.drop_params = True`.") + + final_params = {**non_default_params, **kwargs} + return final_params + def get_optional_params( # use the openai defaults # 12 optional params functions=[], @@ -2107,7 +2252,7 @@ def get_optional_params( # use the openai defaults stop=None, max_tokens=None, presence_penalty=None, - frequency_penalty=0, + frequency_penalty=None, logit_bias=None, user=None, model=None, @@ -2591,6 +2736,28 @@ def get_optional_params( # use the openai defaults optional_params["stream"] = stream if max_tokens: optional_params["max_tokens"] = max_tokens + elif custom_llm_provider == "mistral": + supported_params = ["temperature", "top_p", "stream", "max_tokens"] + _check_valid_arg(supported_params=supported_params) + optional_params = non_default_params + if temperature is not None: + optional_params["temperature"] = temperature + if top_p is not None: + optional_params["top_p"] = top_p + if stream is not None: + optional_params["stream"] = stream + if max_tokens is not None: + optional_params["max_tokens"] = max_tokens + + # check safe_mode, random_seed: https://docs.mistral.ai/api/#operation/createChatCompletion + safe_mode = passed_params.pop("safe_mode", None) + random_seed = passed_params.pop("random_seed", None) + extra_body = {} + if safe_mode is not None: + extra_body["safe_mode"] = safe_mode + if random_seed is not None: + extra_body["random_seed"] = random_seed + optional_params["extra_body"] = extra_body # openai client supports `extra_body` param else: # assume passing in params for openai/azure openai supported_params = ["functions", "function_call", "temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "response_format", "seed", "tools", "tool_choice", "max_retries"] _check_valid_arg(supported_params=supported_params) @@ -2661,6 +2828,10 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_ # deepinfra is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1 api_base = "https://api.deepinfra.com/v1/openai" dynamic_api_key = get_secret("DEEPINFRA_API_KEY") + elif custom_llm_provider == "mistral": + # mistral is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.mistral.ai + api_base = "https://api.mistral.ai/v1" + dynamic_api_key = get_secret("MISTRAL_API_KEY") return model, custom_llm_provider, dynamic_api_key, api_base # check if api base is a known openai compatible endpoint @@ -2676,6 +2847,9 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_ elif endpoint == "api.deepinfra.com/v1/openai": custom_llm_provider = "deepinfra" dynamic_api_key = get_secret("DEEPINFRA_API_KEY") + elif endpoint == "api.mistral.ai/v1": + custom_llm_provider = "mistral" + dynamic_api_key = get_secret("MISTRAL_API_KEY") return model, custom_llm_provider, dynamic_api_key, api_base # check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, et.) 
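Per the Mistral branch above, `safe_mode` and `random_seed` are forwarded through the OpenAI client's `extra_body`, and `api.mistral.ai/v1` is resolved as an OpenAI-compatible endpoint keyed by `MISTRAL_API_KEY`. A hedged example of what a call using these parameters might look like (parameter values are illustrative):

```python
import litellm

response = litellm.completion(
    model="mistral/mistral-medium",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    temperature=0.7,
    max_tokens=10,
    safe_mode=True,    # forwarded via optional_params["extra_body"]
    random_seed=42,    # forwarded via optional_params["extra_body"]
)
print(response.choices[0].message.content)
```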
@@ -2703,12 +2877,13 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_ ## openrouter elif model in litellm.maritalk_models: custom_llm_provider = "maritalk" - ## vertex - text + chat models + ## vertex - text + chat + language (gemini) models elif( model in litellm.vertex_chat_models or model in litellm.vertex_code_chat_models or model in litellm.vertex_text_models or - model in litellm.vertex_code_text_models + model in litellm.vertex_code_text_models or + model in litellm.vertex_language_models ): custom_llm_provider = "vertex_ai" ## ai21 @@ -2736,7 +2911,18 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_ print() # noqa print("\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m") # noqa print() # noqa - raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers") + error_str = f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. You passed model={model}\n Pass model as E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/starcoder',..)` Learn more: https://docs.litellm.ai/docs/providers" + # maps to openai.NotFoundError, this is raised when openai does not recognize the llm + raise litellm.exceptions.NotFoundError( # type: ignore + message=error_str, + model=model, + response=httpx.Response( + status_code=404, + content=error_str, + request=httpx.request(method="completion", url="https://github.com/BerriAI/litellm") # type: ignore + ), + llm_provider="" + ) return model, custom_llm_provider, dynamic_api_key, api_base except Exception as e: raise e @@ -3280,7 +3466,7 @@ def validate_environment(model: Optional[str]=None) -> dict: return {"keys_in_environment": keys_in_environment, "missing_keys": missing_keys} def set_callbacks(callback_list, function_id=None): - global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, langsmithLogger + global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, langsmithLogger, dynamoLogger try: for callback in callback_list: print_verbose(f"callback: {callback}") @@ -3343,6 +3529,8 @@ def set_callbacks(callback_list, function_id=None): promptLayerLogger = PromptLayerLogger() elif callback == "langfuse": langFuseLogger = LangFuseLogger() + elif callback == "dynamodb": + dynamoLogger = DyanmoDBLogger() elif callback == "wandb": weightsBiasesLogger = WeightsBiasesLogger() elif callback == "langsmith": @@ -4043,7 +4231,7 @@ def exception_type( llm_provider=custom_llm_provider ) - if custom_llm_provider == "openai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "custom_openai": + if custom_llm_provider == "openai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "custom_openai" or custom_llm_provider in litellm.openai_compatible_providers: if "This model's maximum context length is" in error_str or "Request too large" in error_str: 
                exception_mapping_worked = True
                raise ContextWindowExceededError(
@@ -4052,6 +4240,14 @@
                    model=model,
                    response=original_exception.response
                )
+            elif "invalid_request_error" in error_str and "model_not_found" in error_str:
+                exception_mapping_worked = True
+                raise NotFoundError(
+                    message=f"OpenAIException - {original_exception.message}",
+                    llm_provider="openai",
+                    model=model,
+                    response=original_exception.response
+                )
            elif "invalid_request_error" in error_str and "Incorrect API key provided" not in error_str:
                exception_mapping_worked = True
                raise BadRequestError(
                    message=f"OpenAIException - {original_exception.message}",
@@ -4070,6 +4266,14 @@
                    model=model,
                    response=original_exception.response
                )
+            elif original_exception.status_code == 404:
+                exception_mapping_worked = True
+                raise NotFoundError(
+                    message=f"OpenAIException - {original_exception.message}",
+                    model=model,
+                    llm_provider="openai",
+                    response=original_exception.response
+                )
            elif original_exception.status_code == 408:
                exception_mapping_worked = True
                raise Timeout(
@@ -4077,7 +4281,7 @@
                    model=model,
                    llm_provider="openai",
                )
-            if original_exception.status_code == 422:
+            elif original_exception.status_code == 422:
                exception_mapping_worked = True
                raise BadRequestError(
                    message=f"OpenAIException - {original_exception.message}",
@@ -4354,7 +4558,15 @@
                )
            elif "403" in error_str:
                exception_mapping_worked = True
-                raise AuthenticationError(
+                raise UnprocessableEntityError(
+                    message=f"VertexAIException - {error_str}",
+                    model=model,
+                    llm_provider="vertex_ai",
+                    response=original_exception.response
+                )
+            elif "The response was blocked." in error_str:
+                exception_mapping_worked = True
+                raise UnprocessableEntityError(
                    message=f"VertexAIException - {error_str}",
                    model=model,
                    llm_provider="vertex_ai",
@@ -4733,29 +4945,30 @@
                        llm_provider="together_ai",
                        response=original_exception.response
                    )
-                elif original_exception.status_code == 408:
+            if hasattr(original_exception, "status_code"):
+                if original_exception.status_code == 408:
+                    exception_mapping_worked = True
+                    raise Timeout(
+                        message=f"TogetherAIException - {original_exception.message}",
+                        model=model,
+                        llm_provider="together_ai",
+                        request=original_exception.request
+                    )
+                elif original_exception.status_code == 429:
+                    exception_mapping_worked = True
+                    raise RateLimitError(
+                        message=f"TogetherAIException - {original_exception.message}",
+                        llm_provider="together_ai",
+                        model=model,
+                        response=original_exception.response
+                    )
+                elif original_exception.status_code == 524:
                    exception_mapping_worked = True
                    raise Timeout(
-                        message=f"TogetherAIException - {original_exception.message}",
-                        model=model,
-                        llm_provider="together_ai",
-                        request=original_exception.request
-                    )
-                elif original_exception.status_code == 429:
-                    exception_mapping_worked = True
-                    raise RateLimitError(
                        message=f"TogetherAIException - {original_exception.message}",
                        llm_provider="together_ai",
                        model=model,
-                        response=original_exception.response
                    )
-                elif original_exception.status_code == 524:
-                    exception_mapping_worked = True
-                    raise Timeout(
-                        message=f"TogetherAIException - {original_exception.message}",
-                        llm_provider="together_ai",
-                        model=model,
-                    )
                else:
                    exception_mapping_worked = True
                    raise APIError(
@@ -4868,6 +5081,14 @@
                    model=model,
                    response=original_exception.response
                )
+            elif "DeploymentNotFound" in error_str:
+                exception_mapping_worked = True
+                raise NotFoundError(
+                    message=f"AzureException - {original_exception.message}",
+                    llm_provider="azure",
+                    model=model,
+
response=original_exception.response + ) elif "invalid_request_error" in error_str: exception_mapping_worked = True raise BadRequestError( @@ -5088,9 +5309,6 @@ class CustomStreamWrapper: self.special_tokens = ["<|assistant|>", "<|system|>", "<|user|>", "", ""] self.holding_chunk = "" self.complete_response = "" - if self.logging_obj: - # Log the type of the received item - self.logging_obj.post_call(str(type(completion_stream))) def __iter__(self): return self @@ -5115,10 +5333,6 @@ class CustomStreamWrapper: except Exception as e: raise e - def logging(self, text): - if self.logging_obj: - self.logging_obj.post_call(text) - def check_special_tokens(self, chunk: str, finish_reason: Optional[str]): hold = False if finish_reason: @@ -5413,6 +5627,30 @@ class CustomStreamWrapper: traceback.print_exc() return "" + def handle_ollama_stream(self, chunk): + try: + json_chunk = json.loads(chunk) + if "error" in json_chunk: + raise Exception(f"Ollama Error - {json_chunk}") + + text = "" + is_finished = False + finish_reason = None + if json_chunk["done"] == True: + text = "" + is_finished = True + finish_reason = "stop" + return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason} + elif json_chunk["response"]: + print_verbose(f"delta content: {json_chunk}") + text = json_chunk["response"] + return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason} + else: + raise Exception(f"Ollama Error - {json_chunk}") + except Exception as e: + raise e + + def handle_bedrock_stream(self, chunk): if hasattr(chunk, "get"): chunk = chunk.get('chunk') @@ -5458,6 +5696,7 @@ class CustomStreamWrapper: def chunk_creator(self, chunk): model_response = ModelResponse(stream=True, model=self.model) + model_response.choices = [StreamingChoices()] model_response.choices[0].finish_reason = None response_obj = {} try: @@ -5548,6 +5787,7 @@ class CustomStreamWrapper: model_response.choices[0].finish_reason = response_obj["finish_reason"] self.sent_last_chunk = True elif self.custom_llm_provider == "sagemaker": + print_verbose(f"ENTERS SAGEMAKER STREAMING") if len(self.completion_stream)==0: if self.sent_last_chunk: raise StopIteration @@ -5555,6 +5795,7 @@ class CustomStreamWrapper: model_response.choices[0].finish_reason = "stop" self.sent_last_chunk = True new_chunk = self.completion_stream + print_verbose(f"sagemaker chunk: {new_chunk}") completion_obj["content"] = new_chunk self.completion_stream = self.completion_stream[len(self.completion_stream):] elif self.custom_llm_provider == "petals": @@ -5584,9 +5825,11 @@ class CustomStreamWrapper: self.completion_stream = self.completion_stream[chunk_size:] time.sleep(0.05) elif self.custom_llm_provider == "ollama": - if "error" in chunk: - exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=chunk["error"]) - completion_obj = chunk + response_obj = self.handle_ollama_stream(chunk) + completion_obj["content"] = response_obj["text"] + print_verbose(f"completion obj content: {completion_obj['content']}") + if response_obj["is_finished"]: + model_response.choices[0].finish_reason = response_obj["finish_reason"] elif self.custom_llm_provider == "text-completion-openai": response_obj = self.handle_openai_text_completion_chunk(chunk) completion_obj["content"] = response_obj["text"] @@ -5632,16 +5875,19 @@ class CustomStreamWrapper: completion_obj["role"] = "assistant" self.sent_first_chunk = True model_response.choices[0].delta = Delta(**completion_obj) - # LOGGING - 
threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start() print_verbose(f"model_response: {model_response}") return model_response else: return elif model_response.choices[0].finish_reason: + # flush any remaining holding chunk + if len(self.holding_chunk) > 0: + if model_response.choices[0].delta.content is None: + model_response.choices[0].delta.content = self.holding_chunk + else: + model_response.choices[0].delta.content = self.holding_chunk + model_response.choices[0].delta.content + self.holding_chunk = "" model_response.choices[0].finish_reason = map_finish_reason(model_response.choices[0].finish_reason) # ensure consistent output to openai - # LOGGING - threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start() return model_response elif response_obj is not None and response_obj.get("original_chunk", None) is not None: # function / tool calling branch - only set for openai/azure compatible endpoints # enter this branch when no content has been passed in response @@ -5662,8 +5908,6 @@ class CustomStreamWrapper: if self.sent_first_chunk == False: model_response.choices[0].delta["role"] = "assistant" self.sent_first_chunk = True - # LOGGING - threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start() # log response return model_response else: return @@ -5672,26 +5916,33 @@ class CustomStreamWrapper: except Exception as e: traceback_exception = traceback.format_exc() e.message = str(e) - # LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated - threading.Thread(target=self.logging_obj.failure_handler, args=(e, traceback_exception)).start() raise exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=e) ## needs to handle the empty string case (even starting chunk can be an empty string) def __next__(self): try: - while True: + while True: if isinstance(self.completion_stream, str) or isinstance(self.completion_stream, bytes): chunk = self.completion_stream else: chunk = next(self.completion_stream) + print_verbose(f"value of chunk: {chunk} ") if chunk is not None and chunk != b'': + print_verbose(f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}") response = self.chunk_creator(chunk=chunk) - if response is not None: - return response + print_verbose(f"PROCESSED CHUNK POST CHUNK CREATOR: {response}") + if response is None: + continue + ## LOGGING + threading.Thread(target=self.logging_obj.success_handler, args=(response,)).start() # log response + return response except StopIteration: raise # Re-raise StopIteration except Exception as e: - # Handle other exceptions if needed + print_verbose(f"HITS AN ERROR: {str(e)}\n\n {traceback.format_exc()}") + traceback_exception = traceback.format_exc() + # LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated + threading.Thread(target=self.logging_obj.failure_handler, args=(e, traceback_exception)).start() raise e @@ -5702,24 +5953,41 @@ class CustomStreamWrapper: or self.custom_llm_provider == "azure" or self.custom_llm_provider == "custom_openai" or self.custom_llm_provider == "text-completion-openai" - or self.custom_llm_provider == "huggingface"): + or self.custom_llm_provider == "huggingface" + or self.custom_llm_provider == "ollama" + or self.custom_llm_provider == "vertex_ai"): + print_verbose(f"INSIDE ASYNC STREAMING!!!") + print_verbose(f"value of async completion stream: 
{self.completion_stream}") async for chunk in self.completion_stream: + print_verbose(f"value of async chunk: {chunk}") if chunk == "None" or chunk is None: raise Exception # chunk_creator() does logging/stream chunk building. We need to let it know its being called in_async_func, so we don't double add chunks. # __anext__ also calls async_success_handler, which does logging + print_verbose(f"PROCESSED ASYNC CHUNK PRE CHUNK CREATOR: {chunk}") processed_chunk = self.chunk_creator(chunk=chunk) + print_verbose(f"PROCESSED ASYNC CHUNK POST CHUNK CREATOR: {processed_chunk}") if processed_chunk is None: continue ## LOGGING + threading.Thread(target=self.logging_obj.success_handler, args=(processed_chunk,)).start() # log response asyncio.create_task(self.logging_obj.async_success_handler(processed_chunk,)) return processed_chunk raise StopAsyncIteration else: # temporary patch for non-aiohttp async calls - return next(self) + # example - boto3 bedrock llms + processed_chunk = next(self) + asyncio.create_task(self.logging_obj.async_success_handler(processed_chunk,)) + return processed_chunk + except StopAsyncIteration: + raise + except StopIteration: + raise StopAsyncIteration # Re-raise StopIteration except Exception as e: + traceback_exception = traceback.format_exc() # Handle any exceptions that might occur during streaming + asyncio.create_task(self.logging_obj.async_failure_handler(e, traceback_exception)) raise StopAsyncIteration class TextCompletionStreamWrapper: @@ -5732,31 +6000,48 @@ class TextCompletionStreamWrapper: def __aiter__(self): return self + + def convert_to_text_completion_object(self, chunk: ModelResponse): + try: + response = TextCompletionResponse() + response["id"] = chunk.get("id", None) + response["object"] = "text_completion" + response["created"] = response.get("created", None) + response["model"] = response.get("model", None) + text_choices = TextChoices() + if isinstance(chunk, Choices): # chunk should always be of type StreamingChoices + raise Exception + text_choices["text"] = chunk["choices"][0]["delta"]["content"] + text_choices["index"] = response["choices"][0]["index"] + text_choices["finish_reason"] = response["choices"][0]["finish_reason"] + response["choices"] = [text_choices] + return response + except Exception as e: + raise Exception(f"Error occurred converting to text completion object - chunk: {chunk}; Error: {str(e)}") def __next__(self): # model_response = ModelResponse(stream=True, model=self.model) response = TextCompletionResponse() try: - while True: # loop until a non-empty string is found - # return this for all models - chunk = next(self.completion_stream) - response["id"] = chunk.get("id", None) - response["object"] = "text_completion" - response["created"] = response.get("created", None) - response["model"] = response.get("model", None) - text_choices = TextChoices() - text_choices["text"] = chunk["choices"][0]["delta"]["content"] - text_choices["index"] = response["choices"][0]["index"] - text_choices["finish_reason"] = response["choices"][0]["finish_reason"] - response["choices"] = [text_choices] - return response + for chunk in self.completion_stream: + if chunk == "None" or chunk is None: + raise Exception + processed_chunk = self.convert_to_text_completion_object(chunk=chunk) + return processed_chunk + raise StopIteration except StopIteration: raise StopIteration except Exception as e: print(f"got exception {e}") # noqa + async def __anext__(self): try: - return next(self) + async for chunk in self.completion_stream: + if chunk == 
"None" or chunk is None: + raise Exception + processed_chunk = self.convert_to_text_completion_object(chunk=chunk) + return processed_chunk + raise StopIteration except StopIteration: raise StopAsyncIteration @@ -6228,12 +6513,3 @@ def transform_logprobs(hf_response): transformed_logprobs = token_info return transformed_logprobs - -# used in LiteLLM Router -def remove_model_id(original_model_string): - # Find the index of "ModelID" in the string - index_of_model_id = original_model_string.find("-ModelID") - # Remove everything after "-ModelID" if it exists - if index_of_model_id != -1: - return original_model_string[:index_of_model_id] - return original_model_string \ No newline at end of file diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 454b2504a..32d28e675 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -210,6 +210,27 @@ "litellm_provider": "anthropic", "mode": "chat" }, + "mistral/mistral-tiny": { + "max_tokens": 8192, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.00000046, + "litellm_provider": "mistral", + "mode": "chat" + }, + "mistral/mistral-small": { + "max_tokens": 8192, + "input_cost_per_token": 0.00000066, + "output_cost_per_token": 0.00000197, + "litellm_provider": "mistral", + "mode": "chat" + }, + "mistral/mistral-medium": { + "max_tokens": 8192, + "input_cost_per_token": 0.00000273, + "output_cost_per_token": 0.00000820, + "litellm_provider": "mistral", + "mode": "chat" + }, "claude-instant-1.2": { "max_tokens": 100000, "max_output_tokens": 8191, @@ -262,6 +283,13 @@ "litellm_provider": "vertex_ai-chat-models", "mode": "chat" }, + "chat-bison@002": { + "max_tokens": 4096, + "input_cost_per_token": 0.000000125, + "output_cost_per_token": 0.000000125, + "litellm_provider": "vertex_ai-chat-models", + "mode": "chat" + }, "chat-bison-32k": { "max_tokens": 32000, "input_cost_per_token": 0.000000125, @@ -287,14 +315,21 @@ "max_tokens": 2048, "input_cost_per_token": 0.000000125, "output_cost_per_token": 0.000000125, - "litellm_provider": "vertex_ai-chat-models", + "litellm_provider": "vertex_ai-code-text-models", "mode": "completion" }, - "code-gecko@latest": { + "code-gecko@002": { "max_tokens": 2048, "input_cost_per_token": 0.000000125, "output_cost_per_token": 0.000000125, - "litellm_provider": "vertex_ai-chat-models", + "litellm_provider": "vertex_ai-code-text-models", + "mode": "completion" + }, + "code-gecko": { + "max_tokens": 2048, + "input_cost_per_token": 0.000000125, + "output_cost_per_token": 0.000000125, + "litellm_provider": "vertex_ai-code-text-models", "mode": "completion" }, "codechat-bison": { @@ -318,6 +353,22 @@ "litellm_provider": "vertex_ai-code-chat-models", "mode": "chat" }, + "gemini-pro": { + "max_tokens": 30720, + "max_output_tokens": 2048, + "input_cost_per_token": 0.00000025, + "output_cost_per_token": 0.0000005, + "litellm_provider": "vertex_ai-language-models", + "mode": "chat" + }, + "gemini-pro-vision": { + "max_tokens": 30720, + "max_output_tokens": 2048, + "input_cost_per_token": 0.00000025, + "output_cost_per_token": 0.0000005, + "litellm_provider": "vertex_ai-vision-models", + "mode": "chat" + }, "palm/chat-bison": { "max_tokens": 4096, "input_cost_per_token": 0.000000125, diff --git a/poetry.lock b/poetry.lock index 58dd309df..1d8765bfd 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,112 +1,100 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. 
+# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. [[package]] name = "aiohttp" -version = "3.8.6" +version = "3.9.1" description = "Async http client/server framework (asyncio)" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:41d55fc043954cddbbd82503d9cc3f4814a40bcef30b3569bc7b5e34130718c1"}, - {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1d84166673694841d8953f0a8d0c90e1087739d24632fe86b1a08819168b4566"}, - {file = "aiohttp-3.8.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:253bf92b744b3170eb4c4ca2fa58f9c4b87aeb1df42f71d4e78815e6e8b73c9e"}, - {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fd194939b1f764d6bb05490987bfe104287bbf51b8d862261ccf66f48fb4096"}, - {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c5f938d199a6fdbdc10bbb9447496561c3a9a565b43be564648d81e1102ac22"}, - {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2817b2f66ca82ee699acd90e05c95e79bbf1dc986abb62b61ec8aaf851e81c93"}, - {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fa375b3d34e71ccccf172cab401cd94a72de7a8cc01847a7b3386204093bb47"}, - {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9de50a199b7710fa2904be5a4a9b51af587ab24c8e540a7243ab737b45844543"}, - {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e1d8cb0b56b3587c5c01de3bf2f600f186da7e7b5f7353d1bf26a8ddca57f965"}, - {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8e31e9db1bee8b4f407b77fd2507337a0a80665ad7b6c749d08df595d88f1cf5"}, - {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7bc88fc494b1f0311d67f29fee6fd636606f4697e8cc793a2d912ac5b19aa38d"}, - {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ec00c3305788e04bf6d29d42e504560e159ccaf0be30c09203b468a6c1ccd3b2"}, - {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad1407db8f2f49329729564f71685557157bfa42b48f4b93e53721a16eb813ed"}, - {file = "aiohttp-3.8.6-cp310-cp310-win32.whl", hash = "sha256:ccc360e87341ad47c777f5723f68adbb52b37ab450c8bc3ca9ca1f3e849e5fe2"}, - {file = "aiohttp-3.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:93c15c8e48e5e7b89d5cb4613479d144fda8344e2d886cf694fd36db4cc86865"}, - {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e2f9cc8e5328f829f6e1fb74a0a3a939b14e67e80832975e01929e320386b34"}, - {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e6a00ffcc173e765e200ceefb06399ba09c06db97f401f920513a10c803604ca"}, - {file = "aiohttp-3.8.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:41bdc2ba359032e36c0e9de5a3bd00d6fb7ea558a6ce6b70acedf0da86458321"}, - {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14cd52ccf40006c7a6cd34a0f8663734e5363fd981807173faf3a017e202fec9"}, - {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2d5b785c792802e7b275c420d84f3397668e9d49ab1cb52bd916b3b3ffcf09ad"}, - {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:1bed815f3dc3d915c5c1e556c397c8667826fbc1b935d95b0ad680787896a358"}, - {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96603a562b546632441926cd1293cfcb5b69f0b4159e6077f7c7dbdfb686af4d"}, - {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d76e8b13161a202d14c9584590c4df4d068c9567c99506497bdd67eaedf36403"}, - {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e3f1e3f1a1751bb62b4a1b7f4e435afcdade6c17a4fd9b9d43607cebd242924a"}, - {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:76b36b3124f0223903609944a3c8bf28a599b2cc0ce0be60b45211c8e9be97f8"}, - {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a2ece4af1f3c967a4390c284797ab595a9f1bc1130ef8b01828915a05a6ae684"}, - {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:16d330b3b9db87c3883e565340d292638a878236418b23cc8b9b11a054aaa887"}, - {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42c89579f82e49db436b69c938ab3e1559e5a4409eb8639eb4143989bc390f2f"}, - {file = "aiohttp-3.8.6-cp311-cp311-win32.whl", hash = "sha256:efd2fcf7e7b9d7ab16e6b7d54205beded0a9c8566cb30f09c1abe42b4e22bdcb"}, - {file = "aiohttp-3.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:3b2ab182fc28e7a81f6c70bfbd829045d9480063f5ab06f6e601a3eddbbd49a0"}, - {file = "aiohttp-3.8.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fdee8405931b0615220e5ddf8cd7edd8592c606a8e4ca2a00704883c396e4479"}, - {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d25036d161c4fe2225d1abff2bd52c34ed0b1099f02c208cd34d8c05729882f0"}, - {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d791245a894be071d5ab04bbb4850534261a7d4fd363b094a7b9963e8cdbd31"}, - {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0cccd1de239afa866e4ce5c789b3032442f19c261c7d8a01183fd956b1935349"}, - {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f13f60d78224f0dace220d8ab4ef1dbc37115eeeab8c06804fec11bec2bbd07"}, - {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a9b5a0606faca4f6cc0d338359d6fa137104c337f489cd135bb7fbdbccb1e39"}, - {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:13da35c9ceb847732bf5c6c5781dcf4780e14392e5d3b3c689f6d22f8e15ae31"}, - {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:4d4cbe4ffa9d05f46a28252efc5941e0462792930caa370a6efaf491f412bc66"}, - {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:229852e147f44da0241954fc6cb910ba074e597f06789c867cb7fb0621e0ba7a"}, - {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:713103a8bdde61d13490adf47171a1039fd880113981e55401a0f7b42c37d071"}, - {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:45ad816b2c8e3b60b510f30dbd37fe74fd4a772248a52bb021f6fd65dff809b6"}, - {file = "aiohttp-3.8.6-cp36-cp36m-win32.whl", hash = "sha256:2b8d4e166e600dcfbff51919c7a3789ff6ca8b3ecce16e1d9c96d95dd569eb4c"}, - {file = "aiohttp-3.8.6-cp36-cp36m-win_amd64.whl", hash = "sha256:0912ed87fee967940aacc5306d3aa8ba3a459fcd12add0b407081fbefc931e53"}, - {file = "aiohttp-3.8.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:e2a988a0c673c2e12084f5e6ba3392d76c75ddb8ebc6c7e9ead68248101cd446"}, - {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebf3fd9f141700b510d4b190094db0ce37ac6361a6806c153c161dc6c041ccda"}, - {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3161ce82ab85acd267c8f4b14aa226047a6bee1e4e6adb74b798bd42c6ae1f80"}, - {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95fc1bf33a9a81469aa760617b5971331cdd74370d1214f0b3109272c0e1e3c"}, - {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c43ecfef7deaf0617cee936836518e7424ee12cb709883f2c9a1adda63cc460"}, - {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca80e1b90a05a4f476547f904992ae81eda5c2c85c66ee4195bb8f9c5fb47f28"}, - {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:90c72ebb7cb3a08a7f40061079817133f502a160561d0675b0a6adf231382c92"}, - {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bb54c54510e47a8c7c8e63454a6acc817519337b2b78606c4e840871a3e15349"}, - {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:de6a1c9f6803b90e20869e6b99c2c18cef5cc691363954c93cb9adeb26d9f3ae"}, - {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:a3628b6c7b880b181a3ae0a0683698513874df63783fd89de99b7b7539e3e8a8"}, - {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fc37e9aef10a696a5a4474802930079ccfc14d9f9c10b4662169671ff034b7df"}, - {file = "aiohttp-3.8.6-cp37-cp37m-win32.whl", hash = "sha256:f8ef51e459eb2ad8e7a66c1d6440c808485840ad55ecc3cafefadea47d1b1ba2"}, - {file = "aiohttp-3.8.6-cp37-cp37m-win_amd64.whl", hash = "sha256:b2fe42e523be344124c6c8ef32a011444e869dc5f883c591ed87f84339de5976"}, - {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9e2ee0ac5a1f5c7dd3197de309adfb99ac4617ff02b0603fd1e65b07dc772e4b"}, - {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01770d8c04bd8db568abb636c1fdd4f7140b284b8b3e0b4584f070180c1e5c62"}, - {file = "aiohttp-3.8.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3c68330a59506254b556b99a91857428cab98b2f84061260a67865f7f52899f5"}, - {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89341b2c19fb5eac30c341133ae2cc3544d40d9b1892749cdd25892bbc6ac951"}, - {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71783b0b6455ac8f34b5ec99d83e686892c50498d5d00b8e56d47f41b38fbe04"}, - {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f628dbf3c91e12f4d6c8b3f092069567d8eb17814aebba3d7d60c149391aee3a"}, - {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04691bc6601ef47c88f0255043df6f570ada1a9ebef99c34bd0b72866c217ae"}, - {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee912f7e78287516df155f69da575a0ba33b02dd7c1d6614dbc9463f43066e3"}, - {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9c19b26acdd08dd239e0d3669a3dddafd600902e37881f13fbd8a53943079dbc"}, - {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:99c5ac4ad492b4a19fc132306cd57075c28446ec2ed970973bbf036bcda1bcc6"}, - {file = 
"aiohttp-3.8.6-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f0f03211fd14a6a0aed2997d4b1c013d49fb7b50eeb9ffdf5e51f23cfe2c77fa"}, - {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8d399dade330c53b4106160f75f55407e9ae7505263ea86f2ccca6bfcbdb4921"}, - {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ec4fd86658c6a8964d75426517dc01cbf840bbf32d055ce64a9e63a40fd7b771"}, - {file = "aiohttp-3.8.6-cp38-cp38-win32.whl", hash = "sha256:33164093be11fcef3ce2571a0dccd9041c9a93fa3bde86569d7b03120d276c6f"}, - {file = "aiohttp-3.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:bdf70bfe5a1414ba9afb9d49f0c912dc524cf60141102f3a11143ba3d291870f"}, - {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d52d5dc7c6682b720280f9d9db41d36ebe4791622c842e258c9206232251ab2b"}, - {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ac39027011414dbd3d87f7edb31680e1f430834c8cef029f11c66dad0670aa5"}, - {file = "aiohttp-3.8.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3f5c7ce535a1d2429a634310e308fb7d718905487257060e5d4598e29dc17f0b"}, - {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b30e963f9e0d52c28f284d554a9469af073030030cef8693106d918b2ca92f54"}, - {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:918810ef188f84152af6b938254911055a72e0f935b5fbc4c1a4ed0b0584aed1"}, - {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:002f23e6ea8d3dd8d149e569fd580c999232b5fbc601c48d55398fbc2e582e8c"}, - {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fcf3eabd3fd1a5e6092d1242295fa37d0354b2eb2077e6eb670accad78e40e1"}, - {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:255ba9d6d5ff1a382bb9a578cd563605aa69bec845680e21c44afc2670607a95"}, - {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d67f8baed00870aa390ea2590798766256f31dc5ed3ecc737debb6e97e2ede78"}, - {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:86f20cee0f0a317c76573b627b954c412ea766d6ada1a9fcf1b805763ae7feeb"}, - {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:39a312d0e991690ccc1a61f1e9e42daa519dcc34ad03eb6f826d94c1190190dd"}, - {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e827d48cf802de06d9c935088c2924e3c7e7533377d66b6f31ed175c1620e05e"}, - {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bd111d7fc5591ddf377a408ed9067045259ff2770f37e2d94e6478d0f3fc0c17"}, - {file = "aiohttp-3.8.6-cp39-cp39-win32.whl", hash = "sha256:caf486ac1e689dda3502567eb89ffe02876546599bbf915ec94b1fa424eeffd4"}, - {file = "aiohttp-3.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:3f0e27e5b733803333bb2371249f41cf42bae8884863e8e8965ec69bebe53132"}, - {file = "aiohttp-3.8.6.tar.gz", hash = "sha256:b0cf2a4501bff9330a8a5248b4ce951851e415bdcce9dc158e76cfd55e15085c"}, + {file = "aiohttp-3.9.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e1f80197f8b0b846a8d5cf7b7ec6084493950d0882cc5537fb7b96a69e3c8590"}, + {file = "aiohttp-3.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72444d17777865734aa1a4d167794c34b63e5883abb90356a0364a28904e6c0"}, + {file = "aiohttp-3.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b05d5cbe9dafcdc733262c3a99ccf63d2f7ce02543620d2bd8db4d4f7a22f83"}, + {file = 
"aiohttp-3.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c4fa235d534b3547184831c624c0b7c1e262cd1de847d95085ec94c16fddcd5"}, + {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:289ba9ae8e88d0ba16062ecf02dd730b34186ea3b1e7489046fc338bdc3361c4"}, + {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bff7e2811814fa2271be95ab6e84c9436d027a0e59665de60edf44e529a42c1f"}, + {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81b77f868814346662c96ab36b875d7814ebf82340d3284a31681085c051320f"}, + {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b9c7426923bb7bd66d409da46c41e3fb40f5caf679da624439b9eba92043fa6"}, + {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8d44e7bf06b0c0a70a20f9100af9fcfd7f6d9d3913e37754c12d424179b4e48f"}, + {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22698f01ff5653fe66d16ffb7658f582a0ac084d7da1323e39fd9eab326a1f26"}, + {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ca7ca5abfbfe8d39e653870fbe8d7710be7a857f8a8386fc9de1aae2e02ce7e4"}, + {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:8d7f98fde213f74561be1d6d3fa353656197f75d4edfbb3d94c9eb9b0fc47f5d"}, + {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5216b6082c624b55cfe79af5d538e499cd5f5b976820eac31951fb4325974501"}, + {file = "aiohttp-3.9.1-cp310-cp310-win32.whl", hash = "sha256:0e7ba7ff228c0d9a2cd66194e90f2bca6e0abca810b786901a569c0de082f489"}, + {file = "aiohttp-3.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:c7e939f1ae428a86e4abbb9a7c4732bf4706048818dfd979e5e2839ce0159f23"}, + {file = "aiohttp-3.9.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:df9cf74b9bc03d586fc53ba470828d7b77ce51b0582d1d0b5b2fb673c0baa32d"}, + {file = "aiohttp-3.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecca113f19d5e74048c001934045a2b9368d77b0b17691d905af18bd1c21275e"}, + {file = "aiohttp-3.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8cef8710fb849d97c533f259103f09bac167a008d7131d7b2b0e3a33269185c0"}, + {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bea94403a21eb94c93386d559bce297381609153e418a3ffc7d6bf772f59cc35"}, + {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91c742ca59045dce7ba76cab6e223e41d2c70d79e82c284a96411f8645e2afff"}, + {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c93b7c2e52061f0925c3382d5cb8980e40f91c989563d3d32ca280069fd6a87"}, + {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee2527134f95e106cc1653e9ac78846f3a2ec1004cf20ef4e02038035a74544d"}, + {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11ff168d752cb41e8492817e10fb4f85828f6a0142b9726a30c27c35a1835f01"}, + {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b8c3a67eb87394386847d188996920f33b01b32155f0a94f36ca0e0c635bf3e3"}, + {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c7b5d5d64e2a14e35a9240b33b89389e0035e6de8dbb7ffa50d10d8b65c57449"}, + {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = 
"sha256:69985d50a2b6f709412d944ffb2e97d0be154ea90600b7a921f95a87d6f108a2"}, + {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:c9110c06eaaac7e1f5562caf481f18ccf8f6fdf4c3323feab28a93d34cc646bd"}, + {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737e69d193dac7296365a6dcb73bbbf53bb760ab25a3727716bbd42022e8d7a"}, + {file = "aiohttp-3.9.1-cp311-cp311-win32.whl", hash = "sha256:4ee8caa925aebc1e64e98432d78ea8de67b2272252b0a931d2ac3bd876ad5544"}, + {file = "aiohttp-3.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:a34086c5cc285be878622e0a6ab897a986a6e8bf5b67ecb377015f06ed316587"}, + {file = "aiohttp-3.9.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f800164276eec54e0af5c99feb9494c295118fc10a11b997bbb1348ba1a52065"}, + {file = "aiohttp-3.9.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:500f1c59906cd142d452074f3811614be04819a38ae2b3239a48b82649c08821"}, + {file = "aiohttp-3.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0b0a6a36ed7e164c6df1e18ee47afbd1990ce47cb428739d6c99aaabfaf1b3af"}, + {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69da0f3ed3496808e8cbc5123a866c41c12c15baaaead96d256477edf168eb57"}, + {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:176df045597e674fa950bf5ae536be85699e04cea68fa3a616cf75e413737eb5"}, + {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b796b44111f0cab6bbf66214186e44734b5baab949cb5fb56154142a92989aeb"}, + {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f27fdaadce22f2ef950fc10dcdf8048407c3b42b73779e48a4e76b3c35bca26c"}, + {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcb6532b9814ea7c5a6a3299747c49de30e84472fa72821b07f5a9818bce0f66"}, + {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:54631fb69a6e44b2ba522f7c22a6fb2667a02fd97d636048478db2fd8c4e98fe"}, + {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4b4c452d0190c5a820d3f5c0f3cd8a28ace48c54053e24da9d6041bf81113183"}, + {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:cae4c0c2ca800c793cae07ef3d40794625471040a87e1ba392039639ad61ab5b"}, + {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:565760d6812b8d78d416c3c7cfdf5362fbe0d0d25b82fed75d0d29e18d7fc30f"}, + {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54311eb54f3a0c45efb9ed0d0a8f43d1bc6060d773f6973efd90037a51cd0a3f"}, + {file = "aiohttp-3.9.1-cp312-cp312-win32.whl", hash = "sha256:85c3e3c9cb1d480e0b9a64c658cd66b3cfb8e721636ab8b0e746e2d79a7a9eed"}, + {file = "aiohttp-3.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:11cb254e397a82efb1805d12561e80124928e04e9c4483587ce7390b3866d213"}, + {file = "aiohttp-3.9.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8a22a34bc594d9d24621091d1b91511001a7eea91d6652ea495ce06e27381f70"}, + {file = "aiohttp-3.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:598db66eaf2e04aa0c8900a63b0101fdc5e6b8a7ddd805c56d86efb54eb66672"}, + {file = "aiohttp-3.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2c9376e2b09895c8ca8b95362283365eb5c03bdc8428ade80a864160605715f1"}, + {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:41473de252e1797c2d2293804e389a6d6986ef37cbb4a25208de537ae32141dd"}, + {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c5857612c9813796960c00767645cb5da815af16dafb32d70c72a8390bbf690"}, + {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffcd828e37dc219a72c9012ec44ad2e7e3066bec6ff3aaa19e7d435dbf4032ca"}, + {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:219a16763dc0294842188ac8a12262b5671817042b35d45e44fd0a697d8c8361"}, + {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f694dc8a6a3112059258a725a4ebe9acac5fe62f11c77ac4dcf896edfa78ca28"}, + {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bcc0ea8d5b74a41b621ad4a13d96c36079c81628ccc0b30cfb1603e3dfa3a014"}, + {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:90ec72d231169b4b8d6085be13023ece8fa9b1bb495e4398d847e25218e0f431"}, + {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:cf2a0ac0615842b849f40c4d7f304986a242f1e68286dbf3bd7a835e4f83acfd"}, + {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0e49b08eafa4f5707ecfb321ab9592717a319e37938e301d462f79b4e860c32a"}, + {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2c59e0076ea31c08553e868cec02d22191c086f00b44610f8ab7363a11a5d9d8"}, + {file = "aiohttp-3.9.1-cp38-cp38-win32.whl", hash = "sha256:4831df72b053b1eed31eb00a2e1aff6896fb4485301d4ccb208cac264b648db4"}, + {file = "aiohttp-3.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:3135713c5562731ee18f58d3ad1bf41e1d8883eb68b363f2ffde5b2ea4b84cc7"}, + {file = "aiohttp-3.9.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cfeadf42840c1e870dc2042a232a8748e75a36b52d78968cda6736de55582766"}, + {file = "aiohttp-3.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:70907533db712f7aa791effb38efa96f044ce3d4e850e2d7691abd759f4f0ae0"}, + {file = "aiohttp-3.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cdefe289681507187e375a5064c7599f52c40343a8701761c802c1853a504558"}, + {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7481f581251bb5558ba9f635db70908819caa221fc79ee52a7f58392778c636"}, + {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:49f0c1b3c2842556e5de35f122fc0f0b721334ceb6e78c3719693364d4af8499"}, + {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d406b01a9f5a7e232d1b0d161b40c05275ffbcbd772dc18c1d5a570961a1ca4"}, + {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d8e4450e7fe24d86e86b23cc209e0023177b6d59502e33807b732d2deb6975f"}, + {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0266cd6f005e99f3f51e583012de2778e65af6b73860038b968a0a8888487a"}, + {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab221850108a4a063c5b8a70f00dd7a1975e5a1713f87f4ab26a46e5feac5a0e"}, + {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c88a15f272a0ad3d7773cf3a37cc7b7d077cbfc8e331675cf1346e849d97a4e5"}, + {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:237533179d9747080bcaad4d02083ce295c0d2eab3e9e8ce103411a4312991a0"}, + {file = 
"aiohttp-3.9.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:02ab6006ec3c3463b528374c4cdce86434e7b89ad355e7bf29e2f16b46c7dd6f"}, + {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04fa38875e53eb7e354ece1607b1d2fdee2d175ea4e4d745f6ec9f751fe20c7c"}, + {file = "aiohttp-3.9.1-cp39-cp39-win32.whl", hash = "sha256:82eefaf1a996060602f3cc1112d93ba8b201dbf5d8fd9611227de2003dddb3b7"}, + {file = "aiohttp-3.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:9b05d33ff8e6b269e30a7957bd3244ffbce2a7a35a81b81c382629b80af1a8bf"}, + {file = "aiohttp-3.9.1.tar.gz", hash = "sha256:8fc49a87ac269d4529da45871e2ffb6874e87779c3d0e2ccd813c0899221239d"}, ] [package.dependencies] aiosignal = ">=1.1.2" -async-timeout = ">=4.0.0a3,<5.0" +async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} attrs = ">=17.3.0" -charset-normalizer = ">=2.0,<4.0" frozenlist = ">=1.1.1" multidict = ">=4.5,<7.0" yarl = ">=1.0,<2.0" [package.extras] -speedups = ["Brotli", "aiodns", "cchardet"] +speedups = ["Brotli", "aiodns", "brotlicffi"] [[package]] name = "aiosignal" @@ -122,6 +110,41 @@ files = [ [package.dependencies] frozenlist = ">=1.1.0" +[[package]] +name = "annotated-types" +version = "0.6.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, + {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "anyio" +version = "3.7.1" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.7" +files = [ + {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, + {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, +] + +[package.dependencies] +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" + +[package.extras] +doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"] +test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (<0.22)"] + [[package]] name = "appdirs" version = "1.4.4" @@ -162,99 +185,125 @@ docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib- tests = ["attrs[tests-no-zope]", "zope-interface"] tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +optional = true +python-versions = ">=3.7,<4.0" +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + [[package]] name = "certifi" -version = "2023.7.22" +version = "2023.11.17" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, - {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, + {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, + {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, ] [[package]] name = "charset-normalizer" -version = "3.2.0" +version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"}, - {file = 
"charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"}, - {file = 
"charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"}, - {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"}, + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = 
"sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, ] [[package]] @@ -282,21 +331,66 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "distro" +version = "1.8.0" +description = "Distro - an OS platform information API" +optional = false 
+python-versions = ">=3.6" +files = [ + {file = "distro-1.8.0-py3-none-any.whl", hash = "sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff"}, + {file = "distro-1.8.0.tar.gz", hash = "sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.0" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, + {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "fastapi" +version = "0.104.1" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = true +python-versions = ">=3.8" +files = [ + {file = "fastapi-0.104.1-py3-none-any.whl", hash = "sha256:752dc31160cdbd0436bb93bad51560b57e525cbb1d4bbf6f4904ceee75548241"}, + {file = "fastapi-0.104.1.tar.gz", hash = "sha256:e5e4540a7c5e1dcfbbcf5b903c234feddcdcd881f191977a1c5dfd917487e7ae"}, +] + +[package.dependencies] +anyio = ">=3.7.1,<4.0.0" +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +starlette = ">=0.27.0,<0.28.0" +typing-extensions = ">=4.8.0" + +[package.extras] +all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] + [[package]] name = "filelock" -version = "3.12.4" +version = "3.13.1" description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.12.4-py3-none-any.whl", hash = "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4"}, - {file = "filelock-3.12.4.tar.gz", hash = "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd"}, + {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, + {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, ] [package.extras] -docs = ["furo (>=2023.7.26)", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.24)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=2.1)"] -typing = ["typing-extensions (>=4.7.1)"] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +typing = ["typing-extensions (>=4.8)"] [[package]] name = "frozenlist" @@ -370,13 +464,13 @@ files = [ [[package]] name = "fsspec" -version = "2023.9.2" +version = "2023.12.2" description = "File-system specification" optional = false python-versions = ">=3.8" files = [ - {file = "fsspec-2023.9.2-py3-none-any.whl", hash = "sha256:603dbc52c75b84da501b9b2ec8c11e1f61c25984c4a0dda1f129ef391fbfc9b4"}, - {file = "fsspec-2023.9.2.tar.gz", hash = "sha256:80bfb8c70cc27b2178cc62a935ecf242fc6e8c3fb801f9c571fc01b1e715ba7d"}, + {file = "fsspec-2023.12.2-py3-none-any.whl", hash = "sha256:d800d87f72189a745fa3d6b033b9dc4a34ad069f60ca60b943a63599f5501960"}, + {file = "fsspec-2023.12.2.tar.gz", hash = "sha256:8548d39e8810b59c38014934f6b31e57f40c1b20f911f4cc2b85389c7e9bf0cb"}, ] [package.extras] @@ -403,20 +497,76 @@ smb = ["smbprotocol"] ssh = ["paramiko"] tqdm = ["tqdm"] +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.2" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.2-py3-none-any.whl", hash = "sha256:096cc05bca73b8e459a1fc3dcf585148f63e534eae4339559c9b8a8d6399acc7"}, + {file = "httpcore-1.0.2.tar.gz", hash = "sha256:9fc092e4799b26174648e54b74ed5f683132a464e95643b226e00c2ed2fa6535"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.23.0)"] + +[[package]] +name = "httpx" +version = "0.25.2" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.25.2-py3-none-any.whl", hash = "sha256:a05d3d052d9b2dfce0e3896636467f8a5342fb2b902c819428e1ac65413ca118"}, + {file = "httpx-0.25.2.tar.gz", hash = "sha256:8b8fcaa0c8ea7b05edd69a094e63a2094c4efcb48129fb757361bc423c0ad9e8"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + [[package]] name = "huggingface-hub" -version = "0.16.4" +version = "0.19.4" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.16.4-py3-none-any.whl", hash = "sha256:0d3df29932f334fead024afc7cb4cc5149d955238b8b5e42dcf9740d6995a349"}, - {file = "huggingface_hub-0.16.4.tar.gz", hash = "sha256:608c7d4f3d368b326d1747f91523dbd1f692871e8e2e7a4750314a2dd8b63e14"}, + {file = "huggingface_hub-0.19.4-py3-none-any.whl", hash = "sha256:dba013f779da16f14b606492828f3760600a1e1801432d09fe1c33e50b825bb5"}, + {file = "huggingface_hub-0.19.4.tar.gz", hash = "sha256:176a4fc355a851c17550e7619488f383189727eab209534d7cef2114dae77b22"}, ] [package.dependencies] filelock = "*" -fsspec = "*" +fsspec = ">=2023.5.0" packaging = ">=20.9" pyyaml = ">=5.1" requests = "*" @@ -424,44 +574,45 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +docs = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "hf-doc-builder", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", 
"types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)", "watchdog"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -inference = ["aiohttp", "pydantic"] -quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"] +inference = ["aiohttp", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)"] +quality = ["mypy (==1.5.1)", "ruff (>=0.1.3)"] tensorflow = ["graphviz", "pydot", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] torch = ["torch"] -typing = ["pydantic", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] +typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] name = "idna" -version = "3.4" +version = "3.6" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, + {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, + {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, ] [[package]] name = "importlib-metadata" -version = "6.8.0" +version = "7.0.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-6.8.0-py3-none-any.whl", hash = "sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb"}, - {file = "importlib_metadata-6.8.0.tar.gz", hash = "sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743"}, + {file = "importlib_metadata-7.0.0-py3-none-any.whl", hash = "sha256:d97503976bb81f40a193d41ee6570868479c69d5068651eb039c40d850c59d67"}, + {file = "importlib_metadata-7.0.0.tar.gz", hash = "sha256:7fc841f8b8332803464e5dc1c63a2e59121f46ca186c0e2e182e80bf8c1319f7"}, ] [package.dependencies] zipp = ">=0.5" [package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] @@ -636,37 +787,174 @@ files = [ [[package]] name = "openai" -version = "0.28.1" -description = "Python client library for the OpenAI API" +version = "1.3.8" +description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-0.28.1-py3-none-any.whl", hash = 
"sha256:d18690f9e3d31eedb66b57b88c2165d760b24ea0a01f150dd3f068155088ce68"}, - {file = "openai-0.28.1.tar.gz", hash = "sha256:4be1dad329a65b4ce1a660fe6d5431b438f429b5855c883435f0f7fcb6d2dcc8"}, + {file = "openai-1.3.8-py3-none-any.whl", hash = "sha256:ac5a17352b96db862390d2e6f51de9f7eb32e733f412467b2f160fbd3d0f2609"}, + {file = "openai-1.3.8.tar.gz", hash = "sha256:54963ff247abe185aad6ee443820e48ad9f87eb4de970acb2514bc113ced748c"}, ] [package.dependencies] -aiohttp = "*" -requests = ">=2.20" -tqdm = "*" +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.5,<5" [package.extras] -datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] -dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", "pytest-asyncio", "pytest-mock"] -embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"] -wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "packaging" -version = "23.1" +version = "23.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" files = [ - {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, - {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, ] +[[package]] +name = "pydantic" +version = "2.5.2" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-2.5.2-py3-none-any.whl", hash = "sha256:80c50fb8e3dcecfddae1adbcc00ec5822918490c99ab31f6cf6140ca1c1429f0"}, + {file = "pydantic-2.5.2.tar.gz", hash = "sha256:ff177ba64c6faf73d7afa2e8cad38fd456c0dbe01c9954e71038001cd15a6edd"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.14.5" +typing-extensions = ">=4.6.1" + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.14.5" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic_core-2.14.5-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:7e88f5696153dc516ba6e79f82cc4747e87027205f0e02390c21f7cb3bd8abfd"}, + {file = "pydantic_core-2.14.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4641e8ad4efb697f38a9b64ca0523b557c7931c5f84e0fd377a9a3b05121f0de"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:774de879d212db5ce02dfbf5b0da9a0ea386aeba12b0b95674a4ce0593df3d07"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ebb4e035e28f49b6f1a7032920bb9a0c064aedbbabe52c543343d39341a5b2a3"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b53e9ad053cd064f7e473a5f29b37fc4cc9dc6d35f341e6afc0155ea257fc911"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:8aa1768c151cf562a9992462239dfc356b3d1037cc5a3ac829bb7f3bda7cc1f9"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eac5c82fc632c599f4639a5886f96867ffced74458c7db61bc9a66ccb8ee3113"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2ae91f50ccc5810b2f1b6b858257c9ad2e08da70bf890dee02de1775a387c66"}, + {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6b9ff467ffbab9110e80e8c8de3bcfce8e8b0fd5661ac44a09ae5901668ba997"}, + {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:61ea96a78378e3bd5a0be99b0e5ed00057b71f66115f5404d0dae4819f495093"}, + {file = "pydantic_core-2.14.5-cp310-none-win32.whl", hash = "sha256:bb4c2eda937a5e74c38a41b33d8c77220380a388d689bcdb9b187cf6224c9720"}, + {file = "pydantic_core-2.14.5-cp310-none-win_amd64.whl", hash = "sha256:b7851992faf25eac90bfcb7bfd19e1f5ffa00afd57daec8a0042e63c74a4551b"}, + {file = "pydantic_core-2.14.5-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:4e40f2bd0d57dac3feb3a3aed50f17d83436c9e6b09b16af271b6230a2915459"}, + {file = "pydantic_core-2.14.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ab1cdb0f14dc161ebc268c09db04d2c9e6f70027f3b42446fa11c153521c0e88"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aae7ea3a1c5bb40c93cad361b3e869b180ac174656120c42b9fadebf685d121b"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:60b7607753ba62cf0739177913b858140f11b8af72f22860c28eabb2f0a61937"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2248485b0322c75aee7565d95ad0e16f1c67403a470d02f94da7344184be770f"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:823fcc638f67035137a5cd3f1584a4542d35a951c3cc68c6ead1df7dac825c26"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96581cfefa9123accc465a5fd0cc833ac4d75d55cc30b633b402e00e7ced00a6"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a33324437018bf6ba1bb0f921788788641439e0ed654b233285b9c69704c27b4"}, + {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9bd18fee0923ca10f9a3ff67d4851c9d3e22b7bc63d1eddc12f439f436f2aada"}, + {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:853a2295c00f1d4429db4c0fb9475958543ee80cfd310814b5c0ef502de24dda"}, + {file = "pydantic_core-2.14.5-cp311-none-win32.whl", hash = "sha256:cb774298da62aea5c80a89bd58c40205ab4c2abf4834453b5de207d59d2e1651"}, + {file = "pydantic_core-2.14.5-cp311-none-win_amd64.whl", hash = "sha256:e87fc540c6cac7f29ede02e0f989d4233f88ad439c5cdee56f693cc9c1c78077"}, + {file = "pydantic_core-2.14.5-cp311-none-win_arm64.whl", hash = "sha256:57d52fa717ff445cb0a5ab5237db502e6be50809b43a596fb569630c665abddf"}, + {file = "pydantic_core-2.14.5-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:e60f112ac88db9261ad3a52032ea46388378034f3279c643499edb982536a093"}, + {file = "pydantic_core-2.14.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6e227c40c02fd873c2a73a98c1280c10315cbebe26734c196ef4514776120aeb"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f0cbc7fff06a90bbd875cc201f94ef0ee3929dfbd5c55a06674b60857b8b85ed"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:103ef8d5b58596a731b690112819501ba1db7a36f4ee99f7892c40da02c3e189"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c949f04ecad823f81b1ba94e7d189d9dfb81edbb94ed3f8acfce41e682e48cef"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1452a1acdf914d194159439eb21e56b89aa903f2e1c65c60b9d874f9b950e5d"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb4679d4c2b089e5ef89756bc73e1926745e995d76e11925e3e96a76d5fa51fc"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf9d3fe53b1ee360e2421be95e62ca9b3296bf3f2fb2d3b83ca49ad3f925835e"}, + {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:70f4b4851dbb500129681d04cc955be2a90b2248d69273a787dda120d5cf1f69"}, + {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:59986de5710ad9613ff61dd9b02bdd2f615f1a7052304b79cc8fa2eb4e336d2d"}, + {file = "pydantic_core-2.14.5-cp312-none-win32.whl", hash = "sha256:699156034181e2ce106c89ddb4b6504c30db8caa86e0c30de47b3e0654543260"}, + {file = "pydantic_core-2.14.5-cp312-none-win_amd64.whl", hash = "sha256:5baab5455c7a538ac7e8bf1feec4278a66436197592a9bed538160a2e7d11e36"}, + {file = "pydantic_core-2.14.5-cp312-none-win_arm64.whl", hash = "sha256:e47e9a08bcc04d20975b6434cc50bf82665fbc751bcce739d04a3120428f3e27"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:af36f36538418f3806048f3b242a1777e2540ff9efaa667c27da63d2749dbce0"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:45e95333b8418ded64745f14574aa9bfc212cb4fbeed7a687b0c6e53b5e188cd"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e47a76848f92529879ecfc417ff88a2806438f57be4a6a8bf2961e8f9ca9ec7"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d81e6987b27bc7d101c8597e1cd2bcaa2fee5e8e0f356735c7ed34368c471550"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34708cc82c330e303f4ce87758828ef6e457681b58ce0e921b6e97937dd1e2a3"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:652c1988019752138b974c28f43751528116bcceadad85f33a258869e641d753"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e4d090e73e0725b2904fdbdd8d73b8802ddd691ef9254577b708d413bf3006e"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5c7d5b5005f177764e96bd584d7bf28d6e26e96f2a541fdddb934c486e36fd59"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a71891847f0a73b1b9eb86d089baee301477abef45f7eaf303495cd1473613e4"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a717aef6971208f0851a2420b075338e33083111d92041157bbe0e2713b37325"}, + {file = "pydantic_core-2.14.5-cp37-none-win32.whl", hash = "sha256:de790a3b5aa2124b8b78ae5faa033937a72da8efe74b9231698b5a1dd9be3405"}, + {file = "pydantic_core-2.14.5-cp37-none-win_amd64.whl", hash = 
"sha256:6c327e9cd849b564b234da821236e6bcbe4f359a42ee05050dc79d8ed2a91588"}, + {file = "pydantic_core-2.14.5-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:ef98ca7d5995a82f43ec0ab39c4caf6a9b994cb0b53648ff61716370eadc43cf"}, + {file = "pydantic_core-2.14.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6eae413494a1c3f89055da7a5515f32e05ebc1a234c27674a6956755fb2236f"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcf4e6d85614f7a4956c2de5a56531f44efb973d2fe4a444d7251df5d5c4dcfd"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6637560562134b0e17de333d18e69e312e0458ee4455bdad12c37100b7cad706"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77fa384d8e118b3077cccfcaf91bf83c31fe4dc850b5e6ee3dc14dc3d61bdba1"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16e29bad40bcf97aac682a58861249ca9dcc57c3f6be22f506501833ddb8939c"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:531f4b4252fac6ca476fbe0e6f60f16f5b65d3e6b583bc4d87645e4e5ddde331"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:074f3d86f081ce61414d2dc44901f4f83617329c6f3ab49d2bc6c96948b2c26b"}, + {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c2adbe22ab4babbca99c75c5d07aaf74f43c3195384ec07ccbd2f9e3bddaecec"}, + {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0f6116a558fd06d1b7c2902d1c4cf64a5bd49d67c3540e61eccca93f41418124"}, + {file = "pydantic_core-2.14.5-cp38-none-win32.whl", hash = "sha256:fe0a5a1025eb797752136ac8b4fa21aa891e3d74fd340f864ff982d649691867"}, + {file = "pydantic_core-2.14.5-cp38-none-win_amd64.whl", hash = "sha256:079206491c435b60778cf2b0ee5fd645e61ffd6e70c47806c9ed51fc75af078d"}, + {file = "pydantic_core-2.14.5-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:a6a16f4a527aae4f49c875da3cdc9508ac7eef26e7977952608610104244e1b7"}, + {file = "pydantic_core-2.14.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:abf058be9517dc877227ec3223f0300034bd0e9f53aebd63cf4456c8cb1e0863"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49b08aae5013640a3bfa25a8eebbd95638ec3f4b2eaf6ed82cf0c7047133f03b"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c2d97e906b4ff36eb464d52a3bc7d720bd6261f64bc4bcdbcd2c557c02081ed2"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3128e0bbc8c091ec4375a1828d6118bc20404883169ac95ffa8d983b293611e6"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88e74ab0cdd84ad0614e2750f903bb0d610cc8af2cc17f72c28163acfcf372a4"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c339dabd8ee15f8259ee0f202679b6324926e5bc9e9a40bf981ce77c038553db"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3387277f1bf659caf1724e1afe8ee7dbc9952a82d90f858ebb931880216ea955"}, + {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ba6b6b3846cfc10fdb4c971980a954e49d447cd215ed5a77ec8190bc93dd7bc5"}, + {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_x86_64.whl", 
hash = "sha256:ca61d858e4107ce5e1330a74724fe757fc7135190eb5ce5c9d0191729f033209"}, + {file = "pydantic_core-2.14.5-cp39-none-win32.whl", hash = "sha256:ec1e72d6412f7126eb7b2e3bfca42b15e6e389e1bc88ea0069d0cc1742f477c6"}, + {file = "pydantic_core-2.14.5-cp39-none-win_amd64.whl", hash = "sha256:c0b97ec434041827935044bbbe52b03d6018c2897349670ff8fe11ed24d1d4ab"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:79e0a2cdbdc7af3f4aee3210b1172ab53d7ddb6a2d8c24119b5706e622b346d0"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:678265f7b14e138d9a541ddabbe033012a2953315739f8cfa6d754cc8063e8ca"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b15e855ae44f0c6341ceb74df61b606e11f1087e87dcb7482377374aac6abe"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09b0e985fbaf13e6b06a56d21694d12ebca6ce5414b9211edf6f17738d82b0f8"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ad873900297bb36e4b6b3f7029d88ff9829ecdc15d5cf20161775ce12306f8a"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2d0ae0d8670164e10accbeb31d5ad45adb71292032d0fdb9079912907f0085f4"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d37f8ec982ead9ba0a22a996129594938138a1503237b87318392a48882d50b7"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:35613015f0ba7e14c29ac6c2483a657ec740e5ac5758d993fdd5870b07a61d8b"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ab4ea451082e684198636565224bbb179575efc1658c48281b2c866bfd4ddf04"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ce601907e99ea5b4adb807ded3570ea62186b17f88e271569144e8cca4409c7"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb2ed8b3fe4bf4506d6dab3b93b83bbc22237e230cba03866d561c3577517d18"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70f947628e074bb2526ba1b151cee10e4c3b9670af4dbb4d73bc8a89445916b5"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4bc536201426451f06f044dfbf341c09f540b4ebdb9fd8d2c6164d733de5e634"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4791cf0f8c3104ac668797d8c514afb3431bc3305f5638add0ba1a5a37e0d88"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:038c9f763e650712b899f983076ce783175397c848da04985658e7628cbe873b"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:27548e16c79702f1e03f5628589c6057c9ae17c95b4c449de3c66b589ead0520"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97bee68898f3f4344eb02fec316db93d9700fb1e6a5b760ffa20d71d9a46ce3"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9b759b77f5337b4ea024f03abc6464c9f35d9718de01cfe6bae9f2e139c397e"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:439c9afe34638ace43a49bf72d201e0ffc1a800295bed8420c2a9ca8d5e3dbb3"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ba39688799094c75ea8a16a6b544eb57b5b0f3328697084f3f2790892510d144"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ccd4d5702bb90b84df13bd491be8d900b92016c5a455b7e14630ad7449eb03f8"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:81982d78a45d1e5396819bbb4ece1fadfe5f079335dd28c4ab3427cd95389944"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:7f8210297b04e53bc3da35db08b7302a6a1f4889c79173af69b72ec9754796b8"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:8c8a8812fe6f43a3a5b054af6ac2d7b8605c7bcab2804a8a7d68b53f3cd86e00"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:206ed23aecd67c71daf5c02c3cd19c0501b01ef3cbf7782db9e4e051426b3d0d"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2027d05c8aebe61d898d4cffd774840a9cb82ed356ba47a90d99ad768f39789"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40180930807ce806aa71eda5a5a5447abb6b6a3c0b4b3b1b1962651906484d68"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:615a0a4bff11c45eb3c1996ceed5bdaa2f7b432425253a7c2eed33bb86d80abc"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5e412d717366e0677ef767eac93566582518fe8be923361a5c204c1a62eaafe"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:513b07e99c0a267b1d954243845d8a833758a6726a3b5d8948306e3fe14675e3"}, + {file = "pydantic_core-2.14.5.tar.gz", hash = "sha256:6d30226dfc816dd0fdf120cae611dd2215117e4f9b124af8c60ab9093b6e8e71"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + [[package]] name = "python-dotenv" version = "1.0.0" @@ -740,101 +1028,119 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] +[[package]] +name = "redis" +version = "5.0.1" +description = "Python client for Redis database and key-value store" +optional = true +python-versions = ">=3.7" +files = [ + {file = "redis-5.0.1-py3-none-any.whl", hash = "sha256:ed4802971884ae19d640775ba3b03aa2e7bd5e8fb8dfaed2decce4d0fc48391f"}, + {file = "redis-5.0.1.tar.gz", hash = "sha256:0dab495cd5753069d3bc650a0dde8a8f9edde16fc5691b689a566eda58100d0f"}, +] + +[package.dependencies] +async-timeout = {version = ">=4.0.2", markers = "python_full_version <= \"3.11.2\""} + +[package.extras] +hiredis = ["hiredis (>=1.0.0)"] +ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"] + [[package]] name = "regex" -version = "2023.8.8" +version = "2023.10.3" description = "Alternative regular expression module, to replace re." 
optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "regex-2023.8.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:88900f521c645f784260a8d346e12a1590f79e96403971241e64c3a265c8ecdb"}, - {file = "regex-2023.8.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3611576aff55918af2697410ff0293d6071b7e00f4b09e005d614686ac4cd57c"}, - {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8a0ccc8f2698f120e9e5742f4b38dc944c38744d4bdfc427616f3a163dd9de5"}, - {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c662a4cbdd6280ee56f841f14620787215a171c4e2d1744c9528bed8f5816c96"}, - {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf0633e4a1b667bfe0bb10b5e53fe0d5f34a6243ea2530eb342491f1adf4f739"}, - {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:551ad543fa19e94943c5b2cebc54c73353ffff08228ee5f3376bd27b3d5b9800"}, - {file = "regex-2023.8.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54de2619f5ea58474f2ac211ceea6b615af2d7e4306220d4f3fe690c91988a61"}, - {file = "regex-2023.8.8-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ec4b3f0aebbbe2fc0134ee30a791af522a92ad9f164858805a77442d7d18570"}, - {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ae646c35cb9f820491760ac62c25b6d6b496757fda2d51be429e0e7b67ae0ab"}, - {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ca339088839582d01654e6f83a637a4b8194d0960477b9769d2ff2cfa0fa36d2"}, - {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:d9b6627408021452dcd0d2cdf8da0534e19d93d070bfa8b6b4176f99711e7f90"}, - {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:bd3366aceedf274f765a3a4bc95d6cd97b130d1dda524d8f25225d14123c01db"}, - {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7aed90a72fc3654fba9bc4b7f851571dcc368120432ad68b226bd593f3f6c0b7"}, - {file = "regex-2023.8.8-cp310-cp310-win32.whl", hash = "sha256:80b80b889cb767cc47f31d2b2f3dec2db8126fbcd0cff31b3925b4dc6609dcdb"}, - {file = "regex-2023.8.8-cp310-cp310-win_amd64.whl", hash = "sha256:b82edc98d107cbc7357da7a5a695901b47d6eb0420e587256ba3ad24b80b7d0b"}, - {file = "regex-2023.8.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1e7d84d64c84ad97bf06f3c8cb5e48941f135ace28f450d86af6b6512f1c9a71"}, - {file = "regex-2023.8.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ce0f9fbe7d295f9922c0424a3637b88c6c472b75eafeaff6f910494a1fa719ef"}, - {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06c57e14ac723b04458df5956cfb7e2d9caa6e9d353c0b4c7d5d54fcb1325c46"}, - {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7a9aaa5a1267125eef22cef3b63484c3241aaec6f48949b366d26c7250e0357"}, - {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b7408511fca48a82a119d78a77c2f5eb1b22fe88b0d2450ed0756d194fe7a9a"}, - {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14dc6f2d88192a67d708341f3085df6a4f5a0c7b03dec08d763ca2cd86e9f559"}, - {file = 
"regex-2023.8.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48c640b99213643d141550326f34f0502fedb1798adb3c9eb79650b1ecb2f177"}, - {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0085da0f6c6393428bf0d9c08d8b1874d805bb55e17cb1dfa5ddb7cfb11140bf"}, - {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:964b16dcc10c79a4a2be9f1273fcc2684a9eedb3906439720598029a797b46e6"}, - {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7ce606c14bb195b0e5108544b540e2c5faed6843367e4ab3deb5c6aa5e681208"}, - {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:40f029d73b10fac448c73d6eb33d57b34607f40116e9f6e9f0d32e9229b147d7"}, - {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3b8e6ea6be6d64104d8e9afc34c151926f8182f84e7ac290a93925c0db004bfd"}, - {file = "regex-2023.8.8-cp311-cp311-win32.whl", hash = "sha256:942f8b1f3b223638b02df7df79140646c03938d488fbfb771824f3d05fc083a8"}, - {file = "regex-2023.8.8-cp311-cp311-win_amd64.whl", hash = "sha256:51d8ea2a3a1a8fe4f67de21b8b93757005213e8ac3917567872f2865185fa7fb"}, - {file = "regex-2023.8.8-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e951d1a8e9963ea51efd7f150450803e3b95db5939f994ad3d5edac2b6f6e2b4"}, - {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704f63b774218207b8ccc6c47fcef5340741e5d839d11d606f70af93ee78e4d4"}, - {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22283c769a7b01c8ac355d5be0715bf6929b6267619505e289f792b01304d898"}, - {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91129ff1bb0619bc1f4ad19485718cc623a2dc433dff95baadbf89405c7f6b57"}, - {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de35342190deb7b866ad6ba5cbcccb2d22c0487ee0cbb251efef0843d705f0d4"}, - {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b993b6f524d1e274a5062488a43e3f9f8764ee9745ccd8e8193df743dbe5ee61"}, - {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3026cbcf11d79095a32d9a13bbc572a458727bd5b1ca332df4a79faecd45281c"}, - {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:293352710172239bf579c90a9864d0df57340b6fd21272345222fb6371bf82b3"}, - {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d909b5a3fff619dc7e48b6b1bedc2f30ec43033ba7af32f936c10839e81b9217"}, - {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:3d370ff652323c5307d9c8e4c62efd1956fb08051b0e9210212bc51168b4ff56"}, - {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:b076da1ed19dc37788f6a934c60adf97bd02c7eea461b73730513921a85d4235"}, - {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e9941a4ada58f6218694f382e43fdd256e97615db9da135e77359da257a7168b"}, - {file = "regex-2023.8.8-cp36-cp36m-win32.whl", hash = "sha256:a8c65c17aed7e15a0c824cdc63a6b104dfc530f6fa8cb6ac51c437af52b481c7"}, - {file = "regex-2023.8.8-cp36-cp36m-win_amd64.whl", hash = "sha256:aadf28046e77a72f30dcc1ab185639e8de7f4104b8cb5c6dfa5d8ed860e57236"}, - {file = "regex-2023.8.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:423adfa872b4908843ac3e7a30f957f5d5282944b81ca0a3b8a7ccbbfaa06103"}, - {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ae594c66f4a7e1ea67232a0846649a7c94c188d6c071ac0210c3e86a5f92109"}, - {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e51c80c168074faa793685656c38eb7a06cbad7774c8cbc3ea05552d615393d8"}, - {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09b7f4c66aa9d1522b06e31a54f15581c37286237208df1345108fcf4e050c18"}, - {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e73e5243af12d9cd6a9d6a45a43570dbe2e5b1cdfc862f5ae2b031e44dd95a8"}, - {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:941460db8fe3bd613db52f05259c9336f5a47ccae7d7def44cc277184030a116"}, - {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f0ccf3e01afeb412a1a9993049cb160d0352dba635bbca7762b2dc722aa5742a"}, - {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:2e9216e0d2cdce7dbc9be48cb3eacb962740a09b011a116fd7af8c832ab116ca"}, - {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5cd9cd7170459b9223c5e592ac036e0704bee765706445c353d96f2890e816c8"}, - {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4873ef92e03a4309b3ccd8281454801b291b689f6ad45ef8c3658b6fa761d7ac"}, - {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:239c3c2a339d3b3ddd51c2daef10874410917cd2b998f043c13e2084cb191684"}, - {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1005c60ed7037be0d9dea1f9c53cc42f836188227366370867222bda4c3c6bd7"}, - {file = "regex-2023.8.8-cp37-cp37m-win32.whl", hash = "sha256:e6bd1e9b95bc5614a7a9c9c44fde9539cba1c823b43a9f7bc11266446dd568e3"}, - {file = "regex-2023.8.8-cp37-cp37m-win_amd64.whl", hash = "sha256:9a96edd79661e93327cfeac4edec72a4046e14550a1d22aa0dd2e3ca52aec921"}, - {file = "regex-2023.8.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f2181c20ef18747d5f4a7ea513e09ea03bdd50884a11ce46066bb90fe4213675"}, - {file = "regex-2023.8.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a2ad5add903eb7cdde2b7c64aaca405f3957ab34f16594d2b78d53b8b1a6a7d6"}, - {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9233ac249b354c54146e392e8a451e465dd2d967fc773690811d3a8c240ac601"}, - {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:920974009fb37b20d32afcdf0227a2e707eb83fe418713f7a8b7de038b870d0b"}, - {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd2b6c5dfe0929b6c23dde9624483380b170b6e34ed79054ad131b20203a1a63"}, - {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96979d753b1dc3b2169003e1854dc67bfc86edf93c01e84757927f810b8c3c93"}, - {file = "regex-2023.8.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ae54a338191e1356253e7883d9d19f8679b6143703086245fb14d1f20196be9"}, - {file = "regex-2023.8.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2162ae2eb8b079622176a81b65d486ba50b888271302190870b8cc488587d280"}, - {file = 
"regex-2023.8.8-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c884d1a59e69e03b93cf0dfee8794c63d7de0ee8f7ffb76e5f75be8131b6400a"}, - {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf9273e96f3ee2ac89ffcb17627a78f78e7516b08f94dc435844ae72576a276e"}, - {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:83215147121e15d5f3a45d99abeed9cf1fe16869d5c233b08c56cdf75f43a504"}, - {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3f7454aa427b8ab9101f3787eb178057c5250478e39b99540cfc2b889c7d0586"}, - {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0640913d2c1044d97e30d7c41728195fc37e54d190c5385eacb52115127b882"}, - {file = "regex-2023.8.8-cp38-cp38-win32.whl", hash = "sha256:0c59122ceccb905a941fb23b087b8eafc5290bf983ebcb14d2301febcbe199c7"}, - {file = "regex-2023.8.8-cp38-cp38-win_amd64.whl", hash = "sha256:c12f6f67495ea05c3d542d119d270007090bad5b843f642d418eb601ec0fa7be"}, - {file = "regex-2023.8.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:82cd0a69cd28f6cc3789cc6adeb1027f79526b1ab50b1f6062bbc3a0ccb2dbc3"}, - {file = "regex-2023.8.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bb34d1605f96a245fc39790a117ac1bac8de84ab7691637b26ab2c5efb8f228c"}, - {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:987b9ac04d0b38ef4f89fbc035e84a7efad9cdd5f1e29024f9289182c8d99e09"}, - {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9dd6082f4e2aec9b6a0927202c85bc1b09dcab113f97265127c1dc20e2e32495"}, - {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7eb95fe8222932c10d4436e7a6f7c99991e3fdd9f36c949eff16a69246dee2dc"}, - {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7098c524ba9f20717a56a8d551d2ed491ea89cbf37e540759ed3b776a4f8d6eb"}, - {file = "regex-2023.8.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b694430b3f00eb02c594ff5a16db30e054c1b9589a043fe9174584c6efa8033"}, - {file = "regex-2023.8.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2aeab3895d778155054abea5238d0eb9a72e9242bd4b43f42fd911ef9a13470"}, - {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:988631b9d78b546e284478c2ec15c8a85960e262e247b35ca5eaf7ee22f6050a"}, - {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:67ecd894e56a0c6108ec5ab1d8fa8418ec0cff45844a855966b875d1039a2e34"}, - {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:14898830f0a0eb67cae2bbbc787c1a7d6e34ecc06fbd39d3af5fe29a4468e2c9"}, - {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:f2200e00b62568cfd920127782c61bc1c546062a879cdc741cfcc6976668dfcf"}, - {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9691a549c19c22d26a4f3b948071e93517bdf86e41b81d8c6ac8a964bb71e5a6"}, - {file = "regex-2023.8.8-cp39-cp39-win32.whl", hash = "sha256:6ab2ed84bf0137927846b37e882745a827458689eb969028af8032b1b3dac78e"}, - {file = "regex-2023.8.8-cp39-cp39-win_amd64.whl", hash = "sha256:5543c055d8ec7801901e1193a51570643d6a6ab8751b1f7dd9af71af467538bb"}, - {file = "regex-2023.8.8.tar.gz", hash = "sha256:fcbdc5f2b0f1cd0f6a56cdb46fe41d2cce1e644e3b68832f3eeebc5fb0f7712e"}, + {file = "regex-2023.10.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:4c34d4f73ea738223a094d8e0ffd6d2c1a1b4c175da34d6b0de3d8d69bee6bcc"}, + {file = "regex-2023.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8f4e49fc3ce020f65411432183e6775f24e02dff617281094ba6ab079ef0915"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cd1bccf99d3ef1ab6ba835308ad85be040e6a11b0977ef7ea8c8005f01a3c29"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:81dce2ddc9f6e8f543d94b05d56e70d03a0774d32f6cca53e978dc01e4fc75b8"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c6b4d23c04831e3ab61717a707a5d763b300213db49ca680edf8bf13ab5d91b"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c15ad0aee158a15e17e0495e1e18741573d04eb6da06d8b84af726cfc1ed02ee"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6239d4e2e0b52c8bd38c51b760cd870069f0bdf99700a62cd509d7a031749a55"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4a8bf76e3182797c6b1afa5b822d1d5802ff30284abe4599e1247be4fd6b03be"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9c727bbcf0065cbb20f39d2b4f932f8fa1631c3e01fcedc979bd4f51fe051c5"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3ccf2716add72f80714b9a63899b67fa711b654be3fcdd34fa391d2d274ce767"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:107ac60d1bfdc3edb53be75e2a52aff7481b92817cfdddd9b4519ccf0e54a6ff"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:00ba3c9818e33f1fa974693fb55d24cdc8ebafcb2e4207680669d8f8d7cca79a"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f0a47efb1dbef13af9c9a54a94a0b814902e547b7f21acb29434504d18f36e3a"}, + {file = "regex-2023.10.3-cp310-cp310-win32.whl", hash = "sha256:36362386b813fa6c9146da6149a001b7bd063dabc4d49522a1f7aa65b725c7ec"}, + {file = "regex-2023.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:c65a3b5330b54103e7d21cac3f6bf3900d46f6d50138d73343d9e5b2900b2353"}, + {file = "regex-2023.10.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:90a79bce019c442604662d17bf69df99090e24cdc6ad95b18b6725c2988a490e"}, + {file = "regex-2023.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c7964c2183c3e6cce3f497e3a9f49d182e969f2dc3aeeadfa18945ff7bdd7051"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ef80829117a8061f974b2fda8ec799717242353bff55f8a29411794d635d964"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5addc9d0209a9afca5fc070f93b726bf7003bd63a427f65ef797a931782e7edc"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c148bec483cc4b421562b4bcedb8e28a3b84fcc8f0aa4418e10898f3c2c0eb9b"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d1f21af4c1539051049796a0f50aa342f9a27cde57318f2fc41ed50b0dbc4ac"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b9ac09853b2a3e0d0082104036579809679e7715671cfbf89d83c1cb2a30f58"}, + {file = 
"regex-2023.10.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ebedc192abbc7fd13c5ee800e83a6df252bec691eb2c4bedc9f8b2e2903f5e2a"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d8a993c0a0ffd5f2d3bda23d0cd75e7086736f8f8268de8a82fbc4bd0ac6791e"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:be6b7b8d42d3090b6c80793524fa66c57ad7ee3fe9722b258aec6d0672543fd0"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4023e2efc35a30e66e938de5aef42b520c20e7eda7bb5fb12c35e5d09a4c43f6"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0d47840dc05e0ba04fe2e26f15126de7c755496d5a8aae4a08bda4dd8d646c54"}, + {file = "regex-2023.10.3-cp311-cp311-win32.whl", hash = "sha256:9145f092b5d1977ec8c0ab46e7b3381b2fd069957b9862a43bd383e5c01d18c2"}, + {file = "regex-2023.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:b6104f9a46bd8743e4f738afef69b153c4b8b592d35ae46db07fc28ae3d5fb7c"}, + {file = "regex-2023.10.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bff507ae210371d4b1fe316d03433ac099f184d570a1a611e541923f78f05037"}, + {file = "regex-2023.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be5e22bbb67924dea15039c3282fa4cc6cdfbe0cbbd1c0515f9223186fc2ec5f"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a992f702c9be9c72fa46f01ca6e18d131906a7180950958f766c2aa294d4b41"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7434a61b158be563c1362d9071358f8ab91b8d928728cd2882af060481244c9e"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2169b2dcabf4e608416f7f9468737583ce5f0a6e8677c4efbf795ce81109d7c"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9e908ef5889cda4de038892b9accc36d33d72fb3e12c747e2799a0e806ec841"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12bd4bc2c632742c7ce20db48e0d99afdc05e03f0b4c1af90542e05b809a03d9"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bc72c231f5449d86d6c7d9cc7cd819b6eb30134bb770b8cfdc0765e48ef9c420"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bce8814b076f0ce5766dc87d5a056b0e9437b8e0cd351b9a6c4e1134a7dfbda9"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:ba7cd6dc4d585ea544c1412019921570ebd8a597fabf475acc4528210d7c4a6f"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b0c7d2f698e83f15228ba41c135501cfe7d5740181d5903e250e47f617eb4292"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5a8f91c64f390ecee09ff793319f30a0f32492e99f5dc1c72bc361f23ccd0a9a"}, + {file = "regex-2023.10.3-cp312-cp312-win32.whl", hash = "sha256:ad08a69728ff3c79866d729b095872afe1e0557251da4abb2c5faff15a91d19a"}, + {file = "regex-2023.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:39cdf8d141d6d44e8d5a12a8569d5a227f645c87df4f92179bd06e2e2705e76b"}, + {file = "regex-2023.10.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4a3ee019a9befe84fa3e917a2dd378807e423d013377a884c1970a3c2792d293"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76066d7ff61ba6bf3cb5efe2428fc82aac91802844c022d849a1f0f53820502d"}, + {file = 
"regex-2023.10.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe50b61bab1b1ec260fa7cd91106fa9fece57e6beba05630afe27c71259c59b"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fd88f373cb71e6b59b7fa597e47e518282455c2734fd4306a05ca219a1991b0"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3ab05a182c7937fb374f7e946f04fb23a0c0699c0450e9fb02ef567412d2fa3"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dac37cf08fcf2094159922edc7a2784cfcc5c70f8354469f79ed085f0328ebdf"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e54ddd0bb8fb626aa1f9ba7b36629564544954fff9669b15da3610c22b9a0991"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3367007ad1951fde612bf65b0dffc8fd681a4ab98ac86957d16491400d661302"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:16f8740eb6dbacc7113e3097b0a36065a02e37b47c936b551805d40340fb9971"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:f4f2ca6df64cbdd27f27b34f35adb640b5d2d77264228554e68deda54456eb11"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:39807cbcbe406efca2a233884e169d056c35aa7e9f343d4e78665246a332f597"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7eece6fbd3eae4a92d7c748ae825cbc1ee41a89bb1c3db05b5578ed3cfcfd7cb"}, + {file = "regex-2023.10.3-cp37-cp37m-win32.whl", hash = "sha256:ce615c92d90df8373d9e13acddd154152645c0dc060871abf6bd43809673d20a"}, + {file = "regex-2023.10.3-cp37-cp37m-win_amd64.whl", hash = "sha256:0f649fa32fe734c4abdfd4edbb8381c74abf5f34bc0b3271ce687b23729299ed"}, + {file = "regex-2023.10.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9b98b7681a9437262947f41c7fac567c7e1f6eddd94b0483596d320092004533"}, + {file = "regex-2023.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:91dc1d531f80c862441d7b66c4505cd6ea9d312f01fb2f4654f40c6fdf5cc37a"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82fcc1f1cc3ff1ab8a57ba619b149b907072e750815c5ba63e7aa2e1163384a4"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7979b834ec7a33aafae34a90aad9f914c41fd6eaa8474e66953f3f6f7cbd4368"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef71561f82a89af6cfcbee47f0fabfdb6e63788a9258e913955d89fdd96902ab"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd829712de97753367153ed84f2de752b86cd1f7a88b55a3a775eb52eafe8a94"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00e871d83a45eee2f8688d7e6849609c2ca2a04a6d48fba3dff4deef35d14f07"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:706e7b739fdd17cb89e1fbf712d9dc21311fc2333f6d435eac2d4ee81985098c"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cc3f1c053b73f20c7ad88b0d1d23be7e7b3901229ce89f5000a8399746a6e039"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:6f85739e80d13644b981a88f529d79c5bdf646b460ba190bffcaf6d57b2a9863"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:741ba2f511cc9626b7561a440f87d658aabb3d6b744a86a3c025f866b4d19e7f"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e77c90ab5997e85901da85131fd36acd0ed2221368199b65f0d11bca44549711"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:979c24cbefaf2420c4e377ecd1f165ea08cc3d1fbb44bdc51bccbbf7c66a2cb4"}, + {file = "regex-2023.10.3-cp38-cp38-win32.whl", hash = "sha256:58837f9d221744d4c92d2cf7201c6acd19623b50c643b56992cbd2b745485d3d"}, + {file = "regex-2023.10.3-cp38-cp38-win_amd64.whl", hash = "sha256:c55853684fe08d4897c37dfc5faeff70607a5f1806c8be148f1695be4a63414b"}, + {file = "regex-2023.10.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2c54e23836650bdf2c18222c87f6f840d4943944146ca479858404fedeb9f9af"}, + {file = "regex-2023.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:69c0771ca5653c7d4b65203cbfc5e66db9375f1078689459fe196fe08b7b4930"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ac965a998e1388e6ff2e9781f499ad1eaa41e962a40d11c7823c9952c77123e"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c0e8fae5b27caa34177bdfa5a960c46ff2f78ee2d45c6db15ae3f64ecadde14"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c56c3d47da04f921b73ff9415fbaa939f684d47293f071aa9cbb13c94afc17d"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ef1e014eed78ab650bef9a6a9cbe50b052c0aebe553fb2881e0453717573f52"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d29338556a59423d9ff7b6eb0cb89ead2b0875e08fe522f3e068b955c3e7b59b"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9c6d0ced3c06d0f183b73d3c5920727268d2201aa0fe6d55c60d68c792ff3588"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:994645a46c6a740ee8ce8df7911d4aee458d9b1bc5639bc968226763d07f00fa"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:66e2fe786ef28da2b28e222c89502b2af984858091675044d93cb50e6f46d7af"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:11175910f62b2b8c055f2b089e0fedd694fe2be3941b3e2633653bc51064c528"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:06e9abc0e4c9ab4779c74ad99c3fc10d3967d03114449acc2c2762ad4472b8ca"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fb02e4257376ae25c6dd95a5aec377f9b18c09be6ebdefa7ad209b9137b73d48"}, + {file = "regex-2023.10.3-cp39-cp39-win32.whl", hash = "sha256:3b2c3502603fab52d7619b882c25a6850b766ebd1b18de3df23b2f939360e1bd"}, + {file = "regex-2023.10.3-cp39-cp39-win_amd64.whl", hash = "sha256:adbccd17dcaff65704c856bd29951c58a1bd4b2b0f8ad6b826dbd543fe740988"}, + {file = "regex-2023.10.3.tar.gz", hash = "sha256:3fef4f844d2290ee0ba57addcec17eec9e3df73f10a2748485dfd6a3a188cc0f"}, ] [[package]] @@ -858,42 +1164,93 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "rq" +version = "1.15.1" +description = "RQ is a simple, lightweight, library for creating background jobs, 
and processing them." +optional = true +python-versions = ">=3.6" +files = [ + {file = "rq-1.15.1-py2.py3-none-any.whl", hash = "sha256:6e243d8d9c4af4686ded4b01b25ea1ff4bac4fc260b02638fbe9c8c17b004bd1"}, + {file = "rq-1.15.1.tar.gz", hash = "sha256:1f49f4ac1a084044bb8e95b3f305c0bf17e55618b08c18e0b60c080f12d6f008"}, +] + +[package.dependencies] +click = ">=5.0.0" +redis = ">=4.0.0" + +[[package]] +name = "sniffio" +version = "1.3.0" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, + {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, +] + +[[package]] +name = "starlette" +version = "0.27.0" +description = "The little ASGI library that shines." +optional = true +python-versions = ">=3.7" +files = [ + {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"}, + {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"}, +] + +[package.dependencies] +anyio = ">=3.4.0,<5" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"] + [[package]] name = "tiktoken" -version = "0.5.1" +version = "0.5.2" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.8" files = [ - {file = "tiktoken-0.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2b0bae3fd56de1c0a5874fb6577667a3c75bf231a6cef599338820210c16e40a"}, - {file = "tiktoken-0.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e529578d017045e2f0ed12d2e00e7e99f780f477234da4aae799ec4afca89f37"}, - {file = "tiktoken-0.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edd2ffbb789712d83fee19ab009949f998a35c51ad9f9beb39109357416344ff"}, - {file = "tiktoken-0.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c73d47bdc1a3f1f66ffa019af0386c48effdc6e8797e5e76875f6388ff72e9"}, - {file = "tiktoken-0.5.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:46b8554b9f351561b1989157c6bb54462056f3d44e43aa4e671367c5d62535fc"}, - {file = "tiktoken-0.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:92ed3bbf71a175a6a4e5fbfcdb2c422bdd72d9b20407e00f435cf22a68b4ea9b"}, - {file = "tiktoken-0.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:714efb2f4a082635d9f5afe0bf7e62989b72b65ac52f004eb7ac939f506c03a4"}, - {file = "tiktoken-0.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a10488d1d1a5f9c9d2b2052fdb4cf807bba545818cb1ef724a7f5d44d9f7c3d4"}, - {file = "tiktoken-0.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8079ac065572fe0e7c696dbd63e1fdc12ce4cdca9933935d038689d4732451df"}, - {file = "tiktoken-0.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ef730db4097f5b13df8d960f7fdda2744fe21d203ea2bb80c120bb58661b155"}, - {file = "tiktoken-0.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:426e7def5f3f23645dada816be119fa61e587dfb4755de250e136b47a045c365"}, - {file = "tiktoken-0.5.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:323cec0031358bc09aa965c2c5c1f9f59baf76e5b17e62dcc06d1bb9bc3a3c7c"}, - {file = 
"tiktoken-0.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5abd9436f02e2c8eda5cce2ff8015ce91f33e782a7423de2a1859f772928f714"}, - {file = "tiktoken-0.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:1fe99953b63aabc0c9536fbc91c3c9000d78e4755edc28cc2e10825372046a2d"}, - {file = "tiktoken-0.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dcdc630461927718b317e6f8be7707bd0fc768cee1fdc78ddaa1e93f4dc6b2b1"}, - {file = "tiktoken-0.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1f2b3b253e22322b7f53a111e1f6d7ecfa199b4f08f3efdeb0480f4033b5cdc6"}, - {file = "tiktoken-0.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43ce0199f315776dec3ea7bf86f35df86d24b6fcde1babd3e53c38f17352442f"}, - {file = "tiktoken-0.5.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a84657c083d458593c0235926b5c993eec0b586a2508d6a2020556e5347c2f0d"}, - {file = "tiktoken-0.5.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c008375c0f3d97c36e81725308699116cd5804fdac0f9b7afc732056329d2790"}, - {file = "tiktoken-0.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:779c4dea5edd1d3178734d144d32231e0b814976bec1ec09636d1003ffe4725f"}, - {file = "tiktoken-0.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:b5dcfcf9bfb798e86fbce76d40a1d5d9e3f92131aecfa3d1e5c9ea1a20f1ef1a"}, - {file = "tiktoken-0.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b180a22db0bbcc447f691ffc3cf7a580e9e0587d87379e35e58b826ebf5bc7b"}, - {file = "tiktoken-0.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b756a65d98b7cf760617a6b68762a23ab8b6ef79922be5afdb00f5e8a9f4e76"}, - {file = "tiktoken-0.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba9873c253ca1f670e662192a0afcb72b41e0ba3e730f16c665099e12f4dac2d"}, - {file = "tiktoken-0.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74c90d2be0b4c1a2b3f7dde95cd976757817d4df080d6af0ee8d461568c2e2ad"}, - {file = "tiktoken-0.5.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:709a5220891f2b56caad8327fab86281787704931ed484d9548f65598dea9ce4"}, - {file = "tiktoken-0.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5d5a187ff9c786fae6aadd49f47f019ff19e99071dc5b0fe91bfecc94d37c686"}, - {file = "tiktoken-0.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:e21840043dbe2e280e99ad41951c00eff8ee3b63daf57cd4c1508a3fd8583ea2"}, - {file = "tiktoken-0.5.1.tar.gz", hash = "sha256:27e773564232004f4f810fd1f85236673ec3a56ed7f1206fc9ed8670ebedb97a"}, + {file = "tiktoken-0.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c4e654282ef05ec1bd06ead22141a9a1687991cef2c6a81bdd1284301abc71d"}, + {file = "tiktoken-0.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7b3134aa24319f42c27718c6967f3c1916a38a715a0fa73d33717ba121231307"}, + {file = "tiktoken-0.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6092e6e77730929c8c6a51bb0d7cfdf1b72b63c4d033d6258d1f2ee81052e9e5"}, + {file = "tiktoken-0.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72ad8ae2a747622efae75837abba59be6c15a8f31b4ac3c6156bc56ec7a8e631"}, + {file = "tiktoken-0.5.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51cba7c8711afa0b885445f0637f0fcc366740798c40b981f08c5f984e02c9d1"}, + {file = "tiktoken-0.5.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3d8c7d2c9313f8e92e987d585ee2ba0f7c40a0de84f4805b093b634f792124f5"}, + {file = "tiktoken-0.5.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:692eca18c5fd8d1e0dde767f895c17686faaa102f37640e884eecb6854e7cca7"}, + {file = "tiktoken-0.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:138d173abbf1ec75863ad68ca289d4da30caa3245f3c8d4bfb274c4d629a2f77"}, + {file = "tiktoken-0.5.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7388fdd684690973fdc450b47dfd24d7f0cbe658f58a576169baef5ae4658607"}, + {file = "tiktoken-0.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a114391790113bcff670c70c24e166a841f7ea8f47ee2fe0e71e08b49d0bf2d4"}, + {file = "tiktoken-0.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca96f001e69f6859dd52926d950cfcc610480e920e576183497ab954e645e6ac"}, + {file = "tiktoken-0.5.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:15fed1dd88e30dfadcdd8e53a8927f04e1f6f81ad08a5ca824858a593ab476c7"}, + {file = "tiktoken-0.5.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:93f8e692db5756f7ea8cb0cfca34638316dcf0841fb8469de8ed7f6a015ba0b0"}, + {file = "tiktoken-0.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:bcae1c4c92df2ffc4fe9f475bf8148dbb0ee2404743168bbeb9dcc4b79dc1fdd"}, + {file = "tiktoken-0.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b76a1e17d4eb4357d00f0622d9a48ffbb23401dcf36f9716d9bd9c8e79d421aa"}, + {file = "tiktoken-0.5.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:01d8b171bb5df4035580bc26d4f5339a6fd58d06f069091899d4a798ea279d3e"}, + {file = "tiktoken-0.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42adf7d4fb1ed8de6e0ff2e794a6a15005f056a0d83d22d1d6755a39bffd9e7f"}, + {file = "tiktoken-0.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3f894dbe0adb44609f3d532b8ea10820d61fdcb288b325a458dfc60fefb7db"}, + {file = "tiktoken-0.5.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:58ccfddb4e62f0df974e8f7e34a667981d9bb553a811256e617731bf1d007d19"}, + {file = "tiktoken-0.5.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58902a8bad2de4268c2a701f1c844d22bfa3cbcc485b10e8e3e28a050179330b"}, + {file = "tiktoken-0.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:5e39257826d0647fcac403d8fa0a474b30d02ec8ffc012cfaf13083e9b5e82c5"}, + {file = "tiktoken-0.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8bde3b0fbf09a23072d39c1ede0e0821f759b4fa254a5f00078909158e90ae1f"}, + {file = "tiktoken-0.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2ddee082dcf1231ccf3a591d234935e6acf3e82ee28521fe99af9630bc8d2a60"}, + {file = "tiktoken-0.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35c057a6a4e777b5966a7540481a75a31429fc1cb4c9da87b71c8b75b5143037"}, + {file = "tiktoken-0.5.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c4a049b87e28f1dc60509f8eb7790bc8d11f9a70d99b9dd18dfdd81a084ffe6"}, + {file = "tiktoken-0.5.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5bf5ce759089f4f6521ea6ed89d8f988f7b396e9f4afb503b945f5c949c6bec2"}, + {file = "tiktoken-0.5.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0c964f554af1a96884e01188f480dad3fc224c4bbcf7af75d4b74c4b74ae0125"}, + {file = "tiktoken-0.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:368dd5726d2e8788e47ea04f32e20f72a2012a8a67af5b0b003d1e059f1d30a3"}, + {file = "tiktoken-0.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2deef9115b8cd55536c0a02c0203512f8deb2447f41585e6d929a0b878a0dd2"}, + {file = "tiktoken-0.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ed7d380195affbf886e2f8b92b14edfe13f4768ff5fc8de315adba5b773815e"}, + 
{file = "tiktoken-0.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c76fce01309c8140ffe15eb34ded2bb94789614b7d1d09e206838fc173776a18"}, + {file = "tiktoken-0.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60a5654d6a2e2d152637dd9a880b4482267dfc8a86ccf3ab1cec31a8c76bfae8"}, + {file = "tiktoken-0.5.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:41d4d3228e051b779245a8ddd21d4336f8975563e92375662f42d05a19bdff41"}, + {file = "tiktoken-0.5.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c1cdec2c92fcde8c17a50814b525ae6a88e8e5b02030dc120b76e11db93f13"}, + {file = "tiktoken-0.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:84ddb36faedb448a50b246e13d1b6ee3437f60b7169b723a4b2abad75e914f3e"}, + {file = "tiktoken-0.5.2.tar.gz", hash = "sha256:f54c581f134a8ea96ce2023ab221d4d4d81ab614efa0b2fbce926387deb56c80"}, ] [package.dependencies] @@ -905,113 +1262,113 @@ blobfile = ["blobfile (>=2)"] [[package]] name = "tokenizers" -version = "0.14.0" +version = "0.15.0" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "tokenizers-0.14.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a90e1030d9c61de64045206c62721a36f892dcfc5bbbc119dfcd417c1ca60ca"}, - {file = "tokenizers-0.14.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7cacc5a33767bb2a03b6090eac556c301a1d961ac2949be13977bc3f20cc4e3c"}, - {file = "tokenizers-0.14.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:81994795e1b4f868a6e73107af8cdf088d31357bae6f7abf26c42874eab16f43"}, - {file = "tokenizers-0.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ec53f832bfa91abafecbf92b4259b466fb31438ab31e8291ade0fcf07de8fc2"}, - {file = "tokenizers-0.14.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:854aa813a55d6031a6399b1bca09e4e7a79a80ec05faeea77fc6809d59deb3d5"}, - {file = "tokenizers-0.14.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c34d2f02e25e0fa96e574cadb43a6f14bdefc77f84950991da6e3732489e164"}, - {file = "tokenizers-0.14.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f17d5ad725c827d3dc7db2bbe58093a33db2de49bbb639556a6d88d82f0ca19"}, - {file = "tokenizers-0.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:337a7b7d6b32c6f904faee4304987cb018d1488c88b91aa635760999f5631013"}, - {file = "tokenizers-0.14.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:98a7ceb767e1079ef2c99f52a4e7b816f2e682b2b6fef02c8eff5000536e54e1"}, - {file = "tokenizers-0.14.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:25ad4a0f883a311a5b021ed979e21559cb4184242c7446cd36e07d046d1ed4be"}, - {file = "tokenizers-0.14.0-cp310-none-win32.whl", hash = "sha256:360706b0c2c6ba10e5e26b7eeb7aef106dbfc0a81ad5ad599a892449b4973b10"}, - {file = "tokenizers-0.14.0-cp310-none-win_amd64.whl", hash = "sha256:1c2ce437982717a5e221efa3c546e636f12f325cc3d9d407c91d2905c56593d0"}, - {file = "tokenizers-0.14.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:612d0ba4f40f4d41163af9613dac59c902d017dc4166ea4537a476af807d41c3"}, - {file = "tokenizers-0.14.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3013ad0cff561d9be9ce2cc92b76aa746b4e974f20e5b4158c03860a4c8ffe0f"}, - {file = "tokenizers-0.14.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c89a0d6d2ec393a6261df71063b1e22bdd7c6ef3d77b8826541b596132bcf524"}, - {file = 
"tokenizers-0.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5514417f37fc2ca8159b27853cd992a9a4982e6c51f04bd3ac3f65f68a8fa781"}, - {file = "tokenizers-0.14.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8e761fd1af8409c607b11f084dc7cc50f80f08bd426d4f01d1c353b097d2640f"}, - {file = "tokenizers-0.14.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c16fbcd5ef10df9e51cc84238cdb05ee37e4228aaff39c01aa12b0a0409e29b8"}, - {file = "tokenizers-0.14.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3439d9f858dd9033b69769be5a56eb4fb79fde13fad14fab01edbf2b98033ad9"}, - {file = "tokenizers-0.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c19f8cdc3e84090464a6e28757f60461388cc8cd41c02c109e180a6b7c571f6"}, - {file = "tokenizers-0.14.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:df763ce657a297eb73008d5907243a7558a45ae0930b38ebcb575a24f8296520"}, - {file = "tokenizers-0.14.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:095b0b6683a9b76002aa94659f75c09e4359cb291b318d6e77a60965d7a7f138"}, - {file = "tokenizers-0.14.0-cp311-none-win32.whl", hash = "sha256:712ec0e68a399ded8e115e7e25e7017802fa25ee6c36b4eaad88481e50d0c638"}, - {file = "tokenizers-0.14.0-cp311-none-win_amd64.whl", hash = "sha256:917aa6d6615b33d9aa811dcdfb3109e28ff242fbe2cb89ea0b7d3613e444a672"}, - {file = "tokenizers-0.14.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8464ee7d43ecd9dd1723f51652f49b979052ea3bcd25329e3df44e950c8444d1"}, - {file = "tokenizers-0.14.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:84c2b96469b34825557c6fe0bc3154c98d15be58c416a9036ca90afdc9979229"}, - {file = "tokenizers-0.14.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:24b3ccec65ee6f876cd67251c1dcfa1c318c9beec5a438b134f7e33b667a8b36"}, - {file = "tokenizers-0.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde333fc56dd5fbbdf2de3067d6c0c129867d33eac81d0ba9b65752ad6ef4208"}, - {file = "tokenizers-0.14.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ddcc2f251bd8a2b2f9a7763ad4468a34cfc4ee3b0fba3cfb34d12c964950cac"}, - {file = "tokenizers-0.14.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10a34eb1416dcec3c6f9afea459acd18fcc93234687de605a768a987eda589ab"}, - {file = "tokenizers-0.14.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:56bc7252530a6a20c6eed19b029914bb9cc781efbe943ca9530856051de99d0f"}, - {file = "tokenizers-0.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07f5c2324326a00c85111081d5eae4da9d64d56abb5883389b3c98bee0b50a7c"}, - {file = "tokenizers-0.14.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5efd92e44e43f36332b5f3653743dca5a0b72cdabb012f20023e220f01f675cb"}, - {file = "tokenizers-0.14.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9223bcb77a826dbc9fd0efa6bce679a96b1a01005142778bb42ce967581c5951"}, - {file = "tokenizers-0.14.0-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:e2c1b4707344d3fbfce35d76802c2429ca54e30a5ecb05b3502c1e546039a3bb"}, - {file = "tokenizers-0.14.0-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:5892ba10fe0a477bde80b9f06bce05cb9d83c15a4676dcae5cbe6510f4524bfc"}, - {file = "tokenizers-0.14.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0e1818f33ac901d5d63830cb6a69a707819f4d958ae5ecb955d8a5ad823a2e44"}, - {file = 
"tokenizers-0.14.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d06a6fe406df1e616f9e649522683411c6c345ddaaaad7e50bbb60a2cb27e04d"}, - {file = "tokenizers-0.14.0-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b6e2d4bc223dc6a99efbe9266242f1ac03eb0bef0104e6cef9f9512dd5c816b"}, - {file = "tokenizers-0.14.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08ea1f612796e438c9a7e2ad86ab3c1c05c8fe0fad32fcab152c69a3a1a90a86"}, - {file = "tokenizers-0.14.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6ab1a58c05a3bd8ece95eb5d1bc909b3fb11acbd3ff514e3cbd1669e3ed28f5b"}, - {file = "tokenizers-0.14.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:495dc7d3b78815de79dafe7abce048a76154dadb0ffc7f09b7247738557e5cef"}, - {file = "tokenizers-0.14.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:aaa0401a245d891b3b2ba9cf027dc65ca07627e11fe3ce597644add7d07064f8"}, - {file = "tokenizers-0.14.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ae4fa13a786fd0d6549da241c6a1077f9b6320a7120d922ccc201ad1d4feea8f"}, - {file = "tokenizers-0.14.0-cp37-none-win32.whl", hash = "sha256:ae0d5b5ab6032c24a2e74cc15f65b6510070926671129e922aa3826c834558d7"}, - {file = "tokenizers-0.14.0-cp37-none-win_amd64.whl", hash = "sha256:2839369a9eb948905612f5d8e70453267d9c7bf17573e5ab49c2f28368fd635d"}, - {file = "tokenizers-0.14.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:f483af09a07fcb8b8b4cd07ac1be9f58bb739704ef9156e955531299ab17ec75"}, - {file = "tokenizers-0.14.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9c2ec661d0d63e618cb145ad15ddb6a81e16d9deb7a203f385d78141da028984"}, - {file = "tokenizers-0.14.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:97e87eb7cbeff63c3b1aa770fdcf18ea4f1c852bfb75d0c913e71b8924a99d61"}, - {file = "tokenizers-0.14.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98c4bd09b47f77f41785488971543de63db82608f0dc0bc6646c876b5ca44d1f"}, - {file = "tokenizers-0.14.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0cbeb5406be31f7605d032bb261f2e728da8ac1f4f196c003bc640279ceb0f52"}, - {file = "tokenizers-0.14.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe799fa48fd7dd549a68abb7bee32dd3721f50210ad2e3e55058080158c72c25"}, - {file = "tokenizers-0.14.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:66daf7c6375a95970e86cb3febc48becfeec4e38b2e0195218d348d3bb86593b"}, - {file = "tokenizers-0.14.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce4b177422af79a77c46bb8f56d73827e688fdc092878cff54e24f5c07a908db"}, - {file = "tokenizers-0.14.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a9aef7a5622648b70f979e96cbc2f795eba5b28987dd62f4dbf8f1eac6d64a1a"}, - {file = "tokenizers-0.14.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:397a24feff284d39b40fdd61c1c828bb6648dfe97b6766c84fbaf7256e272d09"}, - {file = "tokenizers-0.14.0-cp38-none-win32.whl", hash = "sha256:93cc2ec19b6ff6149b2e5127ceda3117cc187dd38556a1ed93baba13dffda069"}, - {file = "tokenizers-0.14.0-cp38-none-win_amd64.whl", hash = "sha256:bf7f540ab8a6fc53fb762963edb7539b11f00af8f70b206f0a6d1a25109ad307"}, - {file = "tokenizers-0.14.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:a58d0b34586f4c5229de5aa124cf76b9455f2e01dc5bd6ed018f6e3bb12572d3"}, - {file = "tokenizers-0.14.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:90ceca6a06bb4b0048d0a51d0d47ef250d3cb37cc36b6b43334be8c02ac18b0f"}, - {file = "tokenizers-0.14.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5f6c9554bda64799b1d65052d834553bff9a6ef4a6c2114668e2ed8f1871a2a3"}, - {file = "tokenizers-0.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ee14b41024bc05ea172fc2c87f66b60d7c5c636c3a52a09a25ec18e752e6dc7"}, - {file = "tokenizers-0.14.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:879201b1c76b24dc70ce02fc42c3eeb7ff20c353ce0ee638be6449f7c80e73ba"}, - {file = "tokenizers-0.14.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ca79ea6ddde5bb32f7ad1c51de1032829c531e76bbcae58fb3ed105a31faf021"}, - {file = "tokenizers-0.14.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd5934048e60aedddf6c5b076d44ccb388702e1650e2eb7b325a1682d883fbf9"}, - {file = "tokenizers-0.14.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a1566cabd4bf8f09d6c1fa7a3380a181801a495e7218289dbbd0929de471711"}, - {file = "tokenizers-0.14.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a8fc72a7adc6fa12db38100c403d659bc01fbf6e57f2cc9219e75c4eb0ea313c"}, - {file = "tokenizers-0.14.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7fd08ed6c14aa285482d9e5f48c04de52bdbcecaca0d30465d7a36bbea6b14df"}, - {file = "tokenizers-0.14.0-cp39-none-win32.whl", hash = "sha256:3279c0c1d5fdea7d3499c582fed392fb0463d1046544ca010f53aeee5d2ce12c"}, - {file = "tokenizers-0.14.0-cp39-none-win_amd64.whl", hash = "sha256:203ca081d25eb6e4bc72ea04d552e457079c5c6a3713715ece246f6ca02ca8d0"}, - {file = "tokenizers-0.14.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:b45704d5175499387e33a1dd5c8d49ab4d7ef3c36a9ba8a410bb3e68d10f80a0"}, - {file = "tokenizers-0.14.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6d17d5eb38ccc2f615a7a3692dfa285abe22a1e6d73bbfd753599e34ceee511c"}, - {file = "tokenizers-0.14.0-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4a7e6e7989ba77a20c33f7a8a45e0f5b3e7530b2deddad2c3b2a58b323156134"}, - {file = "tokenizers-0.14.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81876cefea043963abf6c92e0cf73ce6ee10bdc43245b6565ce82c0305c2e613"}, - {file = "tokenizers-0.14.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d8cd05f73d1ce875a23bfdb3a572417c0f46927c6070ca43a7f6f044c3d6605"}, - {file = "tokenizers-0.14.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:419a38b89be0081d872eac09449c03cd6589c2ee47461184592ee4b1ad93af1d"}, - {file = "tokenizers-0.14.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:4caf274a9ba944eb83bc695beef95abe24ce112907fb06217875894d8a4f62b8"}, - {file = "tokenizers-0.14.0-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:6ecb3a7741d7ebf65db93d246b102efca112860707e07233f1b88703cb01dbc5"}, - {file = "tokenizers-0.14.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cb7fe9a383cb2932848e459d0277a681d58ad31aa6ccda204468a8d130a9105c"}, - {file = "tokenizers-0.14.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4731e0577780d85788ab4f00d54e16e76fe305739396e6fb4c54b89e6fa12de"}, - {file = "tokenizers-0.14.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9900291ccd19417128e328a26672390365dab1d230cd00ee7a5e2a0319e2716"}, - {file = 
"tokenizers-0.14.0-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:493e6932fbca6875fd2e51958f1108ce4c5ae41aa6f2b8017c5f07beaff0a1ac"}, - {file = "tokenizers-0.14.0-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1792e6b46b89aba0d501c0497f38c96e5b54735379fd8a07a28f45736ba51bb1"}, - {file = "tokenizers-0.14.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0af26d37c7080688ef606679f3a3d44b63b881de9fa00cc45adc240ba443fd85"}, - {file = "tokenizers-0.14.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:99379ec4d7023c07baed85c68983bfad35fd210dfbc256eaafeb842df7f888e3"}, - {file = "tokenizers-0.14.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:84118aa60dcbb2686730342a0cb37e54e02fde001f936557223d46b6cd8112cd"}, - {file = "tokenizers-0.14.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d616e1859ffcc8fcda60f556c34338b96fb72ca642f6dafc3b1d2aa1812fb4dd"}, - {file = "tokenizers-0.14.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7826b79bbbffc2150bf8d621297cc600d8a1ea53992547c4fd39630de10466b4"}, - {file = "tokenizers-0.14.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:eb3931d734f1e66b77c2a8e22ebe0c196f127c7a0f48bf9601720a6f85917926"}, - {file = "tokenizers-0.14.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:6a475b5cafc7a740bf33d00334b1f2b434b6124198384d8b511931a891be39ff"}, - {file = "tokenizers-0.14.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3d3c9e286ae00b0308903d2ef7b31efc84358109aa41abaa27bd715401c3fef4"}, - {file = "tokenizers-0.14.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:27244e96810434cf705f317e9b74a1163cd2be20bdbd3ed6b96dae1914a6778c"}, - {file = "tokenizers-0.14.0-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ca9b0536fd5f03f62427230e85d9d57f9eed644ab74c319ae4877c9144356aed"}, - {file = "tokenizers-0.14.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f64cdff8c0454295b739d77e25cff7264fa9822296395e60cbfecc7f66d88fb"}, - {file = "tokenizers-0.14.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a00cdfb40544656b7a3b176049d63227d5e53cf2574912514ebb4b9da976aaa1"}, - {file = "tokenizers-0.14.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b611d96b96957cb2f39560c77cc35d2fcb28c13d5b7d741412e0edfdb6f670a8"}, - {file = "tokenizers-0.14.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:27ad1c02fdd74dcf3502fafb87393412e65f698f2e3aba4ad568a1f3b43d5c9f"}, - {file = "tokenizers-0.14.0.tar.gz", hash = "sha256:a06efa1f19dcc0e9bd0f4ffbf963cb0217af92a9694f68fe7eee5e1c6ddc4bde"}, + {file = "tokenizers-0.15.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:cd3cd0299aaa312cd2988957598f80becd04d5a07338741eca076057a2b37d6e"}, + {file = "tokenizers-0.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a922c492c721744ee175f15b91704be2d305569d25f0547c77cd6c9f210f9dc"}, + {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:331dd786d02fc38698f835fff61c99480f98b73ce75a4c65bd110c9af5e4609a"}, + {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88dd0961c437d413ab027f8b115350c121d49902cfbadf08bb8f634b15fa1814"}, + {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6fdcc55339df7761cd52e1fbe8185d3b3963bc9e3f3545faa6c84f9e8818259a"}, + {file = 
"tokenizers-0.15.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1480b0051d8ab5408e8e4db2dc832f7082ea24aa0722c427bde2418c6f3bd07"}, + {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9855e6c258918f9cf62792d4f6ddfa6c56dccd8c8118640f867f6393ecaf8bd7"}, + {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de9529fe75efcd54ba8d516aa725e1851df9199f0669b665c55e90df08f5af86"}, + {file = "tokenizers-0.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8edcc90a36eab0705fe9121d6c77c6e42eeef25c7399864fd57dfb27173060bf"}, + {file = "tokenizers-0.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ae17884aafb3e94f34fb7cfedc29054f5f54e142475ebf8a265a4e388fee3f8b"}, + {file = "tokenizers-0.15.0-cp310-none-win32.whl", hash = "sha256:9a3241acdc9b44cff6e95c4a55b9be943ef3658f8edb3686034d353734adba05"}, + {file = "tokenizers-0.15.0-cp310-none-win_amd64.whl", hash = "sha256:4b31807cb393d6ea31926b307911c89a1209d5e27629aa79553d1599c8ffdefe"}, + {file = "tokenizers-0.15.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:af7e9be8c05d30bb137b9fd20f9d99354816599e5fd3d58a4b1e28ba3b36171f"}, + {file = "tokenizers-0.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c3d7343fa562ea29661783344a2d83662db0d3d17a6fa6a403cac8e512d2d9fd"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:32371008788aeeb0309a9244809a23e4c0259625e6b74a103700f6421373f395"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9db64c7c9954fbae698884c5bb089764edc549731e5f9b7fa1dd4e4d78d77f"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dbed5944c31195514669cf6381a0d8d47f164943000d10f93d6d02f0d45c25e0"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aab16c4a26d351d63e965b0c792f5da7227a37b69a6dc6d922ff70aa595b1b0c"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3c2b60b12fdd310bf85ce5d7d3f823456b9b65eed30f5438dd7761879c495983"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0344d6602740e44054a9e5bbe9775a5e149c4dddaff15959bb07dcce95a5a859"}, + {file = "tokenizers-0.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4525f6997d81d9b6d9140088f4f5131f6627e4c960c2c87d0695ae7304233fc3"}, + {file = "tokenizers-0.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:65975094fef8cc68919644936764efd2ce98cf1bacbe8db2687155d2b0625bee"}, + {file = "tokenizers-0.15.0-cp311-none-win32.whl", hash = "sha256:ff5d2159c5d93015f5a4542aac6c315506df31853123aa39042672031768c301"}, + {file = "tokenizers-0.15.0-cp311-none-win_amd64.whl", hash = "sha256:2dd681b53cf615e60a31a115a3fda3980e543d25ca183797f797a6c3600788a3"}, + {file = "tokenizers-0.15.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:c9cce6ee149a3d703f86877bc2a6d997e34874b2d5a2d7839e36b2273f31d3d9"}, + {file = "tokenizers-0.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a0a94bc3370e6f1cc8a07a8ae867ce13b7c1b4291432a773931a61f256d44ea"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:309cfcccfc7e502cb1f1de2c9c1c94680082a65bfd3a912d5a5b2c90c677eb60"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:8413e994dd7d875ab13009127fc85633916c71213917daf64962bafd488f15dc"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d0ebf9430f901dbdc3dcb06b493ff24a3644c9f88c08e6a1d6d0ae2228b9b818"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10361e9c7864b22dd791ec5126327f6c9292fb1d23481d4895780688d5e298ac"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:babe42635b8a604c594bdc56d205755f73414fce17ba8479d142a963a6c25cbc"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3768829861e964c7a4556f5f23307fce6a23872c2ebf030eb9822dbbbf7e9b2a"}, + {file = "tokenizers-0.15.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9c91588a630adc88065e1c03ac6831e3e2112558869b9ebcb2b8afd8a14c944d"}, + {file = "tokenizers-0.15.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:77606994e793ca54ecf3a3619adc8a906a28ca223d9354b38df41cb8766a0ed6"}, + {file = "tokenizers-0.15.0-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:6fe143939f3b596681922b2df12a591a5b010e7dcfbee2202482cd0c1c2f2459"}, + {file = "tokenizers-0.15.0-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:b7bee0f1795e3e3561e9a557061b1539e5255b8221e3f928f58100282407e090"}, + {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5d37e7f4439b4c46192ab4f2ff38ab815e4420f153caa13dec9272ef14403d34"}, + {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caadf255cf7f951b38d10097836d1f3bcff4aeaaffadfdf748bab780bf5bff95"}, + {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:05accb9162bf711a941b1460b743d62fec61c160daf25e53c5eea52c74d77814"}, + {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26a2ef890740127cb115ee5260878f4a677e36a12831795fd7e85887c53b430b"}, + {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e54c5f26df14913620046b33e822cb3bcd091a332a55230c0e63cc77135e2169"}, + {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:669b8ed653a578bcff919566631156f5da3aab84c66f3c0b11a6281e8b4731c7"}, + {file = "tokenizers-0.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0ea480d943297df26f06f508dab6e012b07f42bf3dffdd36e70799368a5f5229"}, + {file = "tokenizers-0.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bc80a0a565ebfc7cd89de7dd581da8c2b3238addfca6280572d27d763f135f2f"}, + {file = "tokenizers-0.15.0-cp37-none-win32.whl", hash = "sha256:cdd945e678bbdf4517d5d8de66578a5030aeefecdb46f5320b034de9cad8d4dd"}, + {file = "tokenizers-0.15.0-cp37-none-win_amd64.whl", hash = "sha256:1ab96ab7dc706e002c32b2ea211a94c1c04b4f4de48354728c3a6e22401af322"}, + {file = "tokenizers-0.15.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:f21c9eb71c9a671e2a42f18b456a3d118e50c7f0fc4dd9fa8f4eb727fea529bf"}, + {file = "tokenizers-0.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a5f4543a35889679fc3052086e69e81880b2a5a28ff2a52c5a604be94b77a3f"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f8aa81afec893e952bd39692b2d9ef60575ed8c86fce1fd876a06d2e73e82dca"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1574a5a4af22c3def93fe8fe4adcc90a39bf5797ed01686a4c46d1c3bc677d2f"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c7982fd0ec9e9122d03b209dac48cebfea3de0479335100ef379a9a959b9a5a"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d16b647032df2ce2c1f9097236e046ea9fedd969b25637b9d5d734d78aa53b"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b3cdf29e6f9653da330515dc8fa414be5a93aae79e57f8acc50d4028dd843edf"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7286f3df10de840867372e3e64b99ef58c677210e3ceb653cd0e740a5c53fe78"}, + {file = "tokenizers-0.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aabc83028baa5a36ce7a94e7659250f0309c47fa4a639e5c2c38e6d5ea0de564"}, + {file = "tokenizers-0.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:72f78b0e0e276b1fc14a672fa73f3acca034ba8db4e782124a2996734a9ba9cf"}, + {file = "tokenizers-0.15.0-cp38-none-win32.whl", hash = "sha256:9680b0ecc26e7e42f16680c1aa62e924d58d1c2dd992707081cc10a374896ea2"}, + {file = "tokenizers-0.15.0-cp38-none-win_amd64.whl", hash = "sha256:f17cbd88dab695911cbdd385a5a7e3709cc61dff982351f5d1b5939f074a2466"}, + {file = "tokenizers-0.15.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:3661862df7382c5eb23ac4fbf7c75e69b02dc4f5784e4c5a734db406b5b24596"}, + {file = "tokenizers-0.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3045d191dad49647f5a5039738ecf1c77087945c7a295f7bcf051c37067e883"}, + {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9fcaad9ab0801f14457d7c820d9f246b5ab590c407fc6b073819b1573097aa7"}, + {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a79f17027f24fe9485701c8dbb269b9c713954ec3bdc1e7075a66086c0c0cd3c"}, + {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:01a3aa332abc4bee7640563949fcfedca4de8f52691b3b70f2fc6ca71bfc0f4e"}, + {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05b83896a893cdfedad8785250daa3ba9f0504848323471524d4783d7291661e"}, + {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cbbf2489fcf25d809731ba2744ff278dd07d9eb3f8b7482726bd6cae607073a4"}, + {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab806ad521a5e9de38078b7add97589c313915f6f5fec6b2f9f289d14d607bd6"}, + {file = "tokenizers-0.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4a522612d5c88a41563e3463226af64e2fa00629f65cdcc501d1995dd25d23f5"}, + {file = "tokenizers-0.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e58a38c4e6075810bdfb861d9c005236a72a152ebc7005941cc90d1bbf16aca9"}, + {file = "tokenizers-0.15.0-cp39-none-win32.whl", hash = "sha256:b8034f1041fd2bd2b84ff9f4dc4ae2e1c3b71606820a9cd5c562ebd291a396d1"}, + {file = "tokenizers-0.15.0-cp39-none-win_amd64.whl", hash = "sha256:edde9aa964145d528d0e0dbf14f244b8a85ebf276fb76869bc02e2530fa37a96"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:309445d10d442b7521b98083dc9f0b5df14eca69dbbfebeb98d781ee2cef5d30"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d3125a6499226d4d48efc54f7498886b94c418e93a205b673bc59364eecf0804"}, + {file = 
"tokenizers-0.15.0-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ed56ddf0d54877bb9c6d885177db79b41576e61b5ef6defeb579dcb803c04ad5"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b22cd714706cc5b18992a232b023f736e539495f5cc61d2d28d176e55046f6c"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fac2719b1e9bc8e8e7f6599b99d0a8e24f33d023eb8ef644c0366a596f0aa926"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85ddae17570ec7e5bfaf51ffa78d044f444a8693e1316e1087ee6150596897ee"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:76f1bed992e396bf6f83e3df97b64ff47885e45e8365f8983afed8556a0bc51f"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3bb0f4df6dce41a1c7482087b60d18c372ef4463cb99aa8195100fcd41e0fd64"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:22c27672c27a059a5f39ff4e49feed8c7f2e1525577c8a7e3978bd428eb5869d"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78104f5d035c9991f92831fc0efe9e64a05d4032194f2a69f67aaa05a4d75bbb"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a40b73dc19d82c3e3ffb40abdaacca8fbc95eeb26c66b7f9f860aebc07a73998"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d801d1368188c74552cd779b1286e67cb9fd96f4c57a9f9a2a09b6def9e1ab37"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82641ffb13a4da1293fcc9f437d457647e60ed0385a9216cd135953778b3f0a1"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:160f9d1810f2c18fffa94aa98bf17632f6bd2dabc67fcb01a698ca80c37d52ee"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:8d7d6eea831ed435fdeeb9bcd26476226401d7309d115a710c65da4088841948"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f6456bec6c557d63d8ec0023758c32f589e1889ed03c055702e84ce275488bed"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eef39a502fad3bf104b9e1906b4fb0cee20e44e755e51df9a98f8922c3bf6d4"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1e4664c5b797e093c19b794bbecc19d2367e782b4a577d8b7c1821db5dc150d"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ca003fb5f3995ff5cf676db6681b8ea5d54d3b30bea36af1120e78ee1a4a4cdf"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7f17363141eb0c53752c89e10650b85ef059a52765d0802ba9613dbd2d21d425"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:8a765db05581c7d7e1280170f2888cda351760d196cc059c37ea96f121125799"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:2a0dd641a72604486cd7302dd8f87a12c8a9b45e1755e47d2682733f097c1af5"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0a1a3c973e4dc97797fc19e9f11546c95278ffc55c4492acb742f69e035490bc"}, + {file = 
"tokenizers-0.15.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4fab75642aae4e604e729d6f78e0addb9d7e7d49e28c8f4d16b24da278e5263"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65f80be77f6327a86d8fd35a4467adcfe6174c159b4ab52a1a8dd4c6f2d7d9e1"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:a8da7533dbe66b88afd430c56a2f2ce1fd82e2681868f857da38eeb3191d7498"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa8eb4584fc6cbe6a84d7a7864be3ed28e23e9fd2146aa8ef1814d579df91958"}, + {file = "tokenizers-0.15.0.tar.gz", hash = "sha256:10c7e6e7b4cabd757da59e93f5f8d1126291d16f8b54f28510825ef56a3e5d0e"}, ] [package.dependencies] -huggingface_hub = ">=0.16.4,<0.17" +huggingface_hub = ">=0.16.4,<1.0" [package.extras] dev = ["tokenizers[testing]"] @@ -1040,113 +1397,147 @@ telegram = ["requests"] [[package]] name = "typing-extensions" -version = "4.8.0" +version = "4.9.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, - {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, + {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, + {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, ] [[package]] name = "urllib3" -version = "2.0.7" +version = "2.1.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"}, - {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"}, + {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, + {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, ] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "uvicorn" +version = "0.24.0.post1" +description = "The lightning-fast ASGI server." 
+optional = true +python-versions = ">=3.8" +files = [ + {file = "uvicorn-0.24.0.post1-py3-none-any.whl", hash = "sha256:7c84fea70c619d4a710153482c0d230929af7bcf76c7bfa6de151f0a3a80121e"}, + {file = "uvicorn-0.24.0.post1.tar.gz", hash = "sha256:09c8e5a79dc466bdf28dead50093957db184de356fcdc48697bad3bde4c2588e"}, +] + +[package.dependencies] +click = ">=7.0" +h11 = ">=0.8" +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} + +[package.extras] +standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] + [[package]] name = "yarl" -version = "1.9.2" +version = "1.9.4" description = "Yet another URL library" optional = false python-versions = ">=3.7" files = [ - {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"}, - {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"}, - {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"}, - {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"}, - {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"}, - {file = 
"yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"}, - {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"}, - {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"}, - {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"}, - {file 
= "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"},
-    {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"},
-    {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"},
-    {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"},
-    {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"},
-    {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"},
-    {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"},
-    {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"},
-    {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"},
-    {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"},
-    {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"},
-    {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"},
-    {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"},
-    {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"},
-    {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"},
-    {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"},
-    {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"},
-    {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"},
-    {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"},
-    {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"},
-    {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"},
-    {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"},
-    {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"},
-    {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"},
-    {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"},
-    {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"},
-    {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"},
-    {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"},
-    {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"},
-    {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"},
-    {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"},
-    {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"},
-    {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"},
+    {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"},
+    {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"},
+    {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"},
+    {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"},
+    {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"},
+    {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"},
+    {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"},
+    {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"},
+    {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"},
+    {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"},
+    {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"},
+    {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"},
+    {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"},
+    {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"},
+    {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"},
+    {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"},
+    {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"},
+    {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"},
+    {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"},
+    {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"},
+    {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"},
+    {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"},
+    {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"},
+    {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"},
+    {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"},
+    {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"},
+    {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"},
+    {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"},
+    {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"},
+    {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"},
+    {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"},
+    {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"},
+    {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"},
+    {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"},
+    {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"},
+    {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"},
+    {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"},
+    {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"},
+    {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"},
+    {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"},
+    {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"},
+    {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"},
+    {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"},
+    {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"},
+    {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"},
+    {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"},
+    {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"},
+    {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"},
+    {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"},
+    {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"},
+    {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"},
+    {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"},
+    {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"},
+    {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"},
+    {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"},
+    {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"},
+    {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"},
+    {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"},
+    {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"},
+    {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"},
+    {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"},
+    {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"},
+    {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"},
+    {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"},
+    {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"},
+    {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"},
+    {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"},
+    {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"},
+    {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"},
+    {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"},
+    {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"},
+    {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"},
+    {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"},
+    {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"},
+    {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"},
+    {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"},
+    {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"},
+    {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"},
+    {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"},
+    {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"},
+    {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"},
+    {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"},
+    {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"},
+    {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"},
+    {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"},
+    {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"},
+    {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"},
+    {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"},
+    {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"},
+    {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"},
 ]
 
 [package.dependencies]
@@ -1168,7 +1559,12 @@ files = [
 docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
 testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
 
+[extras]
+extra-proxy = []
+proxy = ["backoff", "fastapi", "rq", "uvicorn"]
+proxy-otel = []
+
 [metadata]
 lock-version = "2.0"
-python-versions = "^3.8"
-content-hash = "531003aaa1ec2726a59a79a3458a46a189cb603ffc8e8dad5117a2f65dff3b93"
+python-versions = "^3.8.1"
+content-hash = "4a017b5085b0c46d270e45977a88d1fe1812ddb413cf127dcf1d2e6b39fee286"
diff --git a/pyproject.toml b/pyproject.toml
index 62ca2f5a0..91c1236ef 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "1.12.3"
+version = "1.15.1"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
@@ -55,7 +55,7 @@ requires = ["poetry-core", "wheel"]
 build-backend = "poetry.core.masonry.api"
 
 [tool.commitizen]
-version = "1.12.3"
+version = "1.15.1"
 version_files = [
     "pyproject.toml:^version"
 ]
diff --git a/requirements.txt b/requirements.txt
index d79f5ac79..986580d7a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 # LITELLM PROXY DEPENDENCIES #
-openai
+openai>=1.0.0
 fastapi
 tomli
 pydantic>=2.5
@@ -17,4 +17,5 @@ celery
 psutil
 mangum
 google-generativeai
-async_generator # for ollama
\ No newline at end of file
+traceloop-sdk==0.5.3
+langfuse==1.14.0