diff --git a/.circleci/config.yml b/.circleci/config.yml
index 3e18d26dd..321236f46 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -34,6 +34,7 @@ jobs:
pip install "boto3>=1.28.57"
pip install "aioboto3>=12.3.0"
pip install langchain
+ pip install lunary==0.2.5
pip install "langfuse>=2.0.0"
pip install numpydoc
pip install traceloop-sdk==0.0.69
diff --git a/README.md b/README.md
index 566b9d391..5a1440f97 100644
--- a/README.md
+++ b/README.md
@@ -25,6 +25,7 @@
LiteLLM manages:
+
- Translate inputs to provider's `completion`, `embedding`, and `image_generation` endpoints
- [Consistent output](https://docs.litellm.ai/docs/completion/output), text responses will always be available at `['choices'][0]['message']['content']`
- Retry/fallback logic across multiple deployments (e.g. Azure/OpenAI) - [Router](https://docs.litellm.ai/docs/routing)
@@ -38,15 +39,14 @@ LiteLLM manages:
Support for more providers. Missing a provider or LLM Platform? Raise a [feature request](https://github.com/BerriAI/litellm/issues/new?assignees=&labels=enhancement&projects=&template=feature_request.yml&title=%5BFeature%5D%3A+).
# Usage ([**Docs**](https://docs.litellm.ai/docs/))
+
> [!IMPORTANT]
> LiteLLM v1.0.0 now requires `openai>=1.0.0`. Migration guide [here](https://docs.litellm.ai/docs/migration)
-
-
```shell
pip install litellm
```
@@ -55,9 +55,9 @@ pip install litellm
from litellm import completion
import os
-## set ENV variables
-os.environ["OPENAI_API_KEY"] = "your-openai-key"
-os.environ["COHERE_API_KEY"] = "your-cohere-key"
+## set ENV variables
+os.environ["OPENAI_API_KEY"] = "your-openai-key"
+os.environ["COHERE_API_KEY"] = "your-cohere-key"
messages = [{ "content": "Hello, how are you?","role": "user"}]
@@ -88,8 +88,10 @@ print(response)
```
## Streaming ([Docs](https://docs.litellm.ai/docs/completion/stream))
+
LiteLLM supports streaming the model response back; pass `stream=True` to get a streaming iterator in the response.
Streaming is supported for all models (Bedrock, Huggingface, TogetherAI, Azure, OpenAI, etc.)
+
```python
from litellm import completion
response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
@@ -103,20 +105,22 @@ for part in response:
```
## Logging Observability ([Docs](https://docs.litellm.ai/docs/observability/callbacks))
-LiteLLM exposes pre defined callbacks to send data to Langfuse, DynamoDB, s3 Buckets, LLMonitor, Helicone, Promptlayer, Traceloop, Athina, Slack
+
+LiteLLM exposes pre-defined callbacks to send data to Lunary, Langfuse, DynamoDB, S3 Buckets, Helicone, Promptlayer, Traceloop, Athina, Slack
+
```python
from litellm import completion
## set env variables for logging tools
+os.environ["LUNARY_PUBLIC_KEY"] = "your-lunary-public-key"
os.environ["LANGFUSE_PUBLIC_KEY"] = ""
os.environ["LANGFUSE_SECRET_KEY"] = ""
-os.environ["LLMONITOR_APP_ID"] = "your-llmonitor-app-id"
os.environ["ATHINA_API_KEY"] = "your-athina-api-key"
os.environ["OPENAI_API_KEY"]
# set callbacks
-litellm.success_callback = ["langfuse", "llmonitor", "athina"] # log input/output to langfuse, llmonitor, supabase, athina etc
+litellm.success_callback = ["lunary", "langfuse", "athina"] # log input/output to lunary, langfuse, supabase, athina etc
#openai call
response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
@@ -126,7 +130,8 @@ response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content
Set Budgets & Rate limits across multiple projects
-The proxy provides:
+The proxy provides:
+
1. [Hooks for auth](https://docs.litellm.ai/docs/proxy/virtual_keys#custom-auth)
2. [Hooks for logging](https://docs.litellm.ai/docs/proxy/logging#step-1---create-your-custom-litellm-callback-class)
3. [Cost tracking](https://docs.litellm.ai/docs/proxy/virtual_keys#tracking-spend)
@@ -134,13 +139,14 @@ The proxy provides:
## 📖 Proxy Endpoints - [Swagger Docs](https://litellm-api.up.railway.app/)
-## Quick Start Proxy - CLI
+## Quick Start Proxy - CLI
```shell
pip install 'litellm[proxy]'
```
### Step 1: Start litellm proxy
+
```shell
$ litellm --model huggingface/bigcode/starcoder
@@ -148,6 +154,7 @@ $ litellm --model huggingface/bigcode/starcoder
```
### Step 2: Make ChatCompletions Request to Proxy
+
```python
import openai # openai v1.0.0+
client = openai.OpenAI(api_key="anything",base_url="http://0.0.0.0:4000") # set proxy to base_url
@@ -163,13 +170,15 @@ print(response)
```
## Proxy Key Management ([Docs](https://docs.litellm.ai/docs/proxy/virtual_keys))
-UI on `/ui` on your proxy server
+
+UI on `/ui` on your proxy server

Set budgets and rate limits across multiple projects
`POST /key/generate`
### Request
+
```shell
curl 'http://0.0.0.0:4000/key/generate' \
--header 'Authorization: Bearer sk-1234' \
@@ -178,6 +187,7 @@ curl 'http://0.0.0.0:4000/key/generate' \
```
### Expected Response
+
```shell
{
"key": "sk-kdEXbIqZRwEeEiHwdg7sFA", # Bearer token
@@ -186,56 +196,60 @@ curl 'http://0.0.0.0:4000/key/generate' \
```
## Supported Providers ([Docs](https://docs.litellm.ai/docs/providers))
-| Provider | [Completion](https://docs.litellm.ai/docs/#basic-usage) | [Streaming](https://docs.litellm.ai/docs/completion/stream#streaming-responses) | [Async Completion](https://docs.litellm.ai/docs/completion/stream#async-completion) | [Async Streaming](https://docs.litellm.ai/docs/completion/stream#async-streaming) | [Async Embedding](https://docs.litellm.ai/docs/embedding/supported_embedding) | [Async Image Generation](https://docs.litellm.ai/docs/image_generation) |
-| ------------- | ------------- | ------------- | ------------- | ------------- | ------------- | ------------- |
-| [openai](https://docs.litellm.ai/docs/providers/openai) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [azure](https://docs.litellm.ai/docs/providers/azure) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [aws - sagemaker](https://docs.litellm.ai/docs/providers/aws_sagemaker) | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [aws - bedrock](https://docs.litellm.ai/docs/providers/bedrock) | ✅ | ✅ | ✅ | ✅ |✅ |
-| [google - vertex_ai [Gemini]](https://docs.litellm.ai/docs/providers/vertex) | ✅ | ✅ | ✅ | ✅ |
-| [google - palm](https://docs.litellm.ai/docs/providers/palm) | ✅ | ✅ | ✅ | ✅ |
-| [google AI Studio - gemini](https://docs.litellm.ai/docs/providers/gemini) | ✅ | | ✅ | | |
-| [mistral ai api](https://docs.litellm.ai/docs/providers/mistral) | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [cloudflare AI Workers](https://docs.litellm.ai/docs/providers/cloudflare_workers) | ✅ | ✅ | ✅ | ✅ |
-| [cohere](https://docs.litellm.ai/docs/providers/cohere) | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [anthropic](https://docs.litellm.ai/docs/providers/anthropic) | ✅ | ✅ | ✅ | ✅ |
-| [huggingface](https://docs.litellm.ai/docs/providers/huggingface) | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [replicate](https://docs.litellm.ai/docs/providers/replicate) | ✅ | ✅ | ✅ | ✅ |
-| [together_ai](https://docs.litellm.ai/docs/providers/togetherai) | ✅ | ✅ | ✅ | ✅ |
-| [openrouter](https://docs.litellm.ai/docs/providers/openrouter) | ✅ | ✅ | ✅ | ✅ |
-| [ai21](https://docs.litellm.ai/docs/providers/ai21) | ✅ | ✅ | ✅ | ✅ |
-| [baseten](https://docs.litellm.ai/docs/providers/baseten) | ✅ | ✅ | ✅ | ✅ |
-| [vllm](https://docs.litellm.ai/docs/providers/vllm) | ✅ | ✅ | ✅ | ✅ |
-| [nlp_cloud](https://docs.litellm.ai/docs/providers/nlp_cloud) | ✅ | ✅ | ✅ | ✅ |
-| [aleph alpha](https://docs.litellm.ai/docs/providers/aleph_alpha) | ✅ | ✅ | ✅ | ✅ |
-| [petals](https://docs.litellm.ai/docs/providers/petals) | ✅ | ✅ | ✅ | ✅ |
-| [ollama](https://docs.litellm.ai/docs/providers/ollama) | ✅ | ✅ | ✅ | ✅ |
-| [deepinfra](https://docs.litellm.ai/docs/providers/deepinfra) | ✅ | ✅ | ✅ | ✅ |
-| [perplexity-ai](https://docs.litellm.ai/docs/providers/perplexity) | ✅ | ✅ | ✅ | ✅ |
-| [Groq AI](https://docs.litellm.ai/docs/providers/groq) | ✅ | ✅ | ✅ | ✅ |
-| [anyscale](https://docs.litellm.ai/docs/providers/anyscale) | ✅ | ✅ | ✅ | ✅ |
-| [voyage ai](https://docs.litellm.ai/docs/providers/voyage) | | | | | ✅ |
-| [xinference [Xorbits Inference]](https://docs.litellm.ai/docs/providers/xinference) | | | | | ✅ |
+| Provider | [Completion](https://docs.litellm.ai/docs/#basic-usage) | [Streaming](https://docs.litellm.ai/docs/completion/stream#streaming-responses) | [Async Completion](https://docs.litellm.ai/docs/completion/stream#async-completion) | [Async Streaming](https://docs.litellm.ai/docs/completion/stream#async-streaming) | [Async Embedding](https://docs.litellm.ai/docs/embedding/supported_embedding) | [Async Image Generation](https://docs.litellm.ai/docs/image_generation) |
+| ----------------------------------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------- |
+| [openai](https://docs.litellm.ai/docs/providers/openai) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [azure](https://docs.litellm.ai/docs/providers/azure) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [aws - sagemaker](https://docs.litellm.ai/docs/providers/aws_sagemaker) | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [aws - bedrock](https://docs.litellm.ai/docs/providers/bedrock) | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [google - vertex_ai [Gemini]](https://docs.litellm.ai/docs/providers/vertex) | ✅ | ✅ | ✅ | ✅ |
+| [google - palm](https://docs.litellm.ai/docs/providers/palm) | ✅ | ✅ | ✅ | ✅ |
+| [google AI Studio - gemini](https://docs.litellm.ai/docs/providers/gemini) | ✅ | | ✅ | | |
+| [mistral ai api](https://docs.litellm.ai/docs/providers/mistral) | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [cloudflare AI Workers](https://docs.litellm.ai/docs/providers/cloudflare_workers) | ✅ | ✅ | ✅ | ✅ |
+| [cohere](https://docs.litellm.ai/docs/providers/cohere) | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [anthropic](https://docs.litellm.ai/docs/providers/anthropic) | ✅ | ✅ | ✅ | ✅ |
+| [huggingface](https://docs.litellm.ai/docs/providers/huggingface) | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [replicate](https://docs.litellm.ai/docs/providers/replicate) | ✅ | ✅ | ✅ | ✅ |
+| [together_ai](https://docs.litellm.ai/docs/providers/togetherai) | ✅ | ✅ | ✅ | ✅ |
+| [openrouter](https://docs.litellm.ai/docs/providers/openrouter) | ✅ | ✅ | ✅ | ✅ |
+| [ai21](https://docs.litellm.ai/docs/providers/ai21) | ✅ | ✅ | ✅ | ✅ |
+| [baseten](https://docs.litellm.ai/docs/providers/baseten) | ✅ | ✅ | ✅ | ✅ |
+| [vllm](https://docs.litellm.ai/docs/providers/vllm) | ✅ | ✅ | ✅ | ✅ |
+| [nlp_cloud](https://docs.litellm.ai/docs/providers/nlp_cloud) | ✅ | ✅ | ✅ | ✅ |
+| [aleph alpha](https://docs.litellm.ai/docs/providers/aleph_alpha) | ✅ | ✅ | ✅ | ✅ |
+| [petals](https://docs.litellm.ai/docs/providers/petals) | ✅ | ✅ | ✅ | ✅ |
+| [ollama](https://docs.litellm.ai/docs/providers/ollama) | ✅ | ✅ | ✅ | ✅ |
+| [deepinfra](https://docs.litellm.ai/docs/providers/deepinfra) | ✅ | ✅ | ✅ | ✅ |
+| [perplexity-ai](https://docs.litellm.ai/docs/providers/perplexity) | ✅ | ✅ | ✅ | ✅ |
+| [Groq AI](https://docs.litellm.ai/docs/providers/groq) | ✅ | ✅ | ✅ | ✅ |
+| [anyscale](https://docs.litellm.ai/docs/providers/anyscale) | ✅ | ✅ | ✅ | ✅ |
+| [voyage ai](https://docs.litellm.ai/docs/providers/voyage) | | | | | ✅ |
+| [xinference [Xorbits Inference]](https://docs.litellm.ai/docs/providers/xinference) | | | | | ✅ |
[**Read the Docs**](https://docs.litellm.ai/docs/)
## Contributing
-To contribute: Clone the repo locally -> Make a change -> Submit a PR with the change.
-Here's how to modify the repo locally:
-Step 1: Clone the repo
+To contribute: Clone the repo locally -> Make a change -> Submit a PR with the change.
+
+Here's how to modify the repo locally:
+Step 1: Clone the repo
+
```
git clone https://github.com/BerriAI/litellm.git
```
-Step 2: Navigate into the project, and install dependencies:
+Step 2: Navigate into the project, and install dependencies:
+
```
cd litellm
poetry install
```
Step 3: Test your change:
+
```
cd litellm/tests # pwd: Documents/litellm/litellm/tests
poetry run flake8
@@ -243,8 +257,9 @@ poetry run pytest .
```
Step 4: Submit a PR with your changes! 🚀
-- push your fork to your GitHub repo
-- submit a PR from there
+
+- push your fork to your GitHub repo
+- submit a PR from there
# Enterprise
For companies that need better security, user management and professional support
@@ -260,12 +275,14 @@ This covers:
- ✅ **Secure access with Single Sign-On**
# Support / talk with founders
+
- [Schedule Demo 👋](https://calendly.com/d/4mp-gd3-k5k/berriai-1-1-onboarding-litellm-hosted-version)
- [Community Discord 💭](https://discord.gg/wuPM9dRgDw)
- Our numbers 📞 +1 (770) 8783-106 / +1 (412) 618-6238
- Our emails ✉️ ishaan@berri.ai / krrish@berri.ai
-# Why did we build this
+# Why did we build this
+
- **Need for simplicity**: Our code started to get extremely complicated managing & translating calls between Azure, OpenAI and Cohere.
# Contributors
@@ -282,4 +299,3 @@ This covers:
-
diff --git a/cookbook/logging_observability/LiteLLM_Lunary.ipynb b/cookbook/logging_observability/LiteLLM_Lunary.ipynb
new file mode 100644
index 000000000..3b1dc5d5e
--- /dev/null
+++ b/cookbook/logging_observability/LiteLLM_Lunary.ipynb
@@ -0,0 +1,348 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "4FbDOmcj2VkM"
+ },
+ "source": [
+ "## Use LiteLLM with Langfuse\n",
+ "https://docs.litellm.ai/docs/observability/langfuse_integration"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "21W8Woog26Ns"
+ },
+ "source": [
+ "## Install Dependencies"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "xrjKLBxhxu2L"
+ },
+ "outputs": [],
+ "source": [
+ "%pip install litellm lunary"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "jHEu-TjZ29PJ"
+ },
+ "source": [
+ "## Set Env Variables"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "id": "QWd9rTysxsWO"
+ },
+ "outputs": [],
+ "source": [
+ "import litellm\n",
+ "from litellm import completion\n",
+ "import os\n",
+ "\n",
+ "# from https://app.lunary.ai/\n",
+ "os.environ[\"LUNARY_PUBLIC_KEY\"] = \"\"\n",
+ "\n",
+ "\n",
+ "# LLM provider keys\n",
+ "# You can use any of the litellm supported providers: https://docs.litellm.ai/docs/providers\n",
+ "os.environ['OPENAI_API_KEY'] = \"\"\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "NodQl0hp3Lma"
+ },
+ "source": [
+ "## Set Lunary as a callback for sending data\n",
+ "## OpenAI completion call"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "vNAuwJY1yp_F",
+ "outputId": "c3a71e26-13f5-4379-fac9-409290ba79bb"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[Choices(finish_reason='stop', index=0, message=Message(content='Hello! How can I assist you today?', role='assistant'))]ModelResponse(id='chatcmpl-8xIWykI0GiJSmYtXYuB8Z363kpIBm', choices=[Choices(finish_reason='stop', index=0, message=Message(content='Hello! How can I assist you today?', role='assistant'))], created=1709143276, model='gpt-3.5-turbo-0125', object='chat.completion', system_fingerprint='fp_86156a94a0', usage=Usage(completion_tokens=9, prompt_tokens=15, total_tokens=24))\n",
+ "\n",
+ "[Lunary] Add event: {\n",
+ " \"event\": \"start\",\n",
+ " \"type\": \"llm\",\n",
+ " \"name\": \"gpt-3.5-turbo\",\n",
+ " \"runId\": \"a363776a-bd07-4474-bce2-193067f01b2e\",\n",
+ " \"timestamp\": \"2024-02-28T18:01:15.188153+00:00\",\n",
+ " \"input\": {\n",
+ " \"role\": \"user\",\n",
+ " \"content\": \"Hi \\ud83d\\udc4b - i'm openai\"\n",
+ " },\n",
+ " \"extra\": {},\n",
+ " \"runtime\": \"litellm\",\n",
+ " \"metadata\": {}\n",
+ "}\n",
+ "\n",
+ "\n",
+ "[Lunary] Add event: {\n",
+ " \"event\": \"end\",\n",
+ " \"type\": \"llm\",\n",
+ " \"runId\": \"a363776a-bd07-4474-bce2-193067f01b2e\",\n",
+ " \"timestamp\": \"2024-02-28T18:01:16.846581+00:00\",\n",
+ " \"output\": {\n",
+ " \"role\": \"assistant\",\n",
+ " \"content\": \"Hello! How can I assist you today?\"\n",
+ " },\n",
+ " \"runtime\": \"litellm\",\n",
+ " \"tokensUsage\": {\n",
+ " \"completion\": 9,\n",
+ " \"prompt\": 15\n",
+ " }\n",
+ "}\n",
+ "\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# set langfuse as a callback, litellm will send the data to langfuse\n",
+ "litellm.success_callback = [\"lunary\"]\n",
+ "\n",
+ "# openai call\n",
+ "response = completion(\n",
+ " model=\"gpt-3.5-turbo\",\n",
+ " messages=[\n",
+ " {\"role\": \"user\", \"content\": \"Hi 👋 - i'm openai\"}\n",
+ " ]\n",
+ ")\n",
+ "\n",
+ "print(response)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Using LiteLLM with Lunary Templates\n",
+ "\n",
+ "You can use LiteLLM seamlessly with Lunary templates to manage your prompts and completions.\n",
+ "\n",
+ "Assuming you have created a template \"test-template\" with a variable \"question\", you can use it like this:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "2PMSLc_FziJL",
+ "outputId": "1c37605e-b406-4ffc-aafd-e1983489c6be"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[Choices(finish_reason='stop', index=0, message=Message(content='Hello! How can I assist you today?', role='assistant'))]ModelResponse(id='chatcmpl-8xIXegwpudg4YKnLB6pmpFGXqTHcH', choices=[Choices(finish_reason='stop', index=0, message=Message(content='Hello! How can I assist you today?', role='assistant'))], created=1709143318, model='gpt-4-0125-preview', object='chat.completion', system_fingerprint='fp_c8aa5a06d6', usage=Usage(completion_tokens=9, prompt_tokens=21, total_tokens=30))\n",
+ "\n",
+ "[Lunary] Add event: {\n",
+ " \"event\": \"start\",\n",
+ " \"type\": \"llm\",\n",
+ " \"name\": \"gpt-4-turbo-preview\",\n",
+ " \"runId\": \"3a5b698d-cb55-4b3b-ab6d-04d2b99e40cb\",\n",
+ " \"timestamp\": \"2024-02-28T18:01:56.746249+00:00\",\n",
+ " \"input\": [\n",
+ " {\n",
+ " \"role\": \"system\",\n",
+ " \"content\": \"You are an helpful assistant.\"\n",
+ " },\n",
+ " {\n",
+ " \"role\": \"user\",\n",
+ " \"content\": \"Hi! Hello!\"\n",
+ " }\n",
+ " ],\n",
+ " \"extra\": {\n",
+ " \"temperature\": 1,\n",
+ " \"max_tokens\": 100\n",
+ " },\n",
+ " \"runtime\": \"litellm\",\n",
+ " \"metadata\": {}\n",
+ "}\n",
+ "\n",
+ "\n",
+ "[Lunary] Add event: {\n",
+ " \"event\": \"end\",\n",
+ " \"type\": \"llm\",\n",
+ " \"runId\": \"3a5b698d-cb55-4b3b-ab6d-04d2b99e40cb\",\n",
+ " \"timestamp\": \"2024-02-28T18:01:58.741244+00:00\",\n",
+ " \"output\": {\n",
+ " \"role\": \"assistant\",\n",
+ " \"content\": \"Hello! How can I assist you today?\"\n",
+ " },\n",
+ " \"runtime\": \"litellm\",\n",
+ " \"tokensUsage\": {\n",
+ " \"completion\": 9,\n",
+ " \"prompt\": 21\n",
+ " }\n",
+ "}\n",
+ "\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "import lunary\n",
+ "from litellm import completion\n",
+ "\n",
+ "template = lunary.render_template(\"test-template\", {\"question\": \"Hello!\"})\n",
+ "\n",
+ "response = completion(**template)\n",
+ "\n",
+ "print(response)"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/cookbook/proxy-server/readme.md b/cookbook/proxy-server/readme.md
index 4b296831b..d0b0592c4 100644
--- a/cookbook/proxy-server/readme.md
+++ b/cookbook/proxy-server/readme.md
@@ -33,7 +33,7 @@
- Call all models using the OpenAI format - `completion(model, messages)`
- Text responses will always be available at `['choices'][0]['message']['content']`
- **Error Handling** Using Model Fallbacks (if `GPT-4` fails, try `llama2`)
-- **Logging** - Log Requests, Responses and Errors to `Supabase`, `Posthog`, `Mixpanel`, `Sentry`, `LLMonitor`,`Athina`, `Helicone` (Any of the supported providers here: https://litellm.readthedocs.io/en/latest/advanced/
+- **Logging** - Log Requests, Responses and Errors to `Supabase`, `Posthog`, `Mixpanel`, `Sentry`, `Lunary`, `Athina`, `Helicone` (any of the supported providers here: https://litellm.readthedocs.io/en/latest/advanced/)
**Example: Logs sent to Supabase**
diff --git a/docs/my-website/docs/getting_started.md b/docs/my-website/docs/getting_started.md
index 607b86943..edbdf3c00 100644
--- a/docs/my-website/docs/getting_started.md
+++ b/docs/my-website/docs/getting_started.md
@@ -2,11 +2,11 @@
import QuickStart from '../src/components/QuickStart.js'
-LiteLLM simplifies LLM API calls by mapping them all to the [OpenAI ChatCompletion format](https://platform.openai.com/docs/api-reference/chat).
+LiteLLM simplifies LLM API calls by mapping them all to the [OpenAI ChatCompletion format](https://platform.openai.com/docs/api-reference/chat).
-## basic usage
+## basic usage
-By default we provide a free $10 community-key to try all providers supported on LiteLLM.
+By default we provide a free $10 community-key to try all providers supported on LiteLLM.
```python
from litellm import completion
@@ -29,14 +29,16 @@ Email us @ krrish@berri.ai
Next Steps 👉 [Call all supported models - e.g. Claude-2, Llama2-70b, etc.](./proxy_api.md#supported-models)
-More details 👉
-* [Completion() function details](./completion/)
-* [All supported models / providers on LiteLLM](./providers/)
-* [Build your own OpenAI proxy](https://github.com/BerriAI/liteLLM-proxy/tree/main)
+More details 👉
+
+- [Completion() function details](./completion/)
+- [All supported models / providers on LiteLLM](./providers/)
+- [Build your own OpenAI proxy](https://github.com/BerriAI/liteLLM-proxy/tree/main)
## streaming
-Same example from before. Just pass in `stream=True` in the completion args.
+Same example from before. Just pass in `stream=True` in the completion args.
+
```python
from litellm import completion
@@ -55,46 +57,50 @@ response = completion("command-nightly", messages, stream=True)
print(response)
```
-More details 👉
-* [streaming + async](./completion/stream.md)
-* [tutorial for streaming Llama2 on TogetherAI](./tutorials/TogetherAI_liteLLM.md)
+More details 👉
-## exception handling
+- [streaming + async](./completion/stream.md)
+- [tutorial for streaming Llama2 on TogetherAI](./tutorials/TogetherAI_liteLLM.md)
-LiteLLM maps exceptions across all supported providers to the OpenAI exceptions. All our exceptions inherit from OpenAI's exception types, so any error-handling you have for that, should work out of the box with LiteLLM.
+## exception handling
-```python
+LiteLLM maps exceptions across all supported providers to the OpenAI exceptions. All our exceptions inherit from OpenAI's exception types, so any error handling you have for that should work out of the box with LiteLLM.
+
+```python
from openai.error import OpenAIError
from litellm import completion
os.environ["ANTHROPIC_API_KEY"] = "bad-key"
-try:
- # some code
+try:
+ # some code
completion(model="claude-instant-1", messages=[{"role": "user", "content": "Hey, how's it going?"}])
except OpenAIError as e:
print(e)
```
## Logging Observability - Log LLM Input/Output ([Docs](https://docs.litellm.ai/docs/observability/callbacks))
-LiteLLM exposes pre defined callbacks to send data to Langfuse, LLMonitor, Helicone, Promptlayer, Traceloop, Slack
+
+LiteLLM exposes pre-defined callbacks to send data to Lunary, Langfuse, Helicone, Promptlayer, Traceloop, Slack
+
```python
from litellm import completion
## set env variables for logging tools
+os.environ["LUNARY_PUBLIC_KEY"] = "your-lunary-public-key"
os.environ["LANGFUSE_PUBLIC_KEY"] = ""
os.environ["LANGFUSE_SECRET_KEY"] = ""
-os.environ["LLMONITOR_APP_ID"] = "your-llmonitor-app-id"
os.environ["OPENAI_API_KEY"]
# set callbacks
-litellm.success_callback = ["langfuse", "llmonitor"] # log input/output to langfuse, llmonitor, supabase
+litellm.success_callback = ["lunary", "langfuse"] # log input/output to langfuse, lunary, supabase
#openai call
response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
```
-More details 👉
-* [exception mapping](./exception_mapping.md)
-* [retries + model fallbacks for completion()](./completion/reliable_completions.md)
-* [tutorial for model fallbacks with completion()](./tutorials/fallbacks.md)
\ No newline at end of file
+More details 👉
+
+- [exception mapping](./exception_mapping.md)
+- [retries + model fallbacks for completion()](./completion/reliable_completions.md)
+- [tutorial for model fallbacks with completion()](./tutorials/fallbacks.md)
diff --git a/docs/my-website/docs/index.md b/docs/my-website/docs/index.md
index 18331ba3b..762156f46 100644
--- a/docs/my-website/docs/index.md
+++ b/docs/my-website/docs/index.md
@@ -5,7 +5,6 @@ import TabItem from '@theme/TabItem';
https://github.com/BerriAI/litellm
-
## **Call 100+ LLMs using the same Input/Output Format**
- Translate inputs to provider's `completion`, `embedding`, and `image_generation` endpoints
@@ -21,6 +20,7 @@ You can use litellm through either:
## LiteLLM Python SDK
### Basic usage
+
@@ -28,6 +28,7 @@ You can use litellm through either:
```shell
pip install litellm
```
+
@@ -39,7 +40,7 @@ import os
os.environ["OPENAI_API_KEY"] = "your-api-key"
response = completion(
- model="gpt-3.5-turbo",
+ model="gpt-3.5-turbo",
messages=[{ "content": "Hello, how are you?","role": "user"}]
)
```
@@ -55,7 +56,7 @@ import os
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
response = completion(
- model="claude-2",
+ model="claude-2",
messages=[{ "content": "Hello, how are you?","role": "user"}]
)
```
@@ -73,7 +74,7 @@ os.environ["VERTEX_PROJECT"] = "hardy-device-386718"
os.environ["VERTEX_LOCATION"] = "us-central1"
response = completion(
- model="chat-bison",
+ model="chat-bison",
messages=[{ "content": "Hello, how are you?","role": "user"}]
)
```
@@ -83,15 +84,15 @@ response = completion(
```python
-from litellm import completion
+from litellm import completion
import os
-os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
+os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
# e.g. Call 'WizardLM/WizardCoder-Python-34B-V1.0' hosted on HF Inference endpoints
response = completion(
model="huggingface/WizardLM/WizardCoder-Python-34B-V1.0",
- messages=[{ "content": "Hello, how are you?","role": "user"}],
+ messages=[{ "content": "Hello, how are you?","role": "user"}],
api_base="https://my-endpoint.huggingface.cloud"
)
@@ -113,25 +114,25 @@ os.environ["AZURE_API_VERSION"] = ""
# azure call
response = completion(
- "azure/",
+ "azure/",
messages = [{ "content": "Hello, how are you?","role": "user"}]
)
```
-
```python
from litellm import completion
response = completion(
- model="ollama/llama2",
- messages = [{ "content": "Hello, how are you?","role": "user"}],
+ model="ollama/llama2",
+ messages = [{ "content": "Hello, how are you?","role": "user"}],
api_base="http://localhost:11434"
)
```
+
@@ -140,19 +141,21 @@ from litellm import completion
import os
## set ENV variables
-os.environ["OPENROUTER_API_KEY"] = "openrouter_api_key"
+os.environ["OPENROUTER_API_KEY"] = "openrouter_api_key"
response = completion(
- model="openrouter/google/palm-2-chat-bison",
+ model="openrouter/google/palm-2-chat-bison",
messages = [{ "content": "Hello, how are you?","role": "user"}],
)
```
+
### Streaming
Set `stream=True` in the `completion` args.
+
@@ -164,7 +167,7 @@ import os
os.environ["OPENAI_API_KEY"] = "your-api-key"
response = completion(
- model="gpt-3.5-turbo",
+ model="gpt-3.5-turbo",
messages=[{ "content": "Hello, how are you?","role": "user"}],
stream=True,
)
@@ -181,7 +184,7 @@ import os
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
response = completion(
- model="claude-2",
+ model="claude-2",
messages=[{ "content": "Hello, how are you?","role": "user"}],
stream=True,
)
@@ -200,7 +203,7 @@ os.environ["VERTEX_PROJECT"] = "hardy-device-386718"
os.environ["VERTEX_LOCATION"] = "us-central1"
response = completion(
- model="chat-bison",
+ model="chat-bison",
messages=[{ "content": "Hello, how are you?","role": "user"}],
stream=True,
)
@@ -211,15 +214,15 @@ response = completion(
```python
-from litellm import completion
+from litellm import completion
import os
-os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
+os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
# e.g. Call 'WizardLM/WizardCoder-Python-34B-V1.0' hosted on HF Inference endpoints
response = completion(
model="huggingface/WizardLM/WizardCoder-Python-34B-V1.0",
- messages=[{ "content": "Hello, how are you?","role": "user"}],
+ messages=[{ "content": "Hello, how are you?","role": "user"}],
api_base="https://my-endpoint.huggingface.cloud",
stream=True,
)
@@ -242,7 +245,7 @@ os.environ["AZURE_API_VERSION"] = ""
# azure call
response = completion(
- "azure/",
+ "azure/",
messages = [{ "content": "Hello, how are you?","role": "user"}],
stream=True,
)
@@ -250,19 +253,19 @@ response = completion(
-
```python
from litellm import completion
response = completion(
- model="ollama/llama2",
- messages = [{ "content": "Hello, how are you?","role": "user"}],
+ model="ollama/llama2",
+ messages = [{ "content": "Hello, how are you?","role": "user"}],
api_base="http://localhost:11434",
stream=True,
)
```
+
@@ -271,48 +274,50 @@ from litellm import completion
import os
## set ENV variables
-os.environ["OPENROUTER_API_KEY"] = "openrouter_api_key"
+os.environ["OPENROUTER_API_KEY"] = "openrouter_api_key"
response = completion(
- model="openrouter/google/palm-2-chat-bison",
+ model="openrouter/google/palm-2-chat-bison",
messages = [{ "content": "Hello, how are you?","role": "user"}],
stream=True,
)
```
+
### Exception handling
-LiteLLM maps exceptions across all supported providers to the OpenAI exceptions. All our exceptions inherit from OpenAI's exception types, so any error-handling you have for that, should work out of the box with LiteLLM.
+LiteLLM maps exceptions across all supported providers to the OpenAI exceptions. All our exceptions inherit from OpenAI's exception types, so any error handling you have for that should work out of the box with LiteLLM.
-```python
+```python
from openai.error import OpenAIError
from litellm import completion
os.environ["ANTHROPIC_API_KEY"] = "bad-key"
-try:
- # some code
+try:
+ # some code
completion(model="claude-instant-1", messages=[{"role": "user", "content": "Hey, how's it going?"}])
except OpenAIError as e:
print(e)
```
### Logging Observability - Log LLM Input/Output ([Docs](https://docs.litellm.ai/docs/observability/callbacks))
-LiteLLM exposes pre defined callbacks to send data to Langfuse, LLMonitor, Helicone, Promptlayer, Traceloop, Slack
+LiteLLM exposes pre-defined callbacks to send data to Lunary, Langfuse, Helicone, Promptlayer, Traceloop, Slack
+
```python
from litellm import completion
## set env variables for logging tools
os.environ["LANGFUSE_PUBLIC_KEY"] = ""
os.environ["LANGFUSE_SECRET_KEY"] = ""
-os.environ["LLMONITOR_APP_ID"] = "your-llmonitor-app-id"
+os.environ["LUNARY_PUBLIC_KEY"] = "your-lunary-public-key"
os.environ["OPENAI_API_KEY"]
# set callbacks
-litellm.success_callback = ["langfuse", "llmonitor"] # log input/output to langfuse, llmonitor, supabase
+litellm.success_callback = ["lunary", "langfuse"] # log input/output to lunary, langfuse, supabase
#openai call
response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
@@ -324,7 +329,7 @@ Use a callback function for this - more info on custom callbacks: https://docs.l
```python
import litellm
-# track_cost_callback
+# track_cost_callback
def track_cost_callback(
kwargs, # kwargs to completion
completion_response, # response from completion
@@ -335,7 +340,7 @@ def track_cost_callback(
print("streaming response_cost", response_cost)
except:
pass
-# set callback
+# set callback
litellm.success_callback = [track_cost_callback] # set custom callback function
# litellm.completion() call
@@ -353,11 +358,12 @@ response = completion(
## OpenAI Proxy
-Track spend across multiple projects/people
+Track spend across multiple projects/people

-The proxy provides:
+The proxy provides:
+
1. [Hooks for auth](https://docs.litellm.ai/docs/proxy/virtual_keys#custom-auth)
2. [Hooks for logging](https://docs.litellm.ai/docs/proxy/logging#step-1---create-your-custom-litellm-callback-class)
3. [Cost tracking](https://docs.litellm.ai/docs/proxy/virtual_keys#tracking-spend)
@@ -365,13 +371,14 @@ The proxy provides:
### 📖 Proxy Endpoints - [Swagger Docs](https://litellm-api.up.railway.app/)
-### Quick Start Proxy - CLI
+### Quick Start Proxy - CLI
```shell
pip install 'litellm[proxy]'
```
#### Step 1: Start litellm proxy
+
```shell
$ litellm --model huggingface/bigcode/starcoder
@@ -379,6 +386,7 @@ $ litellm --model huggingface/bigcode/starcoder
```
#### Step 2: Make ChatCompletions Request to Proxy
+
```python
import openai # openai v1.0.0+
client = openai.OpenAI(api_key="anything",base_url="http://0.0.0.0:4000") # set proxy to base_url
@@ -394,6 +402,7 @@ print(response)
```
## More details
-* [exception mapping](./exception_mapping.md)
-* [retries + model fallbacks for completion()](./completion/reliable_completions.md)
-* [proxy virtual keys & spend management](./tutorials/fallbacks.md)
\ No newline at end of file
+
+- [exception mapping](./exception_mapping.md)
+- [retries + model fallbacks for completion()](./completion/reliable_completions.md)
+- [proxy virtual keys & spend management](./tutorials/fallbacks.md)
diff --git a/docs/my-website/docs/observability/callbacks.md b/docs/my-website/docs/observability/callbacks.md
index 3b3d4eef3..fbc0733e5 100644
--- a/docs/my-website/docs/observability/callbacks.md
+++ b/docs/my-website/docs/observability/callbacks.md
@@ -7,7 +7,7 @@ liteLLM provides `input_callbacks`, `success_callbacks` and `failure_callbacks`,
liteLLM supports:
- [Custom Callback Functions](https://docs.litellm.ai/docs/observability/custom_callback)
-- [LLMonitor](https://llmonitor.com/docs)
+- [Lunary](https://lunary.ai/docs)
- [Helicone](https://docs.helicone.ai/introduction)
- [Traceloop](https://traceloop.com/docs)
- [Athina](https://docs.athina.ai/)
@@ -22,16 +22,16 @@ from litellm import completion
# set callbacks
litellm.input_callback=["sentry"] # for sentry breadcrumbing - logs the input being sent to the api
-litellm.success_callback=["posthog", "helicone", "llmonitor", "athina"]
-litellm.failure_callback=["sentry", "llmonitor"]
+litellm.success_callback=["posthog", "helicone", "lunary", "athina"]
+litellm.failure_callback=["sentry", "lunary"]
## set env variables
os.environ['SENTRY_DSN'], os.environ['SENTRY_API_TRACE_RATE']= ""
os.environ['POSTHOG_API_KEY'], os.environ['POSTHOG_API_URL'] = "api-key", "api-url"
os.environ["HELICONE_API_KEY"] = ""
os.environ["TRACELOOP_API_KEY"] = ""
-os.environ["LLMONITOR_APP_ID"] = ""
+os.environ["LUNARY_PUBLIC_KEY"] = ""
os.environ["ATHINA_API_KEY"] = ""
response = completion(model="gpt-3.5-turbo", messages=messages)
-```
\ No newline at end of file
+```
diff --git a/docs/my-website/docs/observability/llmonitor_integration.md b/docs/my-website/docs/observability/llmonitor_integration.md
deleted file mode 100644
index 06ac44a84..000000000
--- a/docs/my-website/docs/observability/llmonitor_integration.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# LLMonitor Tutorial
-
-[LLMonitor](https://llmonitor.com/) is an open-source observability platform that provides cost tracking, user tracking and powerful agent tracing.
-
-
-
-## Use LLMonitor to log requests across all LLM Providers (OpenAI, Azure, Anthropic, Cohere, Replicate, PaLM)
-
-liteLLM provides `callbacks`, making it easy for you to log data depending on the status of your responses.
-
-:::info
-We want to learn how we can make the callbacks better! Meet the [founders](https://calendly.com/d/4mp-gd3-k5k/berriai-1-1-onboarding-litellm-hosted-version) or
-join our [discord](https://discord.gg/wuPM9dRgDw)
-:::
-
-### Using Callbacks
-
-First, sign up to get an app ID on the [LLMonitor dashboard](https://llmonitor.com).
-
-Use just 2 lines of code, to instantly log your responses **across all providers** with llmonitor:
-
-```python
-litellm.success_callback = ["llmonitor"]
-litellm.failure_callback = ["llmonitor"]
-```
-
-Complete code
-
-```python
-from litellm import completion
-
-## set env variables
-os.environ["LLMONITOR_APP_ID"] = "your-llmonitor-app-id"
-# Optional: os.environ["LLMONITOR_API_URL"] = "self-hosting-url"
-
-os.environ["OPENAI_API_KEY"], os.environ["COHERE_API_KEY"] = "", ""
-
-# set callbacks
-litellm.success_callback = ["llmonitor"]
-litellm.failure_callback = ["llmonitor"]
-
-#openai call
-response = completion(
- model="gpt-3.5-turbo",
- messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
- user="ishaan_litellm"
-)
-
-#cohere call
-response = completion(
- model="command-nightly",
- messages=[{"role": "user", "content": "Hi 👋 - i'm cohere"}],
- user="ishaan_litellm"
-)
-```
-
-## Support & Talk to Founders
-
-- [Schedule Demo 👋](https://calendly.com/d/4mp-gd3-k5k/berriai-1-1-onboarding-litellm-hosted-version)
-- [Community Discord 💭](https://discord.gg/wuPM9dRgDw)
-- Our numbers 📞 +1 (770) 8783-106 / +1 (412) 618-6238
-- Our emails ✉️ ishaan@berri.ai / krrish@berri.ai
-- Meet the LLMonitor team on [Discord](http://discord.com/invite/8PafSG58kK) or via [email](mailto:vince@llmonitor.com).
\ No newline at end of file
diff --git a/docs/my-website/docs/observability/lunary_integration.md b/docs/my-website/docs/observability/lunary_integration.md
new file mode 100644
index 000000000..9b8e90df7
--- /dev/null
+++ b/docs/my-website/docs/observability/lunary_integration.md
@@ -0,0 +1,82 @@
+# Lunary - Logging and tracing LLM input/output
+
+[Lunary](https://lunary.ai/) is an open-source AI developer platform providing observability, prompt management, and evaluation tools.
+
+
+
+## Use Lunary to log requests across all LLM Providers (OpenAI, Azure, Anthropic, Cohere, Replicate, PaLM)
+
+liteLLM provides `callbacks`, making it easy for you to log data depending on the status of your responses.
+
+:::info
+We want to learn how we can make the callbacks better! Meet the [founders](https://calendly.com/d/4mp-gd3-k5k/berriai-1-1-onboarding-litellm-hosted-version) or
+join our [discord](https://discord.gg/wuPM9dRgDw)
+:::
+
+### Using Callbacks
+
+First, sign up to get a public key on the [Lunary dashboard](https://lunary.ai).
+
+Use just 2 lines of code to instantly log your responses **across all providers** with Lunary:
+
+```python
+litellm.success_callback = ["lunary"]
+litellm.failure_callback = ["lunary"]
+```
+
+Complete code
+
+```python
+from litellm import completion
+
+## set env variables
+os.environ["LUNARY_PUBLIC_KEY"] = "your-lunary-public-key"
+
+os.environ["OPENAI_API_KEY"] = ""
+
+# set callbacks
+litellm.success_callback = ["lunary"]
+litellm.failure_callback = ["lunary"]
+
+#openai call
+response = completion(
+ model="gpt-3.5-turbo",
+ messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
+ user="ishaan_litellm"
+)
+```
+
+## Templates
+
+You can use Lunary to manage prompt templates and use them across all your LLM providers.
+
+Make sure to have `lunary` installed:
+
+```bash
+pip install lunary
+```
+
+Then, use the following code to pull a template from Lunary:
+
+```python
+import lunary
+from litellm import completion
+
+template = lunary.render_template("template-slug", {
+ "name": "John", # Inject variables
+})
+
+litellm.success_callback = ["lunary"]
+
+result = completion(**template)
+```
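+
+Since `render_template` returns the keyword arguments that `completion()` expects, you can also override any of them at call time. A sketch, assuming the returned value is a plain dict of `completion()` kwargs (the user id is illustrative):
+
+```python
+# per-call overrides layered on top of the template
+result = completion(**{**template, "user": "user-123"})
+```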
+
+## Support & Talk to Founders
+
+- Meet the Lunary team via [email](mailto:hello@lunary.ai).
+- [Schedule Demo 👋](https://calendly.com/d/4mp-gd3-k5k/berriai-1-1-onboarding-litellm-hosted-version)
+- [Community Discord 💭](https://discord.gg/wuPM9dRgDw)
+- Our numbers 📞 +1 (770) 8783-106 / +1 (412) 618-6238
+- Our emails ✉️ ishaan@berri.ai / krrish@berri.ai
diff --git a/docs/my-website/docs/providers/anthropic.md b/docs/my-website/docs/providers/anthropic.md
index 792e57fc0..22e9eb559 100644
--- a/docs/my-website/docs/providers/anthropic.md
+++ b/docs/my-website/docs/providers/anthropic.md
@@ -175,6 +175,15 @@ print(response)
## Usage - Function Calling
+:::info
+
+Claude returns its output as an XML tree. [Here is how we translate it](https://github.com/BerriAI/litellm/blob/49642a5b00a53b1babc1a753426a8afcac85dbbe/litellm/llms/prompt_templates/factory.py#L734).
+
+You can see the raw response via `response._hidden_params["original_response"]`.
+
+Claude hallucinates, e.g. returning the list param `value` as `<value>\n<item>apple</item>\n<item>banana</item>\n</value>` or `<value>\n<list>\n<item>apple</item>\n<item>banana</item>\n</list>\n</value>`.
+:::
+
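+For example, a minimal sketch of inspecting both the translated and the raw output (the model name and prompt are illustrative):
+
+```python
+import os
+from litellm import completion
+
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
+
+response = completion(
+    model="claude-2",
+    messages=[{"role": "user", "content": "List two fruits"}],
+)
+
+print(response.choices[0].message.content)  # translated, OpenAI-format output
+print(response._hidden_params["original_response"])  # raw response as Claude returned it
+```
+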
```python
from litellm import completion
diff --git a/docs/my-website/docs/providers/bedrock.md b/docs/my-website/docs/providers/bedrock.md
index 6da788d97..590ffc423 100644
--- a/docs/my-website/docs/providers/bedrock.md
+++ b/docs/my-website/docs/providers/bedrock.md
@@ -146,6 +146,15 @@ print(response)
## Usage - Function Calling
+:::info
+
+Claude returns its output as an XML tree. [Here is how we translate it](https://github.com/BerriAI/litellm/blob/49642a5b00a53b1babc1a753426a8afcac85dbbe/litellm/llms/prompt_templates/factory.py#L734).
+
+You can see the raw response via `response._hidden_params["original_response"]`.
+
+Claude hallucinates, e.g. returning the list param `value` as `<value>\n<item>apple</item>\n<item>banana</item>\n</value>` or `<value>\n<list>\n<item>apple</item>\n<item>banana</item>\n</list>\n</value>`.
+:::
+
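+The same inspection works for Bedrock Claude models. A minimal sketch (the model id is illustrative; AWS credentials are assumed to be set in the environment, as shown above):
+
+```python
+from litellm import completion
+
+response = completion(
+    model="bedrock/anthropic.claude-instant-v1",
+    messages=[{"role": "user", "content": "List two fruits"}],
+)
+
+print(response.choices[0].message.content)  # translated, OpenAI-format output
+print(response._hidden_params["original_response"])  # raw response from Claude
+```
+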
```python
from litellm import completion
diff --git a/docs/my-website/docs/proxy/users.md b/docs/my-website/docs/proxy/users.md
index 12cbda9d0..478d63f84 100644
--- a/docs/my-website/docs/proxy/users.md
+++ b/docs/my-website/docs/proxy/users.md
@@ -176,8 +176,7 @@ general_settings:
master_key: sk-1234
litellm_settings:
- max_budget: 10 # global budget for proxy
- max_user_budget: 0.0001 # budget for 'user' passed to /chat/completions
+ max_end_user_budget: 0.0001 # budget for 'user' passed to /chat/completions
```
2. Make a /chat/completions call, pass 'user' - First call Works
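
For illustration, a minimal sketch of that call through the proxy, using the OpenAI SDK (the key, model, and user id are placeholders):

```python
import openai  # openai v1.0.0+

# point the client at the LiteLLM proxy
client = openai.OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hi"}],
    user="my-end-user-1",  # spend for this end user is capped by max_end_user_budget
)
print(response)
```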
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index 2cfef4687..63f3fbb02 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -22,20 +22,20 @@ const sidebars = {
type: "category",
label: "💥 OpenAI Proxy Server",
link: {
- type: 'generated-index',
- title: '💥 OpenAI Proxy Server',
+ type: "generated-index",
+ title: "💥 OpenAI Proxy Server",
description: `Proxy Server to call 100+ LLMs in a unified interface & track spend, set budgets per virtual key/user`,
- slug: '/simple_proxy',
+ slug: "/simple_proxy",
},
items: [
- "proxy/quick_start",
+ "proxy/quick_start",
"proxy/deploy",
"proxy/prod",
"proxy/configs",
{
- type: 'link',
- label: '📖 All Endpoints',
- href: 'https://litellm-api.up.railway.app/',
+ type: "link",
+ label: "📖 All Endpoints",
+ href: "https://litellm-api.up.railway.app/",
},
"proxy/enterprise",
"proxy/user_keys",
@@ -46,12 +46,9 @@ const sidebars = {
"proxy/cost_tracking",
"proxy/token_auth",
{
- "type": "category",
- "label": "🔥 Load Balancing",
- "items": [
- "proxy/load_balancing",
- "proxy/reliability",
- ]
+ type: "category",
+ label: "🔥 Load Balancing",
+ items: ["proxy/load_balancing", "proxy/reliability"],
},
"proxy/model_management",
"proxy/health",
@@ -60,13 +57,9 @@ const sidebars = {
"proxy/prompt_injection",
"proxy/caching",
{
- "type": "category",
- "label": "Logging, Alerting",
- "items": [
- "proxy/logging",
- "proxy/alerting",
- "proxy/streaming_logging",
- ]
+ type: "category",
+ label: "Logging, Alerting",
+ items: ["proxy/logging", "proxy/alerting", "proxy/streaming_logging"],
},
"proxy/grafana_metrics",
"proxy/call_hooks",
@@ -78,20 +71,20 @@ const sidebars = {
type: "category",
label: "Completion()",
link: {
- type: 'generated-index',
- title: 'Completion()',
- description: 'Details on the completion() function',
- slug: '/completion',
+ type: "generated-index",
+ title: "Completion()",
+ description: "Details on the completion() function",
+ slug: "/completion",
},
items: [
- "completion/input",
+ "completion/input",
"completion/prompt_formatting",
- "completion/output",
+ "completion/output",
"exception_mapping",
- "completion/stream",
+ "completion/stream",
"completion/message_trimming",
"completion/function_call",
- "completion/model_alias",
+ "completion/model_alias",
"completion/batching",
"completion/mock_requests",
"completion/reliable_completions",
@@ -101,7 +94,7 @@ const sidebars = {
type: "category",
label: "Embedding(), Moderation(), Image Generation(), Audio Transcriptions()",
items: [
- "embedding/supported_embedding",
+ "embedding/supported_embedding",
"embedding/async_embedding",
"embedding/moderation",
"image_generation",
@@ -112,10 +105,11 @@ const sidebars = {
type: "category",
label: "Supported Models & Providers",
link: {
- type: 'generated-index',
- title: 'Providers',
- description: 'Learn how to deploy + call models from different providers on LiteLLM',
- slug: '/providers',
+ type: "generated-index",
+ title: "Providers",
+ description:
+ "Learn how to deploy + call models from different providers on LiteLLM",
+ slug: "/providers",
},
items: [
"providers/openai",
@@ -150,7 +144,7 @@ const sidebars = {
"providers/openrouter",
"providers/custom_openai_proxy",
"providers/petals",
- ]
+ ],
},
"proxy/custom_pricing",
"routing",
@@ -165,9 +159,10 @@ const sidebars = {
type: "category",
label: "Logging & Observability",
items: [
- 'debugging/local_debugging',
+ "debugging/local_debugging",
"observability/callbacks",
"observability/custom_callback",
+ "observability/lunary_integration",
"observability/langfuse_integration",
"observability/sentry",
"observability/promptlayer_integration",
@@ -176,7 +171,6 @@ const sidebars = {
"observability/slack_integration",
"observability/traceloop_integration",
"observability/athina_integration",
- "observability/llmonitor_integration",
"observability/helicone_integration",
"observability/supabase_integration",
`observability/telemetry`,
@@ -184,19 +178,19 @@ const sidebars = {
},
"caching/redis_cache",
{
- type: 'category',
- label: 'Tutorials',
+ type: "category",
+ label: "Tutorials",
items: [
'tutorials/azure_openai',
'tutorials/instructor',
'tutorials/oobabooga',
"tutorials/gradio_integration",
- 'tutorials/huggingface_codellama',
- 'tutorials/huggingface_tutorial',
- 'tutorials/TogetherAI_liteLLM',
- 'tutorials/finetuned_chat_gpt',
- 'tutorials/sagemaker_llms',
- 'tutorials/text_completion',
+ "tutorials/huggingface_codellama",
+ "tutorials/huggingface_tutorial",
+ "tutorials/TogetherAI_liteLLM",
+ "tutorials/finetuned_chat_gpt",
+ "tutorials/sagemaker_llms",
+ "tutorials/text_completion",
"tutorials/first_playground",
"tutorials/model_fallbacks",
],
@@ -204,40 +198,39 @@ const sidebars = {
{
type: "category",
label: "LangChain, LlamaIndex Integration",
- items: [
- "langchain/langchain"
- ],
+ items: ["langchain/langchain"],
},
{
- type: 'category',
- label: 'Extras',
+ type: "category",
+ label: "Extras",
items: [
- 'extras/contributing',
+ "extras/contributing",
"proxy_server",
{
type: "category",
label: "❤️ 🚅 Projects built on LiteLLM",
link: {
- type: 'generated-index',
- title: 'Projects built on LiteLLM',
- description: 'Learn how to deploy + call models from different providers on LiteLLM',
- slug: '/project',
+ type: "generated-index",
+ title: "Projects built on LiteLLM",
+ description:
+ "Learn how to deploy + call models from different providers on LiteLLM",
+ slug: "/project",
},
items: [
"projects/Docq.AI",
"projects/OpenInterpreter",
"projects/FastREPL",
"projects/PROMPTMETHEUS",
- "projects/Codium PR Agent",
+ "projects/Codium PR Agent",
"projects/Prompt2Model",
"projects/SalesGPT",
- "projects/Quivr",
- "projects/Langstream",
- "projects/Otter",
- "projects/GPT Migrate",
- "projects/YiVal",
- "projects/LiteLLM Proxy",
- ]
+ "projects/Quivr",
+ "projects/Langstream",
+ "projects/Otter",
+ "projects/GPT Migrate",
+ "projects/YiVal",
+ "projects/LiteLLM Proxy",
+ ],
},
],
},
diff --git a/docs/my-website/src/pages/index.md b/docs/my-website/src/pages/index.md
index d7ed14019..126e83688 100644
--- a/docs/my-website/src/pages/index.md
+++ b/docs/my-website/src/pages/index.md
@@ -5,7 +5,6 @@ import TabItem from '@theme/TabItem';
https://github.com/BerriAI/litellm
-
## **Call 100+ LLMs using the same Input/Output Format**
- Translate inputs to provider's `completion`, `embedding`, and `image_generation` endpoints
@@ -13,7 +12,8 @@ https://github.com/BerriAI/litellm
- Retry/fallback logic across multiple deployments (e.g. Azure/OpenAI) - [Router](https://docs.litellm.ai/docs/routing)
- Track spend & set budgets per project [OpenAI Proxy Server](https://docs.litellm.ai/docs/simple_proxy)
-## Basic usage
+## Basic usage
+
@@ -21,6 +21,7 @@ https://github.com/BerriAI/litellm
```shell
pip install litellm
```
+
@@ -32,7 +33,7 @@ import os
os.environ["OPENAI_API_KEY"] = "your-api-key"
response = completion(
- model="gpt-3.5-turbo",
+ model="gpt-3.5-turbo",
messages=[{ "content": "Hello, how are you?","role": "user"}]
)
```
@@ -48,7 +49,7 @@ import os
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
response = completion(
- model="claude-2",
+ model="claude-2",
messages=[{ "content": "Hello, how are you?","role": "user"}]
)
```
@@ -66,7 +67,7 @@ os.environ["VERTEX_PROJECT"] = "hardy-device-386718"
os.environ["VERTEX_LOCATION"] = "us-central1"
response = completion(
- model="chat-bison",
+ model="chat-bison",
messages=[{ "content": "Hello, how are you?","role": "user"}]
)
```
@@ -76,15 +77,15 @@ response = completion(
```python
-from litellm import completion
+from litellm import completion
import os
-os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
+os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
# e.g. Call 'WizardLM/WizardCoder-Python-34B-V1.0' hosted on HF Inference endpoints
response = completion(
model="huggingface/WizardLM/WizardCoder-Python-34B-V1.0",
- messages=[{ "content": "Hello, how are you?","role": "user"}],
+ messages=[{ "content": "Hello, how are you?","role": "user"}],
api_base="https://my-endpoint.huggingface.cloud"
)
@@ -106,25 +107,25 @@ os.environ["AZURE_API_VERSION"] = ""
# azure call
response = completion(
- "azure/",
+ "azure/",
messages = [{ "content": "Hello, how are you?","role": "user"}]
)
```
-
```python
from litellm import completion
response = completion(
- model="ollama/llama2",
- messages = [{ "content": "Hello, how are you?","role": "user"}],
+ model="ollama/llama2",
+ messages = [{ "content": "Hello, how are you?","role": "user"}],
api_base="http://localhost:11434"
)
```
+
@@ -133,19 +134,21 @@ from litellm import completion
import os
## set ENV variables
-os.environ["OPENROUTER_API_KEY"] = "openrouter_api_key"
+os.environ["OPENROUTER_API_KEY"] = "openrouter_api_key"
response = completion(
- model="openrouter/google/palm-2-chat-bison",
+ model="openrouter/google/palm-2-chat-bison",
messages = [{ "content": "Hello, how are you?","role": "user"}],
)
```
+
## Streaming
-Set `stream=True` in the `completion` args.
+
+Set `stream=True` in the `completion` args.
@@ -157,7 +160,7 @@ import os
os.environ["OPENAI_API_KEY"] = "your-api-key"
response = completion(
- model="gpt-3.5-turbo",
+ model="gpt-3.5-turbo",
messages=[{ "content": "Hello, how are you?","role": "user"}],
stream=True,
)
@@ -174,7 +177,7 @@ import os
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
response = completion(
- model="claude-2",
+ model="claude-2",
messages=[{ "content": "Hello, how are you?","role": "user"}],
stream=True,
)
@@ -193,7 +196,7 @@ os.environ["VERTEX_PROJECT"] = "hardy-device-386718"
os.environ["VERTEX_LOCATION"] = "us-central1"
response = completion(
- model="chat-bison",
+ model="chat-bison",
messages=[{ "content": "Hello, how are you?","role": "user"}],
stream=True,
)
@@ -204,15 +207,15 @@ response = completion(
```python
-from litellm import completion
+from litellm import completion
import os
-os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
+os.environ["HUGGINGFACE_API_KEY"] = "huggingface_api_key"
# e.g. Call 'WizardLM/WizardCoder-Python-34B-V1.0' hosted on HF Inference endpoints
response = completion(
model="huggingface/WizardLM/WizardCoder-Python-34B-V1.0",
- messages=[{ "content": "Hello, how are you?","role": "user"}],
+ messages=[{ "content": "Hello, how are you?","role": "user"}],
api_base="https://my-endpoint.huggingface.cloud",
stream=True,
)
@@ -235,7 +238,7 @@ os.environ["AZURE_API_VERSION"] = ""
# azure call
response = completion(
- "azure/",
+ "azure/",
messages = [{ "content": "Hello, how are you?","role": "user"}],
stream=True,
)
@@ -243,19 +246,19 @@ response = completion(
-
```python
from litellm import completion
response = completion(
- model="ollama/llama2",
- messages = [{ "content": "Hello, how are you?","role": "user"}],
+ model="ollama/llama2",
+ messages = [{ "content": "Hello, how are you?","role": "user"}],
api_base="http://localhost:11434",
stream=True,
)
```
+
@@ -264,60 +267,64 @@ from litellm import completion
import os
## set ENV variables
-os.environ["OPENROUTER_API_KEY"] = "openrouter_api_key"
+os.environ["OPENROUTER_API_KEY"] = "openrouter_api_key"
response = completion(
- model="openrouter/google/palm-2-chat-bison",
+ model="openrouter/google/palm-2-chat-bison",
messages = [{ "content": "Hello, how are you?","role": "user"}],
stream=True,
)
```
+
-## Exception handling
+## Exception handling
-LiteLLM maps exceptions across all supported providers to the OpenAI exceptions. All our exceptions inherit from OpenAI's exception types, so any error-handling you have for that, should work out of the box with LiteLLM.
+LiteLLM maps exceptions across all supported providers to the OpenAI exceptions. All our exceptions inherit from OpenAI's exception types, so any error handling you already have for OpenAI should work out of the box with LiteLLM.
-```python
+```python
from openai import OpenAIError  # openai v1.0.0+
from litellm import completion
os.environ["ANTHROPIC_API_KEY"] = "bad-key"
-try:
- # some code
+try:
+ # some code
completion(model="claude-instant-1", messages=[{"role": "user", "content": "Hey, how's it going?"}])
except OpenAIError as e:
print(e)
```
## Logging Observability - Log LLM Input/Output ([Docs](https://docs.litellm.ai/docs/observability/callbacks))
-LiteLLM exposes pre defined callbacks to send data to Langfuse, LLMonitor, Helicone, Promptlayer, Traceloop, Slack
+
+LiteLLM exposes pre-defined callbacks to send data to Lunary, Langfuse, Helicone, Promptlayer, Traceloop, Slack
+
```python
from litellm import completion
## set env variables for logging tools
os.environ["LANGFUSE_PUBLIC_KEY"] = ""
os.environ["LANGFUSE_SECRET_KEY"] = ""
-os.environ["LLMONITOR_APP_ID"] = "your-llmonitor-app-id"
+os.environ["LUNARY_PUBLIC_KEY"] = "your-lunary-public-key"
os.environ["OPENAI_API_KEY"]
# set callbacks
-litellm.success_callback = ["langfuse", "llmonitor"] # log input/output to langfuse, llmonitor, supabase
+litellm.success_callback = ["langfuse", "lunary"] # log input/output to lunary, langfuse, supabase
#openai call
response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
```
## Track Costs, Usage, Latency for streaming
+
Use a callback function for this - more info on custom callbacks: https://docs.litellm.ai/docs/observability/custom_callback
```python
import litellm
-# track_cost_callback
+# track_cost_callback
def track_cost_callback(
kwargs, # kwargs to completion
completion_response, # response from completion
@@ -328,7 +335,7 @@ def track_cost_callback(
print("streaming response_cost", response_cost)
except:
pass
-# set callback
+# set callback
litellm.success_callback = [track_cost_callback] # set custom callback function
# litellm.completion() call
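> The hunk above shows the callback only in fragments; assembled into one runnable snippet it reads as below. This is a sketch based on the custom-callback docs linked above; `response_cost` is the value LiteLLM places in `kwargs` for streaming calls.

```python
import litellm
from litellm import completion

def track_cost_callback(
    kwargs,               # kwargs passed to completion
    completion_response,  # response object from completion
    start_time, end_time, # datetimes for the call
):
    try:
        response_cost = kwargs.get("response_cost", 0)
        print("streaming response_cost", response_cost)
    except Exception:
        pass

# set custom callback function
litellm.success_callback = [track_cost_callback]

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
    stream=True,
)
for chunk in response:
    pass  # consume the stream; the callback fires once the response completes
```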
@@ -346,11 +353,12 @@ response = completion(
## OpenAI Proxy
-Track spend across multiple projects/people
+Track spend across multiple projects/people
-The proxy provides:
+The proxy provides:
+
1. [Hooks for auth](https://docs.litellm.ai/docs/proxy/virtual_keys#custom-auth)
2. [Hooks for logging](https://docs.litellm.ai/docs/proxy/logging#step-1---create-your-custom-litellm-callback-class)
3. [Cost tracking](https://docs.litellm.ai/docs/proxy/virtual_keys#tracking-spend)
@@ -358,13 +366,14 @@ The proxy provides:
### 📖 Proxy Endpoints - [Swagger Docs](https://litellm-api.up.railway.app/)
-### Quick Start Proxy - CLI
+### Quick Start Proxy - CLI
```shell
pip install 'litellm[proxy]'
```
#### Step 1: Start litellm proxy
+
```shell
$ litellm --model huggingface/bigcode/starcoder
@@ -372,6 +381,7 @@ $ litellm --model huggingface/bigcode/starcoder
```
#### Step 2: Make ChatCompletions Request to Proxy
+
```python
import openai # openai v1.0.0+
client = openai.OpenAI(api_key="anything",base_url="http://0.0.0.0:8000") # set proxy to base_url
@@ -387,6 +397,7 @@ print(response)
```
## More details
-* [exception mapping](./exception_mapping.md)
-* [retries + model fallbacks for completion()](./completion/reliable_completions.md)
-* [proxy virtual keys & spend management](./tutorials/fallbacks.md)
\ No newline at end of file
+
+- [exception mapping](./exception_mapping.md)
+- [retries + model fallbacks for completion()](./completion/reliable_completions.md)
+- [proxy virtual keys & spend management](./tutorials/fallbacks.md)
diff --git a/docs/my-website/src/pages/observability/callbacks.md b/docs/my-website/src/pages/observability/callbacks.md
index be27d76da..2ec288d5e 100644
--- a/docs/my-website/src/pages/observability/callbacks.md
+++ b/docs/my-website/src/pages/observability/callbacks.md
@@ -6,7 +6,7 @@ liteLLM provides `success_callbacks` and `failure_callbacks`, making it easy for
liteLLM supports:
-- [LLMonitor](https://llmonitor.com/docs)
+- [Lunary](https://lunary.ai/docs)
- [Helicone](https://docs.helicone.ai/introduction)
- [Sentry](https://docs.sentry.io/platforms/python/)
- [PostHog](https://posthog.com/docs/libraries/python)
@@ -18,8 +18,8 @@ liteLLM supports:
from litellm import completion
# set callbacks
-litellm.success_callback=["posthog", "helicone", "llmonitor"]
-litellm.failure_callback=["sentry", "llmonitor"]
+litellm.success_callback=["posthog", "helicone", "lunary"]
+litellm.failure_callback=["sentry", "lunary"]
## set env variables
os.environ['SENTRY_DSN'] = ""
os.environ['SENTRY_API_TRACE_RATE'] = ""
diff --git a/litellm/__init__.py b/litellm/__init__.py
index ea5844320..9e7c26186 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -174,6 +174,7 @@ upperbound_key_generate_params: Optional[Dict] = None
default_user_params: Optional[Dict] = None
default_team_settings: Optional[List] = None
max_user_budget: Optional[float] = None
+max_end_user_budget: Optional[float] = None
#### RELIABILITY ####
request_timeout: Optional[float] = 6000
num_retries: Optional[int] = None # per model endpoint
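> A one-line usage sketch for the new setting, assuming the proxy enforces it the same way as the existing `max_user_budget` it sits next to:

```python
import litellm

# cap spend per end-user (your application's customer), in USD
litellm.max_end_user_budget = 10.0
```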
diff --git a/litellm/integrations/llmonitor.py b/litellm/integrations/llmonitor.py
deleted file mode 100644
index ff4c3990f..000000000
--- a/litellm/integrations/llmonitor.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#### What this does ####
-# On success + failure, log events to aispend.io
-import datetime
-import traceback
-import dotenv
-import os
-import requests
-
-dotenv.load_dotenv() # Loading env variables using dotenv
-
-
-# convert to {completion: xx, tokens: xx}
-def parse_usage(usage):
- return {
- "completion": usage["completion_tokens"] if "completion_tokens" in usage else 0,
- "prompt": usage["prompt_tokens"] if "prompt_tokens" in usage else 0,
- }
-
-
-def parse_messages(input):
- if input is None:
- return None
-
- def clean_message(message):
- # if is strin, return as is
- if isinstance(message, str):
- return message
-
- if "message" in message:
- return clean_message(message["message"])
- text = message["content"]
- if text == None:
- text = message.get("function_call", None)
-
- return {
- "role": message["role"],
- "text": text,
- }
-
- if isinstance(input, list):
- if len(input) == 1:
- return clean_message(input[0])
- else:
- return [clean_message(msg) for msg in input]
- else:
- return clean_message(input)
-
-
-class LLMonitorLogger:
- # Class variables or attributes
- def __init__(self):
- # Instance variables
- self.api_url = os.getenv("LLMONITOR_API_URL") or "https://app.llmonitor.com"
- self.app_id = os.getenv("LLMONITOR_APP_ID")
-
- def log_event(
- self,
- type,
- event,
- run_id,
- model,
- print_verbose,
- input=None,
- user_id=None,
- response_obj=None,
- start_time=datetime.datetime.now(),
- end_time=datetime.datetime.now(),
- error=None,
- ):
- # Method definition
- try:
- print_verbose(f"LLMonitor Logging - Logging request for model {model}")
-
- if response_obj:
- usage = (
- parse_usage(response_obj["usage"])
- if "usage" in response_obj
- else None
- )
- output = response_obj["choices"] if "choices" in response_obj else None
- else:
- usage = None
- output = None
-
- if error:
- error_obj = {"stack": error}
-
- else:
- error_obj = None
-
- data = [
- {
- "type": type,
- "name": model,
- "runId": run_id,
- "app": self.app_id,
- "event": "start",
- "timestamp": start_time.isoformat(),
- "userId": user_id,
- "input": parse_messages(input),
- },
- {
- "type": type,
- "runId": run_id,
- "app": self.app_id,
- "event": event,
- "error": error_obj,
- "timestamp": end_time.isoformat(),
- "userId": user_id,
- "output": parse_messages(output),
- "tokensUsage": usage,
- },
- ]
-
- print_verbose(f"LLMonitor Logging - final data object: {data}")
-
- response = requests.post(
- self.api_url + "/api/report",
- headers={"Content-Type": "application/json"},
- json={"events": data},
- )
-
- print_verbose(f"LLMonitor Logging - response: {response}")
- except:
- # traceback.print_exc()
- print_verbose(f"LLMonitor Logging Error - {traceback.format_exc()}")
- pass
diff --git a/litellm/integrations/lunary.py b/litellm/integrations/lunary.py
new file mode 100644
index 000000000..79491dfc6
--- /dev/null
+++ b/litellm/integrations/lunary.py
@@ -0,0 +1,157 @@
+#### What this does ####
+# On success + failure, log events to lunary.ai
+from datetime import datetime, timezone
+import traceback
+import dotenv
+import importlib.metadata
+from pkg_resources import parse_version
+import sys
+
+dotenv.load_dotenv()
+
+# convert usage to {completion: xx, prompt: xx}
+def parse_usage(usage):
+ return {
+ "completion": usage["completion_tokens"] if "completion_tokens" in usage else 0,
+ "prompt": usage["prompt_tokens"] if "prompt_tokens" in usage else 0,
+ }
+
+def parse_messages(input):
+ if input is None:
+ return None
+
+ def clean_message(message):
+        # if it's a string, return as is
+ if isinstance(message, str):
+ return message
+
+ if "message" in message:
+ return clean_message(message["message"])
+
+ serialized = {
+ "role": message.get("role"),
+ "content": message.get("content"),
+ }
+
+ # Only add tool_calls and function_call to res if they are set
+ if message.get("tool_calls"):
+ serialized["tool_calls"] = message.get("tool_calls")
+ if message.get("function_call"):
+ serialized["function_call"] = message.get("function_call")
+
+ return serialized
+
+ if isinstance(input, list):
+ if len(input) == 1:
+ return clean_message(input[0])
+ else:
+ return [clean_message(msg) for msg in input]
+ else:
+ return clean_message(input)
+
+
+class LunaryLogger:
+    # thin wrapper around the lunary SDK; verifies the installed version on init
+ def __init__(self):
+ try:
+ import lunary
+ version = importlib.metadata.version("lunary")
+ # if version < 0.1.43 then raise ImportError
+ if parse_version(version) < parse_version("0.1.43"):
+                print("Lunary version outdated. Required: >= 0.1.43. Upgrade via 'pip install lunary --upgrade'")
+ raise ImportError
+
+ self.lunary_client = lunary
+ except ImportError:
+ print("Lunary not installed. Please install it using 'pip install lunary'")
+ raise ImportError
+
+ def log_event(
+ self,
+ kwargs,
+ type,
+ event,
+ run_id,
+ model,
+ print_verbose,
+ extra=None,
+ input=None,
+ user_id=None,
+ response_obj=None,
+ start_time=datetime.now(timezone.utc),
+ end_time=datetime.now(timezone.utc),
+ error=None,
+ ):
+        # send paired start/end events for this run to Lunary
+ try:
+ print_verbose(f"Lunary Logging - Logging request for model {model}")
+
+ litellm_params = kwargs.get("litellm_params", {})
+ metadata = (
+ litellm_params.get("metadata", {}) or {}
+ )
+
+ tags = litellm_params.pop("tags", None) or []
+
+ if extra:
+ extra.pop("extra_body", None)
+ extra.pop("user", None)
+ template_id = extra.pop("extra_headers", {}).get("Template-Id", None)
+
+ # keep only serializable types
+ for param, value in extra.items():
+ if not isinstance(value, (str, int, bool, float)):
+ try:
+ extra[param] = str(value)
+ except:
+ pass
+
+ if response_obj:
+ usage = (
+ parse_usage(response_obj["usage"])
+ if "usage" in response_obj
+ else None
+ )
+
+ output = response_obj["choices"] if "choices" in response_obj else None
+
+ else:
+ usage = None
+ output = None
+
+ if error:
+ error_obj = {"stack": error}
+ else:
+ error_obj = None
+
+ self.lunary_client.track_event(
+ type,
+ "start",
+ run_id,
+ user_id=user_id,
+ name=model,
+ input=parse_messages(input),
+ timestamp=start_time.astimezone(timezone.utc).isoformat(),
+ template_id=template_id,
+ metadata=metadata,
+ runtime="litellm",
+ tags=tags,
+ extra=extra,
+ )
+
+ self.lunary_client.track_event(
+ type,
+ event,
+ run_id,
+ timestamp=end_time.astimezone(timezone.utc).isoformat(),
+ runtime="litellm",
+ error=error_obj,
+ output=parse_messages(output),
+ token_usage=usage
+ )
+
+ except:
+ # traceback.print_exc()
+ print_verbose(f"Lunary Logging Error - {traceback.format_exc()}")
+ pass
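> To make the normalization in `parse_messages` / `parse_usage` concrete, here is what the new module returns for typical inputs (a sketch; the expected values in the comments follow directly from the code above):

```python
from litellm.integrations.lunary import parse_messages, parse_usage

# a single-element list is unwrapped and serialized to {role, content}
print(parse_messages([{"role": "user", "content": "Hello"}]))
# -> {'role': 'user', 'content': 'Hello'}

# tool_calls / function_call are only included when actually set
msg = {"role": "assistant", "content": None, "tool_calls": [{"id": "call_1"}]}
print(parse_messages([msg, msg]))
# -> [{'role': 'assistant', 'content': None, 'tool_calls': [{'id': 'call_1'}]}, ...]

print(parse_usage({"prompt_tokens": 10, "completion_tokens": 3}))
# -> {'completion': 3, 'prompt': 10}
```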
diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py
index b6200a1a4..864ad658f 100644
--- a/litellm/llms/anthropic.py
+++ b/litellm/llms/anthropic.py
@@ -3,7 +3,7 @@ import json
from enum import Enum
import requests, copy
import time, uuid
-from typing import Callable, Optional
+from typing import Callable, Optional, List
from litellm.utils import ModelResponse, Usage, map_finish_reason, CustomStreamWrapper
import litellm
from .prompt_templates.factory import (
@@ -118,6 +118,7 @@ def completion(
):
headers = validate_environment(api_key, headers)
_is_function_call = False
+ json_schemas: dict = {}
messages = copy.deepcopy(messages)
optional_params = copy.deepcopy(optional_params)
if model in custom_prompt_dict:
@@ -161,6 +162,10 @@ def completion(
## Handle Tool Calling
if "tools" in optional_params:
_is_function_call = True
+ for tool in optional_params["tools"]:
+ json_schemas[tool["function"]["name"]] = tool["function"].get(
+ "parameters", None
+ )
tool_calling_system_prompt = construct_tool_use_system_prompt(
tools=optional_params["tools"]
)
@@ -248,7 +253,12 @@ def completion(
0
].strip()
function_arguments_str = f"{function_arguments_str}"
- function_arguments = parse_xml_params(function_arguments_str)
+ function_arguments = parse_xml_params(
+ function_arguments_str,
+ json_schema=json_schemas.get(
+ function_name, None
+ ), # check if we have a json schema for this function name
+ )
_message = litellm.Message(
tool_calls=[
{
@@ -263,6 +273,9 @@ def completion(
content=None,
)
model_response.choices[0].message = _message # type: ignore
+ model_response._hidden_params["original_response"] = (
+ text_content # allow user to access raw anthropic tool calling response
+ )
else:
model_response.choices[0].message.content = text_content # type: ignore
model_response.choices[0].finish_reason = map_finish_reason(
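> Because the XML-to-tool-call parsing is lossy, the raw Anthropic text is now stashed on the response. A hedged sketch of reading it back; note `_hidden_params` is an internal field, so treat this as best-effort debugging rather than a stable API:

```python
import os
import litellm

os.environ["ANTHROPIC_API_KEY"] = "your-api-key"

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    },
}]

response = litellm.completion(
    model="claude-2",
    messages=[{"role": "user", "content": "What's the weather in SF?"}],
    tools=tools,
)

print(response.choices[0].message.tool_calls)            # parsed OpenAI-style tool calls
print(response._hidden_params.get("original_response"))  # raw anthropic text, kept for debugging
```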
diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py
index 8177167a5..ae78d3ec4 100644
--- a/litellm/llms/bedrock.py
+++ b/litellm/llms/bedrock.py
@@ -691,6 +691,7 @@ def completion(
):
exception_mapping_worked = False
_is_function_call = False
+ json_schemas: dict = {}
try:
# pop aws_secret_access_key, aws_access_key_id, aws_region_name from kwargs, since completion calls fail with them
aws_secret_access_key = optional_params.pop("aws_secret_access_key", None)
@@ -757,6 +758,10 @@ def completion(
## Handle Tool Calling
if "tools" in inference_params:
_is_function_call = True
+ for tool in inference_params["tools"]:
+ json_schemas[tool["function"]["name"]] = tool["function"].get(
+ "parameters", None
+ )
tool_calling_system_prompt = construct_tool_use_system_prompt(
tools=inference_params["tools"]
)
@@ -943,7 +948,12 @@ def completion(
function_arguments_str = (
f"{function_arguments_str}"
)
- function_arguments = parse_xml_params(function_arguments_str)
+ function_arguments = parse_xml_params(
+ function_arguments_str,
+ json_schema=json_schemas.get(
+ function_name, None
+                ),  # check if we have a json schema for this function name
+ )
_message = litellm.Message(
tool_calls=[
{
@@ -958,6 +968,9 @@ def completion(
content=None,
)
model_response.choices[0].message = _message # type: ignore
+ model_response._hidden_params["original_response"] = (
+ outputText # allow user to access raw anthropic tool calling response
+ )
if _is_function_call == True and stream is not None and stream == True:
print_verbose(
f"INSIDE BEDROCK STREAMING TOOL CALLING CONDITION BLOCK"
diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index 90bc64161..4492423f4 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -731,18 +731,53 @@ def contains_tag(tag: str, string: str) -> bool:
return bool(re.search(f"<{tag}>(.+?)</{tag}>", string, re.DOTALL))
-def parse_xml_params(xml_content):
+def parse_xml_params(xml_content, json_schema: Optional[dict] = None):
+ """
+ Compare the xml output to the json schema
+
+ check if a value is a list - if so, get it's child elements
+ """
root = ET.fromstring(xml_content)
params = {}
- for child in root.findall(".//parameters/*"):
- try:
- # Attempt to decode the element's text as JSON
- params[child.tag] = json.loads(child.text)
- except json.JSONDecodeError:
- # If JSON decoding fails, use the original text
- params[child.tag] = child.text
+
+ if json_schema is not None: # check if we have a json schema for this function call
+ # iterate over all properties in the schema
+ for prop in json_schema["properties"]:
+ # If property is an array, get the nested items
+ _element = root.find(f"parameters/{prop}")
+ if json_schema["properties"][prop]["type"] == "array":
+ items = []
+ if _element is not None:
+ for value in _element:
+ try:
+ if value.text is not None:
+ _value = json.loads(value.text)
+ else:
+ continue
+ except json.JSONDecodeError:
+ _value = value.text
+ items.append(_value)
+ params[prop] = items
+ # If property is not an array, append the value directly
+ elif _element is not None and _element.text is not None:
+ try:
+ _value = json.loads(_element.text)
+ except json.JSONDecodeError:
+ _value = _element.text
+ params[prop] = _value
+ else:
+ for child in root.findall(".//parameters/*"):
+ if child is not None and child.text is not None:
+ try:
+ # Attempt to decode the element's text as JSON
+ params[child.tag] = json.loads(child.text) # type: ignore
+ except json.JSONDecodeError:
+ # If JSON decoding fails, use the original text
+ params[child.tag] = child.text # type: ignore
+
return params
+
###
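> To see why the schema matters, compare the array case: without a schema, a nested list is lost (the parent element has no text of its own), while with a schema the `array` property collects its child elements. A sketch; the XML shape is an assumption based on the `<parameters>` paths the code queries:

```python
from litellm.llms.prompt_templates.factory import parse_xml_params

xml = (
    "<invoke><parameters>"
    '<cities><city>"SF"</city><city>"NYC"</city></cities>'
    "<limit>2</limit>"
    "</parameters></invoke>"
)

schema = {
    "properties": {
        "cities": {"type": "array", "items": {"type": "string"}},
        "limit": {"type": "integer"},
    }
}

print(parse_xml_params(xml, json_schema=schema))
# -> {'cities': ['SF', 'NYC'], 'limit': 2}

print(parse_xml_params(xml))
# -> {'limit': 2}   (without a schema, the nested list is dropped)
```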
diff --git a/litellm/main.py b/litellm/main.py
index 665784f1d..1fcf0d5d3 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -2952,7 +2952,26 @@ async def atext_completion(*args, **kwargs):
model=model,
)
else:
- return response
+ transformed_logprobs = None
+ # only supported for TGI models
+ try:
+ raw_response = response._hidden_params.get("original_response", None)
+ transformed_logprobs = litellm.utils.transform_logprobs(raw_response)
+ except Exception as e:
+ print_verbose(f"LiteLLM non blocking exception: {e}")
+ text_completion_response = TextCompletionResponse()
+ text_completion_response["id"] = response.get("id", None)
+ text_completion_response["object"] = "text_completion"
+ text_completion_response["created"] = response.get("created", None)
+ text_completion_response["model"] = response.get("model", None)
+ text_choices = TextChoices()
+ text_choices["text"] = response["choices"][0]["message"]["content"]
+ text_choices["index"] = response["choices"][0]["index"]
+ text_choices["logprobs"] = transformed_logprobs
+ text_choices["finish_reason"] = response["choices"][0]["finish_reason"]
+ text_completion_response["choices"] = [text_choices]
+ text_completion_response["usage"] = response.get("usage", None)
+ return text_completion_response
except Exception as e:
custom_llm_provider = custom_llm_provider or "openai"
raise exception_type(
@@ -3165,6 +3184,7 @@ def text_completion(
transformed_logprobs = litellm.utils.transform_logprobs(raw_response)
except Exception as e:
print_verbose(f"LiteLLM non blocking exception: {e}")
+
text_completion_response["id"] = response.get("id", None)
text_completion_response["object"] = "text_completion"
text_completion_response["created"] = response.get("created", None)
@@ -3176,6 +3196,7 @@ def text_completion(
text_choices["finish_reason"] = response["choices"][0]["finish_reason"]
text_completion_response["choices"] = [text_choices]
text_completion_response["usage"] = response.get("usage", None)
+
return text_completion_response
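> With this change the async path returns a proper `TextCompletionResponse` (id/object/created/model/choices/usage) instead of the raw chat response, matching the sync `text_completion`. A minimal sketch of calling it; logprobs stay `None` unless the provider is TGI, per the comment in the hunk:

```python
import asyncio
import os
from litellm import atext_completion

os.environ["OPENAI_API_KEY"] = "your-api-key"

async def main():
    response = await atext_completion(
        model="gpt-3.5-turbo",
        prompt="Say this is a test",
    )
    print(response.choices[0].text)           # completion text
    print(response.choices[0].finish_reason)  # e.g. "stop"
    print(response.usage)                     # token counts

asyncio.run(main())
```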
diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index d24d5a8ee..d3eb93d72 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -1503,7 +1503,7 @@
"litellm_provider": "bedrock",
"mode": "chat"
},
- "mistral.mixtral-8x7b-instruct": {
+ "mistral.mixtral-8x7b-instruct-v0:1": {
"max_tokens": 8191,
"max_input_tokens": 32000,
"max_output_tokens": 8191,
@@ -1512,7 +1512,7 @@
"litellm_provider": "bedrock",
"mode": "chat"
},
- "bedrock/us-west-2/mistral.mixtral-8x7b-instruct": {
+ "bedrock/us-west-2/mistral.mixtral-8x7b-instruct-v0:1": {
"max_tokens": 8191,
"max_input_tokens": 32000,
"max_output_tokens": 8191,
diff --git a/litellm/proxy/_experimental/out/404.html b/litellm/proxy/_experimental/out/404.html
index 4f087e491..e69de29bb 100644
--- a/litellm/proxy/_experimental/out/404.html
+++ b/litellm/proxy/_experimental/out/404.html
@@ -1 +0,0 @@
-[minified 404 page removed: "404: This page could not be found. 🚅 LiteLLM"]
\ No newline at end of file
diff --git a/litellm/proxy/_experimental/out/_next/static/DptMjzo5xd96cx0b56k4u/_buildManifest.js b/litellm/proxy/_experimental/out/_next/static/DptMjzo5xd96cx0b56k4u/_buildManifest.js
new file mode 100644
index 000000000..f779caa02
--- /dev/null
+++ b/litellm/proxy/_experimental/out/_next/static/DptMjzo5xd96cx0b56k4u/_buildManifest.js
@@ -0,0 +1 @@
+self.__BUILD_MANIFEST={__rewrites:{afterFiles:[],beforeFiles:[],fallback:[]},"/_error":["static/chunks/pages/_error-d6107f1aac0c574c.js"],sortedPages:["/_app","/_error"]},self.__BUILD_MANIFEST_CB&&self.__BUILD_MANIFEST_CB();
\ No newline at end of file
diff --git a/litellm/proxy/_experimental/out/_next/static/DptMjzo5xd96cx0b56k4u/_ssgManifest.js b/litellm/proxy/_experimental/out/_next/static/DptMjzo5xd96cx0b56k4u/_ssgManifest.js
new file mode 100644
index 000000000..5b3ff592f
--- /dev/null
+++ b/litellm/proxy/_experimental/out/_next/static/DptMjzo5xd96cx0b56k4u/_ssgManifest.js
@@ -0,0 +1 @@
+self.__SSG_MANIFEST=new Set([]);self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB()
\ No newline at end of file
diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/app/page-a5a04da2a9356785.js b/litellm/proxy/_experimental/out/_next/static/chunks/app/page-a5a04da2a9356785.js
new file mode 100644
index 000000000..77f6db469
--- /dev/null
+++ b/litellm/proxy/_experimental/out/_next/static/chunks/app/page-a5a04da2a9356785.js
@@ -0,0 +1 @@
+(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[931],{20661:function(e,t,s){Promise.resolve().then(s.bind(s,90177))},90177:function(e,t,s){"use strict";s.r(t),s.d(t,{default:function(){return eB}});var l=s(3827),r=s(64090),n=s(47907),a=s(8792),o=s(40491),i=s(65270),c=e=>{let{userID:t,userRole:s,userEmail:r,showSSOBanner:n}=e;console.log("User ID:",t),console.log("userEmail:",r);let c=[{key:"1",label:(0,l.jsxs)(l.Fragment,{children:[(0,l.jsxs)("p",{children:["Role: ",s]}),(0,l.jsxs)("p",{children:["ID: ",t]})]})}];return(0,l.jsxs)("nav",{className:"left-0 right-0 top-0 flex justify-between items-center h-12 mb-4",children:[(0,l.jsx)("div",{className:"text-left my-2 absolute top-0 left-0",children:(0,l.jsx)("div",{className:"flex flex-col items-center",children:(0,l.jsx)(a.default,{href:"/",children:(0,l.jsx)("button",{className:"text-gray-800 text-2xl py-1 rounded text-center",children:(0,l.jsx)("img",{src:"/get_image",width:200,height:200,alt:"LiteLLM Brand",className:"mr-2"})})})})}),(0,l.jsxs)("div",{className:"text-right mx-4 my-2 absolute top-0 right-0 flex items-center justify-end space-x-2",children:[n?(0,l.jsx)("a",{href:"https://docs.litellm.ai/docs/proxy/ui#setup-ssoauth-for-ui",target:"_blank",className:"mr-2"}):null,(0,l.jsx)("div",{style:{border:"1px solid #391085",padding:"6px",borderRadius:"8px"},children:(0,l.jsx)(o.Z,{menu:{items:c},children:(0,l.jsx)(i.Z,{children:r})})})]})]})},d=s(80588);let m=async(e,t,s)=>{try{if(console.log("Form Values in keyCreateCall:",s),s.description&&(s.metadata||(s.metadata={}),s.metadata.description=s.description,delete s.description,s.metadata=JSON.stringify(s.metadata)),s.metadata){console.log("formValues.metadata:",s.metadata);try{s.metadata=JSON.parse(s.metadata)}catch(e){throw d.ZP.error("Failed to parse metadata: "+e),Error("Failed to parse metadata: "+e)}}console.log("Form Values after check:",s);let l=await fetch("/key/generate",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({user_id:t,...s})});if(!l.ok){let e=await l.text();throw d.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let r=await l.json();return console.log("API Response:",r),r}catch(e){throw console.error("Failed to create key:",e),e}},h=async(e,t,s)=>{try{if(console.log("Form Values in keyCreateCall:",s),s.description&&(s.metadata||(s.metadata={}),s.metadata.description=s.description,delete s.description,s.metadata=JSON.stringify(s.metadata)),s.metadata){console.log("formValues.metadata:",s.metadata);try{s.metadata=JSON.parse(s.metadata)}catch(e){throw d.ZP.error("Failed to parse metadata: "+e),Error("Failed to parse metadata: "+e)}}console.log("Form Values after check:",s);let l=await fetch("/user/new",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({user_id:t,...s})});if(!l.ok){let e=await l.text();throw d.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let r=await l.json();return console.log("API Response:",r),r}catch(e){throw console.error("Failed to create key:",e),e}},u=async(e,t)=>{try{console.log("in keyDeleteCall:",t);let s=await fetch("/key/delete",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({keys:[t]})});if(!s.ok){let e=await s.text();throw d.ZP.error("Failed to delete key: "+e),Error("Network response 
was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to create key:",e),e}},x=async(e,t)=>{try{console.log("in teamDeleteCall:",t);let s=await fetch("/team/delete",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({team_ids:[t]})});if(!s.ok){let e=await s.text();throw d.ZP.error("Failed to delete team: "+e),Error("Network response was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to delete key:",e),e}},p=async function(e,t,s){let l=arguments.length>3&&void 0!==arguments[3]&&arguments[3];try{let r="/user/info";"App Owner"==s&&t&&(r="".concat(r,"?user_id=").concat(t)),console.log("in userInfoCall viewAll=",l),l&&(r="".concat(r,"?view_all=true"));let n=await fetch(r,{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!n.ok){let e=await n.text();throw d.ZP.error(e),Error("Network response was not ok")}let a=await n.json();return console.log("API Response:",a),a}catch(e){throw console.error("Failed to create key:",e),e}},j=async e=>{try{let t=await fetch("/global/spend",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}return await t.json()}catch(e){throw console.error("Failed to create key:",e),e}},g=async(e,t,s)=>{try{let t=await fetch("/v2/model/info",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}return await t.json()}catch(e){throw console.error("Failed to create key:",e),e}},y=async(e,t,s)=>{try{let t=await fetch("/model/metrics",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}return await t.json()}catch(e){throw console.error("Failed to create key:",e),e}},f=async(e,t,s)=>{try{let t=await fetch("/models",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}return await t.json()}catch(e){throw console.error("Failed to create key:",e),e}},w=async(e,t)=>{try{let s="/global/spend/logs";console.log("in keySpendLogsCall:",s);let l=await fetch("".concat(s,"?api_key=").concat(t),{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!l.ok){let e=await l.text();throw d.ZP.error(e),Error("Network response was not ok")}let r=await l.json();return console.log(r),r}catch(e){throw console.error("Failed to create key:",e),e}},Z=async e=>{try{let t="/global/spend/teams";console.log("in teamSpendLogsCall:",t);let s=await fetch("".concat(t),{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!s.ok){let e=await s.text();throw d.ZP.error(e),Error("Network response was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to create key:",e),e}},_=async(e,t,s,l,r,n)=>{try{console.log("user role in spend logs call: ".concat(s));let t="/spend/logs";t="App Owner"==s?"".concat(t,"?user_id=").concat(l,"&start_date=").concat(r,"&end_date=").concat(n):"".concat(t,"?start_date=").concat(r,"&end_date=").concat(n);let a=await fetch(t,{method:"GET",headers:{Authorization:"Bearer 
".concat(e),"Content-Type":"application/json"}});if(!a.ok){let e=await a.text();throw d.ZP.error(e),Error("Network response was not ok")}let o=await a.json();return console.log(o),o}catch(e){throw console.error("Failed to create key:",e),e}},k=async e=>{try{let t=await fetch("/global/spend/logs",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}let s=await t.json();return console.log(s),s}catch(e){throw console.error("Failed to create key:",e),e}},b=async e=>{try{let t=await fetch("/global/spend/keys?limit=5",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}let s=await t.json();return console.log(s),s}catch(e){throw console.error("Failed to create key:",e),e}},v=async(e,t)=>{try{t&&JSON.stringify({api_key:t});let s={method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}};t&&(s.body=JSON.stringify({api_key:t}));let l=await fetch("/global/spend/end_users",s);if(!l.ok){let e=await l.text();throw d.ZP.error(e),Error("Network response was not ok")}let r=await l.json();return console.log(r),r}catch(e){throw console.error("Failed to create key:",e),e}},N=async e=>{try{let t=await fetch("/global/spend/models?limit=5",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}let s=await t.json();return console.log(s),s}catch(e){throw console.error("Failed to create key:",e),e}},S=async(e,t)=>{try{let s=await fetch("/v2/key/info",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({keys:t})});if(!s.ok){let e=await s.text();throw d.ZP.error(e),Error("Network response was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to create key:",e),e}},A=async(e,t,s,l)=>{try{let r=await fetch("/user/request_model",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({models:[t],user_id:s,justification:l})});if(!r.ok){let e=await r.text();throw d.ZP.error("Failed to delete key: "+e),Error("Network response was not ok")}let n=await r.json();return console.log(n),n}catch(e){throw console.error("Failed to create key:",e),e}},C=async e=>{try{let t="/user/get_requests";console.log("in userGetRequesedtModelsCall:",t);let s=await fetch(t,{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!s.ok){let e=await s.text();throw d.ZP.error("Failed to delete key: "+e),Error("Network response was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to get requested models:",e),e}},T=async(e,t)=>{try{let s="/user/get_users?role=".concat(t);console.log("in userGetAllUsersCall:",s);let l=await fetch(s,{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!l.ok){let e=await l.text();throw d.ZP.error("Failed to delete key: "+e),Error("Network response was not ok")}let r=await l.json();return console.log(r),r}catch(e){throw console.error("Failed to get requested models:",e),e}},I=async(e,t)=>{try{console.log("Form Values in teamCreateCall:",t);let s=await fetch("/team/new",{method:"POST",headers:{Authorization:"Bearer 
".concat(e),"Content-Type":"application/json"},body:JSON.stringify({...t})});if(!s.ok){let e=await s.text();throw d.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let l=await s.json();return console.log("API Response:",l),l}catch(e){throw console.error("Failed to create key:",e),e}},P=async(e,t,s)=>{try{console.log("Form Values in teamMemberAddCall:",s);let l=await fetch("/team/member_add",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({team_id:t,member:s})});if(!l.ok){let e=await l.text();throw d.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let r=await l.json();return console.log("API Response:",r),r}catch(e){throw console.error("Failed to create key:",e),e}},E=async(e,t)=>{try{console.log("Form Values in userUpdateUserCall:",t);let s=await fetch("/user/update",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({user_role:"proxy_admin_viewer",...t})});if(!s.ok){let e=await s.text();throw d.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let l=await s.json();return console.log("API Response:",l),l}catch(e){throw console.error("Failed to create key:",e),e}},F=async(e,t)=>{try{let s=await fetch("/global/predict/spend/logs",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({data:t})});if(!s.ok){let e=await s.text();throw d.ZP.error(e),Error("Network response was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to create key:",e),e}};var M=s(10384),O=s(46453),R=s(2179),D=s(71801),U=s(42440),z=s(55217),B=s(50670),L=s(12143),K=s(77171),V=s(42539),q=s(88707),J=s(1861);let{Option:G}=B.default;var W=e=>{let{userID:t,team:s,userRole:n,accessToken:a,data:o,setData:i}=e,[c]=L.Z.useForm(),[h,u]=(0,r.useState)(!1),[x,p]=(0,r.useState)(null),[j,g]=(0,r.useState)(null),[y,w]=(0,r.useState)([]),Z=()=>{u(!1),c.resetFields()},_=()=>{u(!1),p(null),c.resetFields()};(0,r.useEffect)(()=>{(async()=>{try{if(null===t||null===n)return;if(null!==a){let e=(await f(a,t,n)).data.map(e=>e.id);console.log("available_model_names:",e),w(e)}}catch(e){console.error("Error fetching user models:",e)}})()},[a,t,n]);let k=async e=>{try{d.ZP.info("Making API Call"),u(!0);let s=await m(a,t,e);console.log("key create Response:",s),i(e=>e?[...e,s]:[s]),p(s.key),g(s.soft_budget),d.ZP.success("API Key Created"),c.resetFields(),localStorage.removeItem("userData"+t)}catch(e){console.error("Error creating the key:",e)}},b=e=>{if(e.includes("all_models")){let e=s?s.models:y;c.setFieldsValue({models:e})}};return(0,l.jsxs)("div",{children:[(0,l.jsx)(R.Z,{className:"mx-auto",onClick:()=>u(!0),children:"+ Create New Key"}),(0,l.jsx)(K.Z,{title:"Create Key",visible:h,width:800,footer:null,onOk:Z,onCancel:_,children:(0,l.jsxs)(L.Z,{form:c,onFinish:k,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:["App Owner"===n||"Admin"===n?(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)(L.Z.Item,{label:"Key Name",name:"key_alias",rules:[{required:!0,message:"Please input a key name"}],help:"required",children:(0,l.jsx)(V.Z,{})}),(0,l.jsx)(L.Z.Item,{label:"Team 
ID",name:"team_id",initialValue:s?s.team_id:null,valuePropName:"team_id",className:"mt-8",children:(0,l.jsx)(V.Z,{value:s?s.team_alias:"",disabled:!0})}),(0,l.jsx)(L.Z.Item,{label:"Models",name:"models",children:(0,l.jsxs)(B.default,{mode:"multiple",placeholder:"Select models",style:{width:"100%"},onChange:e=>b(e),children:[(0,l.jsx)(G,{value:"all_models",children:"All Models"},"all_models"),s&&s.models?s.models.map(e=>(0,l.jsx)(G,{value:e,children:e},e)):y.map(e=>(0,l.jsx)(G,{value:e,children:e},e))]})}),(0,l.jsx)(L.Z.Item,{className:"mt-8",label:"Max Budget (USD)",name:"max_budget",help:"Budget cannot exceed team max budget: $".concat((null==s?void 0:s.max_budget)!==null&&(null==s?void 0:s.max_budget)!==void 0?null==s?void 0:s.max_budget:"unlimited"),rules:[{validator:async(e,t)=>{if(t&&s&&null!==s.max_budget&&t>s.max_budget)throw Error("Budget cannot exceed team max budget: $".concat(s.max_budget))}}],children:(0,l.jsx)(q.Z,{step:.01,precision:2,width:200})}),(0,l.jsx)(L.Z.Item,{className:"mt-8",label:"Reset Budget",name:"budget_duration",help:"Team Reset Budget: ".concat((null==s?void 0:s.budget_duration)!==null&&(null==s?void 0:s.budget_duration)!==void 0?null==s?void 0:s.budget_duration:"None"),children:(0,l.jsxs)(B.default,{defaultValue:null,placeholder:"n/a",children:[(0,l.jsx)(B.default.Option,{value:"24h",children:"daily"}),(0,l.jsx)(B.default.Option,{value:"30d",children:"monthly"})]})}),(0,l.jsx)(L.Z.Item,{className:"mt-8",label:"Tokens per minute Limit (TPM)",name:"tpm_limit",help:"TPM cannot exceed team TPM limit: ".concat((null==s?void 0:s.tpm_limit)!==null&&(null==s?void 0:s.tpm_limit)!==void 0?null==s?void 0:s.tpm_limit:"unlimited"),rules:[{validator:async(e,t)=>{if(t&&s&&null!==s.tpm_limit&&t>s.tpm_limit)throw Error("TPM limit cannot exceed team TPM limit: ".concat(s.tpm_limit))}}],children:(0,l.jsx)(q.Z,{step:1,width:400})}),(0,l.jsx)(L.Z.Item,{className:"mt-8",label:"Requests per minute Limit (RPM)",name:"rpm_limit",help:"RPM cannot exceed team RPM limit: ".concat((null==s?void 0:s.rpm_limit)!==null&&(null==s?void 0:s.rpm_limit)!==void 0?null==s?void 0:s.rpm_limit:"unlimited"),rules:[{validator:async(e,t)=>{if(t&&s&&null!==s.rpm_limit&&t>s.rpm_limit)throw Error("RPM limit cannot exceed team RPM limit: ".concat(s.rpm_limit))}}],children:(0,l.jsx)(q.Z,{step:1,width:400})}),(0,l.jsx)(L.Z.Item,{label:"Expire Key (eg: 30s, 30h, 30d)",name:"duration",className:"mt-8",children:(0,l.jsx)(V.Z,{})}),(0,l.jsx)(L.Z.Item,{label:"Metadata",name:"metadata",children:(0,l.jsx)(V.Z.TextArea,{rows:4,placeholder:"Enter metadata as JSON"})})]}):(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)(L.Z.Item,{label:"Key Name",name:"key_alias",children:(0,l.jsx)(V.Z,{})}),(0,l.jsx)(L.Z.Item,{label:"Team ID (Contact Group)",name:"team_id",children:(0,l.jsx)(V.Z,{placeholder:"default team (create a new team)"})}),(0,l.jsx)(L.Z.Item,{label:"Description",name:"description",children:(0,l.jsx)(V.Z.TextArea,{placeholder:"Enter description",rows:4})})]}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(J.ZP,{htmlType:"submit",children:"Create Key"})})]})}),x&&(0,l.jsx)(K.Z,{visible:h,onOk:Z,onCancel:_,footer:null,children:(0,l.jsxs)(O.Z,{numItems:1,className:"gap-2 w-full",children:[(0,l.jsx)(U.Z,{children:"Save your Key"}),(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsxs)("p",{children:["Please save this secret key somewhere safe and accessible. For security reasons, ",(0,l.jsx)("b",{children:"you will not be able to view it again"})," ","through your LiteLLM account. 
If you lose this secret key, you will need to generate a new one."]})}),(0,l.jsx)(M.Z,{numColSpan:1,children:null!=x?(0,l.jsxs)("div",{children:[(0,l.jsx)(D.Z,{className:"mt-3",children:"API Key:"}),(0,l.jsx)("div",{style:{background:"#f8f8f8",padding:"10px",borderRadius:"5px",marginBottom:"10px"},children:(0,l.jsx)("pre",{style:{wordWrap:"break-word",whiteSpace:"normal"},children:x})}),(0,l.jsx)(z.CopyToClipboard,{text:x,onCopy:()=>{d.ZP.success("API Key copied to clipboard")},children:(0,l.jsx)(R.Z,{className:"mt-3",children:"Copy API Key"})})]}):(0,l.jsx)(D.Z,{children:"Key being created, this might take 30s"})})]})})]})},$=s(9454),Y=s(33393),H=s(5),X=s(9853),Q=s(13810),ee=s(39290),et=s(66952),es=s(61244),el=s(10827),er=s(3851),en=s(2044),ea=s(64167),eo=s(74480),ei=s(7178),ec=e=>{let{userID:t,accessToken:s,selectedTeam:n,data:a,setData:o}=e,[i,c]=(0,r.useState)(!1),[d,m]=(0,r.useState)(!1),[h,x]=(0,r.useState)(null),[p,j]=r.useState(null),[g,y]=(0,r.useState)(null),[f,Z]=(0,r.useState)(null),[_,k]=(0,r.useState)(""),b=async e=>{try{if(null==s||null==e)return;console.log("accessToken: ".concat(s,"; token: ").concat(e.token));let t=await w(s,e.token);console.log("Response:",t),Z(t);let l=await F(s,t);console.log("Response2:",l);let r=[...t,...l.response];Z(r),k(l.predicted_spend),console.log("Combined Data:",r)}catch(e){console.error("There was an error fetching the data",e)}};(0,r.useEffect)(()=>{b(g)},[g]);let v=async e=>{null!=a&&(x(e),localStorage.removeItem("userData"+t),m(!0))},N=async()=>{if(null!=h&&null!=a){try{await u(s,h);let e=a.filter(e=>e.token!==h);o(e)}catch(e){console.error("Error deleting the key:",e)}m(!1),x(null)}};if(null!=a)return console.log("RERENDER TRIGGERED"),(0,l.jsx)("div",{children:(0,l.jsxs)(Q.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh] mb-4 mt-2",children:[(0,l.jsxs)(el.Z,{className:"mt-5",children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"Key Alias"}),(0,l.jsx)(eo.Z,{children:"Secret Key"}),(0,l.jsx)(eo.Z,{children:"Spend (USD)"}),(0,l.jsx)(eo.Z,{children:"Budget (USD)"}),(0,l.jsx)(eo.Z,{children:"Models"}),(0,l.jsx)(eo.Z,{children:"TPM / RPM Limits"})]})}),(0,l.jsx)(er.Z,{children:a.map(e=>(console.log(e),"litellm-dashboard"===e.team_id||n&&e.team_id!=n.team_id)?null:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{style:{maxWidth:"2px",whiteSpace:"pre-wrap",overflow:"hidden"},children:null!=e.key_alias?(0,l.jsx)(D.Z,{children:e.key_alias}):(0,l.jsx)(D.Z,{children:"Not Set"})}),(0,l.jsx)(en.Z,{children:(0,l.jsx)(D.Z,{children:e.key_name})}),(0,l.jsx)(en.Z,{children:(0,l.jsx)(D.Z,{children:(()=>{try{return parseFloat(e.spend).toFixed(4)}catch(t){return e.spend}})()})}),(0,l.jsx)(en.Z,{children:null!=e.max_budget?(0,l.jsx)(D.Z,{children:e.max_budget}):(0,l.jsx)(D.Z,{children:"Unlimited"})}),(0,l.jsx)(en.Z,{children:Array.isArray(e.models)?(0,l.jsx)("div",{style:{display:"flex",flexDirection:"column"},children:0===e.models.length?(0,l.jsx)(l.Fragment,{children:n&&n.models&&n.models.length>0?n.models.map((e,t)=>(0,l.jsx)(H.Z,{size:"xs",className:"mb-1",color:"blue",children:(0,l.jsx)(D.Z,{children:e.length>30?"".concat(e.slice(0,30),"..."):e})},t)):(0,l.jsx)(H.Z,{size:"xs",className:"mb-1",color:"purple",children:(0,l.jsx)(D.Z,{children:"All Models"})})}):e.models.map((e,t)=>(0,l.jsx)(H.Z,{size:"xs",className:"mb-1",color:"blue",children:(0,l.jsx)(D.Z,{children:e.length>30?"".concat(e.slice(0,30),"..."):e})},t))}):null}),(0,l.jsx)(en.Z,{children:(0,l.jsxs)(D.Z,{children:["TPM: 
",e.tpm_limit?e.tpm_limit:"Unlimited"," ",(0,l.jsx)("br",{})," RPM:"," ",e.rpm_limit?e.rpm_limit:"Unlimited"]})}),(0,l.jsxs)(en.Z,{children:[(0,l.jsx)(es.Z,{onClick:()=>{y(e),j(e.id)},icon:$.Z,size:"sm"}),(0,l.jsx)(ee.Z,{open:null!==p,onClose:()=>{j(null),y(null)},children:(0,l.jsx)(et.Z,{children:g&&(0,l.jsxs)(l.Fragment,{children:[(0,l.jsxs)("div",{className:"grid grid-cols-1 gap-6 sm:grid-cols-2 lg:grid-cols-3",children:[(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)("p",{className:"text-tremor-default font-medium text-tremor-content dark:text-dark-tremor-content",children:"Spend"}),(0,l.jsx)("div",{className:"mt-2 flex items-baseline space-x-2.5",children:(0,l.jsx)("p",{className:"text-tremor font-semibold text-tremor-content-strong dark:text-dark-tremor-content-strong",children:(()=>{try{return parseFloat(g.spend).toFixed(4)}catch(e){return g.spend}})()})})]}),(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)("p",{className:"text-tremor-default font-medium text-tremor-content dark:text-dark-tremor-content",children:"Budget"}),(0,l.jsx)("div",{className:"mt-2 flex items-baseline space-x-2.5",children:(0,l.jsx)("p",{className:"text-tremor font-semibold text-tremor-content-strong dark:text-dark-tremor-content-strong",children:null!=g.max_budget?(0,l.jsx)(l.Fragment,{children:g.max_budget}):(0,l.jsx)(l.Fragment,{children:"Unlimited"})})})]},e.name),(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)("p",{className:"text-tremor-default font-medium text-tremor-content dark:text-dark-tremor-content",children:"Expires"}),(0,l.jsx)("div",{className:"mt-2 flex items-baseline space-x-2.5",children:(0,l.jsx)("p",{className:"text-tremor-default font-small text-tremor-content-strong dark:text-dark-tremor-content-strong",children:null!=g.expires?(0,l.jsx)(l.Fragment,{children:new Date(g.expires).toLocaleString(void 0,{day:"numeric",month:"long",year:"numeric",hour:"numeric",minute:"numeric",second:"numeric"})}):(0,l.jsx)(l.Fragment,{children:"Never"})})})]},e.name)]}),(0,l.jsx)(Q.Z,{className:"mt-6 mb-6",children:f&&(0,l.jsx)(X.Z,{className:"mt-6",data:f,colors:["blue","amber"],index:"date",categories:["spend","predicted_spend"],yAxisWidth:80})}),(0,l.jsx)(U.Z,{children:"Metadata"}),(0,l.jsx)(D.Z,{children:JSON.stringify(g.metadata)}),(0,l.jsx)(R.Z,{variant:"light",className:"mx-auto flex items-center",onClick:()=>{j(null),y(null)},children:"Close"})]})})}),(0,l.jsx)(es.Z,{onClick:()=>v(e.token),icon:Y.Z,size:"sm"})]})]},e.token))})]}),d&&(0,l.jsx)("div",{className:"fixed z-10 inset-0 overflow-y-auto",children:(0,l.jsxs)("div",{className:"flex items-end justify-center min-h-screen pt-4 px-4 pb-20 text-center sm:block sm:p-0",children:[(0,l.jsx)("div",{className:"fixed inset-0 transition-opacity","aria-hidden":"true",children:(0,l.jsx)("div",{className:"absolute inset-0 bg-gray-500 opacity-75"})}),(0,l.jsx)("span",{className:"hidden sm:inline-block sm:align-middle sm:h-screen","aria-hidden":"true",children:""}),(0,l.jsxs)("div",{className:"inline-block align-bottom bg-white rounded-lg text-left overflow-hidden shadow-xl transform transition-all sm:my-8 sm:align-middle sm:max-w-lg sm:w-full",children:[(0,l.jsx)("div",{className:"bg-white px-4 pt-5 pb-4 sm:p-6 sm:pb-4",children:(0,l.jsx)("div",{className:"sm:flex sm:items-start",children:(0,l.jsxs)("div",{className:"mt-3 text-center sm:mt-0 sm:ml-4 sm:text-left",children:[(0,l.jsx)("h3",{className:"text-lg leading-6 font-medium text-gray-900",children:"Delete Key"}),(0,l.jsx)("div",{className:"mt-2",children:(0,l.jsx)("p",{className:"text-sm text-gray-500",children:"Are you sure you want to 
delete this key ?"})})]})})}),(0,l.jsxs)("div",{className:"bg-gray-50 px-4 py-3 sm:px-6 sm:flex sm:flex-row-reverse",children:[(0,l.jsx)(R.Z,{onClick:N,color:"red",className:"ml-2",children:"Delete"}),(0,l.jsx)(R.Z,{onClick:()=>{m(!1),x(null)},children:"Cancel"})]})]})]})})]})})},ed=e=>{let{userID:t,userSpendData:s,userRole:n,accessToken:a}=e,[o,i]=(0,r.useState)(null==s?void 0:s.spend),[c,d]=(0,r.useState)((null==s?void 0:s.max_budget)||null);(0,r.useEffect)(()=>{(async()=>{if("Admin"===n)try{let e=await j(a);i(e.spend),d(e.max_budget||null)}catch(e){console.error("Error fetching global spend data:",e)}})()},[n,a]);let m=void 0!==o?o.toFixed(5):null;return(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)("p",{className:"text-tremor-default text-tremor-content dark:text-dark-tremor-content",children:"Total Spend"}),(0,l.jsxs)("p",{className:"text-3xl text-tremor-content-strong dark:text-dark-tremor-content-strong font-semibold",children:["$",m]})]})},em=s(55901),eh=s(27166),eu=e=>{let{teams:t,setSelectedTeam:s}=e,[n,a]=(0,r.useState)("");return(0,l.jsxs)("div",{className:"mt-5 mb-5",children:[(0,l.jsx)(U.Z,{children:"Select Team"}),(0,l.jsx)(D.Z,{children:"If you belong to multiple teams, this setting controls which team is used by default when creating new API Keys."}),t&&t.length>0?(0,l.jsx)(em.Z,{defaultValue:"0",children:t.map((e,t)=>(0,l.jsx)(eh.Z,{value:String(t),onClick:()=>s(e),children:e.team_alias},t))}):(0,l.jsxs)(D.Z,{children:["No team created. ",(0,l.jsx)("b",{children:"Defaulting to personal account."})]})]})},ex=s(37963),ep=s(36083);console.log("isLocal:",!1);var ej=e=>{let{userID:t,userRole:s,teams:a,keys:o,setUserRole:i,userEmail:c,setUserEmail:d,setTeams:m,setKeys:h}=e,[u,x]=(0,r.useState)(null),g=(0,n.useSearchParams)();g.get("viewSpend"),(0,n.useRouter)();let y=g.get("token"),[w,Z]=(0,r.useState)(null),[_,k]=(0,r.useState)([]),[b,v]=(0,r.useState)(a?a[0]:null);if(window.addEventListener("beforeunload",function(){sessionStorage.clear()}),(0,r.useEffect)(()=>{if(y){let e=(0,ex.o)(y);if(e){if(console.log("Decoded token:",e),console.log("Decoded key:",e.key),Z(e.key),e.user_role){let t=function(e){if(!e)return"Undefined Role";switch(console.log("Received user role: ".concat(e)),e.toLowerCase()){case"app_owner":case"demo_app_owner":return"App Owner";case"app_admin":case"proxy_admin":return"Admin";case"proxy_admin_viewer":return"Admin Viewer";case"app_user":return"App User";default:return"Unknown Role"}}(e.user_role);console.log("Decoded user_role:",t),i(t)}else console.log("User role not defined");e.user_email?d(e.user_email):console.log("User Email is not set ".concat(e))}}if(t&&w&&s&&!o&&!u){let e=sessionStorage.getItem("userModels"+t);e?k(JSON.parse(e)):(async()=>{try{let e=await p(w,t,s);if(console.log("received teams in user dashboard: ".concat(Object.keys(e),"; team values: ").concat(Object.entries(e.teams))),"Admin"==s){let e=await j(w);x(e),console.log("globalSpend:",e)}else x(e.user_info);h(e.keys),m(e.teams),v(e.teams?e.teams[0]:null),sessionStorage.setItem("userData"+t,JSON.stringify(e.keys)),sessionStorage.setItem("userSpendData"+t,JSON.stringify(e.user_info));let l=(await f(w,t,s)).data.map(e=>e.id);console.log("available_model_names:",l),k(l),console.log("userModels:",_),sessionStorage.setItem("userModels"+t,JSON.stringify(l))}catch(e){console.error("There was an error fetching the data",e)}})()}},[t,y,w,o,s]),null==t||null==y){let e="/sso/key/generate";return console.log("Full URL:",e),window.location.href=e,null}if(null==w)return null;if(null==s&&i("App 
Owner"),s&&"Admin Viewer"==s){let{Title:e,Paragraph:t}=ep.default;return(0,l.jsxs)("div",{children:[(0,l.jsx)(e,{level:1,children:"Access Denied"}),(0,l.jsx)(t,{children:"Ask your proxy admin for access to create keys"})]})}return console.log("inside user dashboard, selected team",b),(0,l.jsx)("div",{className:"w-full mx-4",children:(0,l.jsx)(O.Z,{numItems:1,className:"gap-2 p-8 h-[75vh] w-full mt-2",children:(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(ed,{userID:t,userSpendData:u,userRole:s,accessToken:w}),(0,l.jsx)(ec,{userID:t,accessToken:w,selectedTeam:b||null,data:o,setData:h}),(0,l.jsx)(W,{userID:t,team:b||null,userRole:s,accessToken:w,data:o,setData:h},b?b.team_id:null),(0,l.jsx)(eu,{teams:a,setSelectedTeam:v})]})})})};let{Option:eg}=B.default;var ey=e=>{let{userModels:t,accessToken:s,userID:n}=e,[a]=L.Z.useForm(),[o,i]=(0,r.useState)(!1),c=async e=>{try{d.ZP.info("Requesting access");let{selectedModel:t,accessReason:l}=e;await A(s,t,n,l),i(!0)}catch(e){console.error("Error requesting access:",e)}};return(0,l.jsxs)("div",{children:[(0,l.jsx)(R.Z,{size:"xs",onClick:()=>i(!0),children:"Request Access"}),(0,l.jsx)(K.Z,{title:"Request Access",visible:o,width:800,footer:null,onOk:()=>{i(!1),a.resetFields()},onCancel:()=>{i(!1),a.resetFields()},children:(0,l.jsxs)(L.Z,{form:a,onFinish:c,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:[(0,l.jsx)(L.Z.Item,{label:"Select Model",name:"selectedModel",children:(0,l.jsx)(B.default,{placeholder:"Select model",style:{width:"100%"},children:t.map(e=>(0,l.jsx)(eg,{value:e,children:e},e))})}),(0,l.jsx)(L.Z.Item,{label:"Reason for Access",name:"accessReason",children:(0,l.jsx)(V.Z.TextArea,{rows:4,placeholder:"Enter reason for access"})}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(R.Z,{children:"Request Access"})})]})})]})},ef=e=>{let{accessToken:t,token:s,userRole:n,userID:a}=e,[o,i]=(0,r.useState)({data:[]}),[c,d]=(0,r.useState)([]),[m,h]=(0,r.useState)([]);if((0,r.useEffect)(()=>{if(!t||!s||!n||!a)return;let e=async()=>{try{let e=await g(t,a,n);console.log("Model data response:",e.data),i(e);let s=await y(t,a,n);if(console.log("Model metrics response:",s),d(s),"Admin"===n&&t){let e=await C(t);console.log("Pending Requests:",m),h(e.requests||[])}}catch(e){console.error("There was an error fetching the model data",e)}};t&&s&&n&&a&&e()},[t,s,n,a]),!o||!t||!s||!n||!a)return(0,l.jsx)("div",{children:"Loading..."});let u=[];for(let e=0;e(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{children:(0,l.jsx)(U.Z,{children:e.model_name})}),(0,l.jsx)(en.Z,{children:e.provider}),"Admin"===n&&(0,l.jsx)(en.Z,{children:e.api_base}),(0,l.jsx)(en.Z,{children:e.user_access?(0,l.jsx)(H.Z,{color:"green",children:"Yes"}):(0,l.jsx)(ey,{userModels:u,accessToken:t,userID:a})}),(0,l.jsx)(en.Z,{children:e.input_cost}),(0,l.jsx)(en.Z,{children:e.output_cost}),(0,l.jsx)(en.Z,{children:e.max_tokens})]},e.model_name))})]})}),(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Model Statistics (Number Requests)"}),(0,l.jsx)(X.Z,{data:c,index:"model",categories:["num_requests"],colors:["blue"],yAxisWidth:400,layout:"vertical",tickGap:5})]}),(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Model Statistics (Latency)"}),(0,l.jsx)(X.Z,{data:c,index:"model",categories:["avg_latency_seconds"],colors:["red"],yAxisWidth:400,layout:"vertical",tickGap:5})]})]})})},ew=s(92836),eZ=s(26734),e_=s(41608),ek=s(32126),eb=s(23682);let{Option:ev}=B.default;var 
eN=e=>{let{userID:t,accessToken:s}=e,[n]=L.Z.useForm(),[a,o]=(0,r.useState)(!1),[i,c]=(0,r.useState)(null),[m,u]=(0,r.useState)([]);(0,r.useEffect)(()=>{(async()=>{try{let e=await f(s,t,"any"),l=[];for(let t=0;t{o(!1),n.resetFields()},p=()=>{o(!1),c(null),n.resetFields()},j=async e=>{try{d.ZP.info("Making API Call"),o(!0),console.log("formValues in create user:",e);let l=await h(s,t,e);console.log("user create Response:",l),c(l.key),d.ZP.success("API user Created"),n.resetFields(),localStorage.removeItem("userData"+t)}catch(e){console.error("Error creating the user:",e)}};return(0,l.jsxs)("div",{children:[(0,l.jsx)(R.Z,{className:"mx-auto",onClick:()=>o(!0),children:"+ Create New User"}),(0,l.jsx)(K.Z,{title:"Create User",visible:a,width:800,footer:null,onOk:x,onCancel:p,children:(0,l.jsxs)(L.Z,{form:n,onFinish:j,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:[(0,l.jsx)(L.Z.Item,{label:"User ID",name:"user_id",children:(0,l.jsx)(V.Z,{placeholder:"Enter User ID"})}),(0,l.jsx)(L.Z.Item,{label:"Team ID",name:"team_id",children:(0,l.jsx)(V.Z,{placeholder:"ai_team"})}),(0,l.jsx)(L.Z.Item,{label:"Models",name:"models",children:(0,l.jsx)(B.default,{mode:"multiple",placeholder:"Select models",style:{width:"100%"},children:m.map(e=>(0,l.jsx)(ev,{value:e,children:e},e))})}),(0,l.jsx)(L.Z.Item,{label:"Max Budget (USD)",name:"max_budget",children:(0,l.jsx)(q.Z,{step:.01,precision:2,width:200})}),(0,l.jsx)(L.Z.Item,{label:"Tokens per minute Limit (TPM)",name:"tpm_limit",children:(0,l.jsx)(q.Z,{step:1,width:400})}),(0,l.jsx)(L.Z.Item,{label:"Requests per minute Limit (RPM)",name:"rpm_limit",children:(0,l.jsx)(q.Z,{step:1,width:400})}),(0,l.jsx)(L.Z.Item,{label:"Duration (eg: 30s, 30h, 30d)",name:"duration",children:(0,l.jsx)(V.Z,{})}),(0,l.jsx)(L.Z.Item,{label:"Metadata",name:"metadata",children:(0,l.jsx)(V.Z.TextArea,{rows:4,placeholder:"Enter metadata as JSON"})}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(J.ZP,{htmlType:"submit",children:"Create User"})})]})}),i&&(0,l.jsxs)(K.Z,{title:"Save Your User",visible:a,onOk:x,onCancel:p,footer:null,children:[(0,l.jsxs)("p",{children:["Please save this secret user somewhere safe and accessible. For security reasons, ",(0,l.jsx)("b",{children:"you will not be able to view it again"})," ","through your LiteLLM account. 
If you lose this secret user, you will need to generate a new one."]}),(0,l.jsx)("p",{children:null!=i?"API user: ".concat(i):"User being created, this might take 30s"})]})]})},eS=e=>{let{accessToken:t,token:s,keys:n,userRole:a,userID:o,setKeys:i}=e,[c,d]=(0,r.useState)(null),[m,h]=(0,r.useState)(null),[u,x]=(0,r.useState)(1);if((0,r.useEffect)(()=>{if(!t||!s||!a||!o)return;let e=async()=>{try{let e=await p(t,null,a,!0);console.log("user data response:",e),d(e)}catch(e){console.error("There was an error fetching the model data",e)}};t&&s&&a&&o&&!c&&e();let l=async()=>{try{let e=await v(t,null);console.log("user data response:",e),h(e)}catch(e){console.error("There was an error fetching the model data",e)}};a&&("Admin"==a||"Admin Viewer"==a)&&!m&&l()},[t,s,a,o]),!c||!t||!s||!a||!o)return(0,l.jsx)("div",{children:"Loading..."});let j=async e=>{try{let s=await v(t,e);console.log("user data response:",s),h(s)}catch(e){console.error("There was an error fetching the model data",e)}};return(0,l.jsx)("div",{style:{width:"100%"},children:(0,l.jsxs)(O.Z,{className:"gap-2 p-2 h-[75vh] w-full mt-8",children:[(0,l.jsx)(eN,{userID:o,accessToken:t}),(0,l.jsx)(Q.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh] mb-4",children:(0,l.jsxs)(eZ.Z,{children:[(0,l.jsxs)(e_.Z,{variant:"line",defaultValue:"1",children:[(0,l.jsx)(ew.Z,{value:"1",children:"Key Owners"}),(0,l.jsx)(ew.Z,{value:"2",children:"End-Users"})]}),(0,l.jsxs)(eb.Z,{children:[(0,l.jsx)(ek.Z,{children:(0,l.jsxs)(el.Z,{className:"mt-5",children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"User ID"}),(0,l.jsx)(eo.Z,{children:"User Role"}),(0,l.jsx)(eo.Z,{children:"User Models"}),(0,l.jsx)(eo.Z,{children:"User Spend ($ USD)"}),(0,l.jsx)(eo.Z,{children:"User Max Budget ($ USD)"})]})}),(0,l.jsx)(er.Z,{children:c.map(e=>(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{children:e.user_id}),(0,l.jsx)(en.Z,{children:e.user_role?e.user_role:"app_owner"}),(0,l.jsx)(en.Z,{children:e.models&&e.models.length>0?e.models:"All Models"}),(0,l.jsx)(en.Z,{children:e.spend?e.spend:0}),(0,l.jsx)(en.Z,{children:e.max_budget?e.max_budget:"Unlimited"})]},e.user_id))})]})}),(0,l.jsxs)(ek.Z,{children:[(0,l.jsxs)("div",{className:"flex items-center",children:[(0,l.jsx)("div",{className:"flex-1"}),(0,l.jsxs)("div",{className:"flex-1 flex justify-between items-center",children:[(0,l.jsx)(D.Z,{className:"w-1/4 mr-2 text-right",children:"Key"}),(0,l.jsx)(em.Z,{defaultValue:"1",className:"w-3/4",children:null==n?void 0:n.map((e,t)=>{if(e&&null!==e.key_name&&e.key_name.length>0)return(0,l.jsx)(eh.Z,{value:String(t),onClick:()=>j(e.token),children:e.key_name},t)})})]})]}),(0,l.jsxs)(el.Z,{children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"End User"}),(0,l.jsx)(eo.Z,{children:"Spend"}),(0,l.jsx)(eo.Z,{children:"Total Events"})]})}),(0,l.jsx)(er.Z,{children:null==m?void 0:m.map((e,t)=>(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{children:e.end_user}),(0,l.jsx)(en.Z,{children:e.total_spend}),(0,l.jsx)(en.Z,{children:e.total_events})]},t))})]})]})]})]})}),function(){if(!c)return null;let e=Math.ceil(c.length/25),t=Math.min(25*u,c.length);return(0,l.jsxs)("div",{className:"flex justify-between items-center",children:[(0,l.jsxs)("div",{children:["Showing ",(u-1)*25+1," – ",t," of ",c.length]}),(0,l.jsxs)("div",{className:"flex",children:[(0,l.jsx)("button",{className:"bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-l focus:outline-none",disabled:1===u,onClick:()=>x(u-1),children:"← 
Prev"}),(0,l.jsx)("button",{className:"bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-r focus:outline-none",disabled:u===e,onClick:()=>x(u+1),children:"Next →"})]})]})}()]})})},eA=s(98941),eC=e=>{let{teams:t,searchParams:s,accessToken:n,setTeams:a,userID:o,userRole:i}=e,[c]=L.Z.useForm(),[m]=L.Z.useForm(),{Title:h,Paragraph:u}=ep.default,[p,j]=(0,r.useState)(""),[g,y]=(0,r.useState)(t?t[0]:null),[w,Z]=(0,r.useState)(!1),[_,k]=(0,r.useState)(!1),[b,v]=(0,r.useState)([]),[N,S]=(0,r.useState)(!1),[A,C]=(0,r.useState)(null),T=async e=>{C(e),S(!0)},E=e=>{if(e.includes("all_models")){let e=b.filter(e=>"all"!==e);c.setFieldsValue({models:e})}},F=async()=>{if(null!=A&&null!=t&&null!=n){try{await x(n,A);let e=t.filter(e=>e.team_id!==A);a(e)}catch(e){console.error("Error deleting the team:",e)}S(!1),C(null)}};(0,r.useEffect)(()=>{(async()=>{try{if(null===o||null===i)return;if(null!==n){let e=(await f(n,o,i)).data.map(e=>e.id);console.log("available_model_names:",e),v(e)}}catch(e){console.error("Error fetching user models:",e)}})()},[n,o,i]);let U=async e=>{try{if(null!=n){d.ZP.info("Creating Team");let s=await I(n,e);null!==t?a([...t,s]):a([s]),console.log("response for team create call: ".concat(s)),d.ZP.success("Team created"),Z(!1)}}catch(e){console.error("Error creating the team:",e),d.ZP.error("Error creating the team: "+e)}},z=async e=>{try{if(null!=n&&null!=t){d.ZP.info("Adding Member");let s={role:"user",user_email:e.user_email,user_id:e.user_id},l=await P(n,g.team_id,s);console.log("response for team create call: ".concat(l.data));let r=t.findIndex(e=>(console.log("team.team_id=".concat(e.team_id,"; response.data.team_id=").concat(l.data.team_id)),e.team_id===l.data.team_id));if(console.log("foundIndex: ".concat(r)),-1!==r){let e=[...t];e[r]=l.data,a(e),y(l.data)}k(!1)}}catch(e){console.error("Error creating the team:",e)}};return console.log("received teams ".concat(t)),(0,l.jsx)("div",{className:"w-full mx-4",children:(0,l.jsxs)(O.Z,{numItems:1,className:"gap-2 p-8 h-[75vh] w-full mt-2",children:[(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(h,{level:4,children:"All Teams"}),(0,l.jsxs)(Q.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh]",children:[(0,l.jsxs)(el.Z,{children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"Team Name"}),(0,l.jsx)(eo.Z,{children:"Spend (USD)"}),(0,l.jsx)(eo.Z,{children:"Budget (USD)"}),(0,l.jsx)(eo.Z,{children:"Models"}),(0,l.jsx)(eo.Z,{children:"TPM / RPM Limits"})]})}),(0,l.jsx)(er.Z,{children:t&&t.length>0?t.map(e=>(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{style:{maxWidth:"4px",whiteSpace:"pre-wrap",overflow:"hidden"},children:e.team_alias}),(0,l.jsx)(en.Z,{style:{maxWidth:"4px",whiteSpace:"pre-wrap",overflow:"hidden"},children:e.spend}),(0,l.jsx)(en.Z,{style:{maxWidth:"4px",whiteSpace:"pre-wrap",overflow:"hidden"},children:e.max_budget?e.max_budget:"No limit"}),(0,l.jsx)(en.Z,{style:{maxWidth:"8-x",whiteSpace:"pre-wrap",overflow:"hidden"},children:Array.isArray(e.models)?(0,l.jsx)("div",{style:{display:"flex",flexDirection:"column"},children:0===e.models.length?(0,l.jsx)(H.Z,{size:"xs",className:"mb-1",color:"purple",children:(0,l.jsx)(D.Z,{children:"All Models"})}):e.models.map((e,t)=>(0,l.jsx)(H.Z,{size:"xs",className:"mb-1",color:"blue",children:(0,l.jsx)(D.Z,{children:e.length>30?"".concat(e.slice(0,30),"..."):e})},t))}):null}),(0,l.jsx)(en.Z,{style:{maxWidth:"4px",whiteSpace:"pre-wrap",overflow:"hidden"},children:(0,l.jsxs)(D.Z,{children:["TPM:"," 
",e.tpm_limit?e.tpm_limit:"Unlimited"," ",(0,l.jsx)("br",{}),"RPM:"," ",e.rpm_limit?e.rpm_limit:"Unlimited"]})}),(0,l.jsxs)(en.Z,{children:[(0,l.jsx)(es.Z,{icon:eA.Z,size:"sm"}),(0,l.jsx)(es.Z,{onClick:()=>T(e.team_id),icon:Y.Z,size:"sm"})]})]},e.team_id)):null})]}),N&&(0,l.jsx)("div",{className:"fixed z-10 inset-0 overflow-y-auto",children:(0,l.jsxs)("div",{className:"flex items-end justify-center min-h-screen pt-4 px-4 pb-20 text-center sm:block sm:p-0",children:[(0,l.jsx)("div",{className:"fixed inset-0 transition-opacity","aria-hidden":"true",children:(0,l.jsx)("div",{className:"absolute inset-0 bg-gray-500 opacity-75"})}),(0,l.jsx)("span",{className:"hidden sm:inline-block sm:align-middle sm:h-screen","aria-hidden":"true",children:""}),(0,l.jsxs)("div",{className:"inline-block align-bottom bg-white rounded-lg text-left overflow-hidden shadow-xl transform transition-all sm:my-8 sm:align-middle sm:max-w-lg sm:w-full",children:[(0,l.jsx)("div",{className:"bg-white px-4 pt-5 pb-4 sm:p-6 sm:pb-4",children:(0,l.jsx)("div",{className:"sm:flex sm:items-start",children:(0,l.jsxs)("div",{className:"mt-3 text-center sm:mt-0 sm:ml-4 sm:text-left",children:[(0,l.jsx)("h3",{className:"text-lg leading-6 font-medium text-gray-900",children:"Delete Team"}),(0,l.jsx)("div",{className:"mt-2",children:(0,l.jsx)("p",{className:"text-sm text-gray-500",children:"Are you sure you want to delete this team ?"})})]})})}),(0,l.jsxs)("div",{className:"bg-gray-50 px-4 py-3 sm:px-6 sm:flex sm:flex-row-reverse",children:[(0,l.jsx)(R.Z,{onClick:F,color:"red",className:"ml-2",children:"Delete"}),(0,l.jsx)(R.Z,{onClick:()=>{S(!1),C(null)},children:"Cancel"})]})]})]})})]})]}),(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(R.Z,{className:"mx-auto",onClick:()=>Z(!0),children:"+ Create New Team"}),(0,l.jsx)(K.Z,{title:"Create Team",visible:w,width:800,footer:null,onOk:()=>{Z(!1),c.resetFields()},onCancel:()=>{Z(!1),c.resetFields()},children:(0,l.jsxs)(L.Z,{form:c,onFinish:U,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:[(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)(L.Z.Item,{label:"Team Name",name:"team_alias",rules:[{required:!0,message:"Please input a team name"}],children:(0,l.jsx)(V.Z,{})}),(0,l.jsx)(L.Z.Item,{label:"Models",name:"models",children:(0,l.jsxs)(B.default,{mode:"multiple",placeholder:"Select models",style:{width:"100%"},onChange:e=>E(e),children:[(0,l.jsx)(B.default.Option,{value:"all_models",children:"All Models"},"all_models"),b.map(e=>(0,l.jsx)(B.default.Option,{value:e,children:e},e))]})}),(0,l.jsx)(L.Z.Item,{label:"Max Budget (USD)",name:"max_budget",children:(0,l.jsx)(q.Z,{step:.01,precision:2,width:200})}),(0,l.jsx)(L.Z.Item,{label:"Tokens per minute Limit (TPM)",name:"tpm_limit",children:(0,l.jsx)(q.Z,{step:1,width:400})}),(0,l.jsx)(L.Z.Item,{label:"Requests per minute Limit (RPM)",name:"rpm_limit",children:(0,l.jsx)(q.Z,{step:1,width:400})})]}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(J.ZP,{htmlType:"submit",children:"Create Team"})})]})})]}),(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(h,{level:4,children:"Team Members"}),(0,l.jsx)(u,{children:"If you belong to multiple teams, this setting controls which teams members you see."}),t&&t.length>0?(0,l.jsx)(em.Z,{defaultValue:"0",children:t.map((e,t)=>(0,l.jsx)(eh.Z,{value:String(t),onClick:()=>{y(e)},children:e.team_alias},t))}):(0,l.jsxs)(u,{children:["No team created. 
",(0,l.jsx)("b",{children:"Defaulting to personal account."})]})]}),(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsx)(Q.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh]",children:(0,l.jsxs)(el.Z,{children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"Member Name"}),(0,l.jsx)(eo.Z,{children:"Role"})]})}),(0,l.jsx)(er.Z,{children:g?g.members_with_roles.map((e,t)=>(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{children:e.user_email?e.user_email:e.user_id?e.user_id:null}),(0,l.jsx)(en.Z,{children:e.role})]},t)):null})]})})}),(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(R.Z,{className:"mx-auto mb-5",onClick:()=>k(!0),children:"+ Add member"}),(0,l.jsx)(K.Z,{title:"Add member",visible:_,width:800,footer:null,onOk:()=>{k(!1),m.resetFields()},onCancel:()=>{k(!1),m.resetFields()},children:(0,l.jsxs)(L.Z,{form:c,onFinish:z,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:[(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)(L.Z.Item,{label:"Email",name:"user_email",className:"mb-4",children:(0,l.jsx)(V.Z,{name:"user_email",className:"px-3 py-2 border rounded-md w-full"})}),(0,l.jsx)("div",{className:"text-center mb-4",children:"OR"}),(0,l.jsx)(L.Z.Item,{label:"User ID",name:"user_id",className:"mb-4",children:(0,l.jsx)(V.Z,{name:"user_id",className:"px-3 py-2 border rounded-md w-full"})})]}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(J.ZP,{htmlType:"submit",children:"Add member"})})]})})]})]})})},eT=e=>{let{searchParams:t,accessToken:s}=e,[n]=L.Z.useForm(),[a]=L.Z.useForm(),{Title:o,Paragraph:i}=ep.default,[c,m]=(0,r.useState)(""),[h,u]=(0,r.useState)(null),[x,p]=(0,r.useState)(!1);(0,r.useEffect)(()=>{(async()=>{if(null!=s){let e=[],t=await T(s,"proxy_admin_viewer");t.forEach(t=>{e.push({user_role:t.user_role,user_id:t.user_id,user_email:t.user_email})}),console.log("proxy viewers: ".concat(t));let l=await T(s,"proxy_admin");l.forEach(t=>{e.push({user_role:t.user_role,user_id:t.user_id,user_email:t.user_email})}),console.log("proxy admins: ".concat(l)),console.log("combinedList: ".concat(e)),u(e)}})()},[s]);let j=async e=>{try{if(null!=s&&null!=h){d.ZP.info("Making API Call"),e.user_email,e.user_id;let t=await E(s,e);console.log("response for team create call: ".concat(t));let l=h.findIndex(e=>(console.log("user.user_id=".concat(e.user_id,"; response.user_id=").concat(t.user_id)),e.user_id===t.user_id));console.log("foundIndex: ".concat(l)),-1==l&&(console.log("updates admin with new user"),h.push(t),u(h)),p(!1)}}catch(e){console.error("Error creating the key:",e)}};return console.log("admins: ".concat(null==h?void 0:h.length)),(0,l.jsxs)("div",{className:"w-full m-2 mt-2 p-8",children:[(0,l.jsx)(o,{level:4,children:"Restricted Access"}),(0,l.jsxs)(i,{children:["Add other people to just view spend. 
They cannot create keys, teams or grant users access to new models."," ",(0,l.jsx)("a",{href:"https://docs.litellm.ai/docs/proxy/ui#restrict-ui-access",children:"Requires SSO Setup"})]}),(0,l.jsxs)(O.Z,{numItems:1,className:"gap-2 p-2 w-full",children:[(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsx)(Q.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh]",children:(0,l.jsxs)(el.Z,{children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"Member Name"}),(0,l.jsx)(eo.Z,{children:"Role"})]})}),(0,l.jsx)(er.Z,{children:h?h.map((e,t)=>(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{children:e.user_email?e.user_email:e.user_id?e.user_id:null}),(0,l.jsx)(en.Z,{children:e.user_role})]},t)):null})]})})}),(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(R.Z,{className:"mx-auto mb-5",onClick:()=>p(!0),children:"+ Add viewer"}),(0,l.jsx)(K.Z,{title:"Add viewer",visible:x,width:800,footer:null,onOk:()=>{p(!1),a.resetFields()},onCancel:()=>{p(!1),a.resetFields()},children:(0,l.jsxs)(L.Z,{form:n,onFinish:j,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:[(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)(L.Z.Item,{label:"Email",name:"user_email",className:"mb-4",children:(0,l.jsx)(V.Z,{name:"user_email",className:"px-3 py-2 border rounded-md w-full"})}),(0,l.jsx)("div",{className:"text-center mb-4",children:"OR"}),(0,l.jsx)(L.Z.Item,{label:"User ID",name:"user_id",className:"mb-4",children:(0,l.jsx)(V.Z,{name:"user_id",className:"px-3 py-2 border rounded-md w-full"})})]}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(J.ZP,{htmlType:"submit",children:"Add member"})})]})})]})]})]})},eI=s(52273),eP=s(12968),eE=s(67951);async function eF(e,t,s,l){console.log("isLocal:",!1);let r=window.location.origin,n=new eP.ZP.OpenAI({apiKey:l,baseURL:r,dangerouslyAllowBrowser:!0});try{for await(let l of(await n.chat.completions.create({model:s,stream:!0,messages:[{role:"user",content:e}]})))console.log(l),l.choices[0].delta.content&&t(l.choices[0].delta.content)}catch(e){d.ZP.error("Error occurred while generating model response. Please try again. 
Error: ".concat(e))}}var eM=e=>{let{accessToken:t,token:s,userRole:n,userID:a}=e,[o,i]=(0,r.useState)(""),[c,d]=(0,r.useState)(""),[m,h]=(0,r.useState)([]),[u,x]=(0,r.useState)(void 0),[p,j]=(0,r.useState)([]);(0,r.useEffect)(()=>{t&&s&&n&&a&&(async()=>{try{let e=await f(t,a,n);if(console.log("model_info:",e),(null==e?void 0:e.data.length)>0){let t=e.data.map(e=>({value:e.id,label:e.id}));console.log(t),j(t),x(e.data[0].id)}}catch(e){console.error("Error fetching model info:",e)}})()},[t,a,n]);let g=(e,t)=>{h(s=>{let l=s[s.length-1];return l&&l.role===e?[...s.slice(0,s.length-1),{role:e,content:l.content+t}]:[...s,{role:e,content:t}]})},y=async()=>{if(""!==c.trim()&&o&&s&&n&&a){h(e=>[...e,{role:"user",content:c}]);try{u&&await eF(c,e=>g("assistant",e),u,o)}catch(e){console.error("Error fetching model response",e),g("assistant","Error fetching model response")}d("")}};if(n&&"Admin Viewer"==n){let{Title:e,Paragraph:t}=ep.default;return(0,l.jsxs)("div",{children:[(0,l.jsx)(e,{level:1,children:"Access Denied"}),(0,l.jsx)(t,{children:"Ask your proxy admin for access to test models"})]})}return(0,l.jsx)("div",{style:{width:"100%",position:"relative"},children:(0,l.jsx)(O.Z,{className:"gap-2 p-8 h-[80vh] w-full mt-2",children:(0,l.jsx)(Q.Z,{children:(0,l.jsxs)(eZ.Z,{children:[(0,l.jsxs)(e_.Z,{children:[(0,l.jsx)(ew.Z,{children:"Chat"}),(0,l.jsx)(ew.Z,{children:"API Reference"})]}),(0,l.jsxs)(eb.Z,{children:[(0,l.jsxs)(ek.Z,{children:[(0,l.jsx)("div",{className:"sm:max-w-2xl",children:(0,l.jsxs)(O.Z,{numItems:2,children:[(0,l.jsxs)(M.Z,{children:[(0,l.jsx)(D.Z,{children:"API Key"}),(0,l.jsx)(eI.Z,{placeholder:"Type API Key here",type:"password",onValueChange:i,value:o})]}),(0,l.jsxs)(M.Z,{className:"mx-2",children:[(0,l.jsx)(D.Z,{children:"Select Model:"}),(0,l.jsx)(B.default,{placeholder:"Select a Model",onChange:e=>{console.log("selected ".concat(e)),x(e)},options:p,style:{width:"200px"}})]})]})}),(0,l.jsxs)(el.Z,{className:"mt-5",style:{display:"block",maxHeight:"60vh",overflowY:"auto"},children:[(0,l.jsx)(ea.Z,{children:(0,l.jsx)(ei.Z,{children:(0,l.jsx)(en.Z,{})})}),(0,l.jsx)(er.Z,{children:m.map((e,t)=>(0,l.jsx)(ei.Z,{children:(0,l.jsx)(en.Z,{children:"".concat(e.role,": ").concat(e.content)})},t))})]}),(0,l.jsx)("div",{className:"mt-3",style:{position:"absolute",bottom:5,width:"95%"},children:(0,l.jsxs)("div",{className:"flex",children:[(0,l.jsx)(eI.Z,{type:"text",value:c,onChange:e=>d(e.target.value),placeholder:"Type your message..."}),(0,l.jsx)(R.Z,{onClick:y,className:"ml-2",children:"Send"})]})})]}),(0,l.jsx)(ek.Z,{children:(0,l.jsxs)(eZ.Z,{children:[(0,l.jsxs)(e_.Z,{children:[(0,l.jsx)(ew.Z,{children:"OpenAI Python SDK"}),(0,l.jsx)(ew.Z,{children:"LlamaIndex"}),(0,l.jsx)(ew.Z,{children:"Langchain Py"})]}),(0,l.jsxs)(eb.Z,{children:[(0,l.jsx)(ek.Z,{children:(0,l.jsx)(eE.Z,{language:"python",children:'\nimport openai\nclient = openai.OpenAI(\n api_key="your_api_key",\n base_url="http://0.0.0.0:4000" # proxy base url\n)\n\nresponse = client.chat.completions.create(\n model="gpt-3.5-turbo", # model to use from Models Tab\n messages = [\n {\n "role": "user",\n "content": "this is a test request, write a short poem"\n }\n ],\n extra_body={\n "metadata": {\n "generation_name": "ishaan-generation-openai-client",\n "generation_id": "openai-client-gen-id22",\n "trace_id": "openai-client-trace-id22",\n "trace_user_id": "openai-client-user-id2"\n }\n }\n)\n\nprint(response)\n '})}),(0,l.jsx)(ek.Z,{children:(0,l.jsx)(eE.Z,{language:"python",children:'\nimport os, dotenv\n\nfrom llama_index.llms 
import AzureOpenAI\nfrom llama_index.embeddings import AzureOpenAIEmbedding\nfrom llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n\nllm = AzureOpenAI(\n engine="azure-gpt-3.5", # model_name on litellm proxy\n temperature=0.0,\n azure_endpoint="http://0.0.0.0:4000", # litellm proxy endpoint\n api_key="sk-1234", # litellm proxy API Key\n api_version="2023-07-01-preview",\n)\n\nembed_model = AzureOpenAIEmbedding(\n deployment_name="azure-embedding-model",\n azure_endpoint="http://0.0.0.0:4000",\n api_key="sk-1234",\n api_version="2023-07-01-preview",\n)\n\n\ndocuments = SimpleDirectoryReader("llama_index_data").load_data()\nservice_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)\nindex = VectorStoreIndex.from_documents(documents, service_context=service_context)\n\nquery_engine = index.as_query_engine()\nresponse = query_engine.query("What did the author do growing up?")\nprint(response)\n\n '})}),(0,l.jsx)(ek.Z,{children:(0,l.jsx)(eE.Z,{language:"python",children:'\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n SystemMessagePromptTemplate,\n)\nfrom langchain.schema import HumanMessage, SystemMessage\n\nchat = ChatOpenAI(\n openai_api_base="http://0.0.0.0:8000",\n model = "gpt-3.5-turbo",\n temperature=0.1,\n extra_body={\n "metadata": {\n "generation_name": "ishaan-generation-langchain-client",\n "generation_id": "langchain-client-gen-id22",\n "trace_id": "langchain-client-trace-id22",\n "trace_user_id": "langchain-client-user-id2"\n }\n }\n)\n\nmessages = [\n SystemMessage(\n content="You are a helpful assistant that im using to make a test request to."\n ),\n HumanMessage(\n content="test from litellm. tell me why it\'s amazing in 1 sentence"\n ),\n]\nresponse = chat(messages)\n\nprint(response)\n\n '})})]})]})})]})]})})})})},eO=s(33509),eR=s(95781);let{Sider:eD}=eO.default;var eU=e=>{let{setPage:t,userRole:s,defaultSelectedKey:r}=e;return"Admin Viewer"==s?(0,l.jsx)(eO.default,{style:{minHeight:"100vh",maxWidth:"120px"},children:(0,l.jsx)(eD,{width:120,children:(0,l.jsxs)(eR.Z,{mode:"inline",defaultSelectedKeys:r||["4"],style:{height:"100%",borderRight:0},children:[(0,l.jsx)(eR.Z.Item,{onClick:()=>t("api-keys"),children:"API Keys"},"4"),(0,l.jsx)(eR.Z.Item,{onClick:()=>t("models"),children:"Models"},"2"),(0,l.jsx)(eR.Z.Item,{onClick:()=>t("llm-playground"),children:"Chat UI"},"3"),(0,l.jsx)(eR.Z.Item,{onClick:()=>t("usage"),children:"Usage"},"1")]})})}):(0,l.jsx)(eO.default,{style:{minHeight:"100vh",maxWidth:"100px"},children:(0,l.jsx)(eD,{width:100,children:(0,l.jsxs)(eR.Z,{mode:"inline",defaultSelectedKeys:r||["1"],style:{height:"100%",borderRight:0},children:[(0,l.jsx)(eR.Z.Item,{onClick:()=>t("api-keys"),children:(0,l.jsx)(D.Z,{children:"API Keys"})},"1"),(0,l.jsx)(eR.Z.Item,{onClick:()=>t("llm-playground"),children:(0,l.jsx)(D.Z,{children:"Test Key"})},"3"),"Admin"==s?(0,l.jsx)(eR.Z.Item,{onClick:()=>t("teams"),children:(0,l.jsx)(D.Z,{children:"Teams"})},"6"):null,(0,l.jsx)(eR.Z.Item,{onClick:()=>t("usage"),children:(0,l.jsx)(D.Z,{children:"Usage"})},"4"),"Admin"==s?(0,l.jsx)(eR.Z.Item,{onClick:()=>t("users"),children:(0,l.jsx)(D.Z,{children:"Users"})},"5"):null,(0,l.jsx)(eR.Z.Item,{onClick:()=>t("models"),children:(0,l.jsx)(D.Z,{children:"Models"})},"2"),"Admin"==s?(0,l.jsx)(eR.Z.Item,{onClick:()=>t("admin-panel"),children:(0,l.jsx)(D.Z,{children:"Admin"})},"7"):null]})})})},ez=e=>{let{accessToken:t,token:s,userRole:n,userID:a}=e,o=new 
Date,[i,c]=(0,r.useState)([]),[d,m]=(0,r.useState)([]),[h,u]=(0,r.useState)([]),[x,p]=(0,r.useState)([]),[j,g]=(0,r.useState)([]),[y,f]=(0,r.useState)([]),[w,v]=(0,r.useState)([]),A=new Date(o.getFullYear(),o.getMonth(),1),C=new Date(o.getFullYear(),o.getMonth()+1,0),T=P(A),I=P(C);function P(e){let t=e.getFullYear(),s=e.getMonth()+1,l=e.getDate();return"".concat(t,"-").concat(s<10?"0"+s:s,"-").concat(l<10?"0"+l:l)}return console.log("Start date is ".concat(T)),console.log("End date is ".concat(I)),(0,r.useEffect)(()=>{t&&s&&n&&a&&(async()=>{try{if(console.log("user role: ".concat(n)),"Admin"==n||"Admin Viewer"==n){let e=await k(t);c(e);let s=(await b(t)).map(e=>({key:(e.key_name||e.key_alias||e.api_key).substring(0,7),spend:e.total_spend}));m(s);let l=(await N(t)).map(e=>({key:e.model,spend:e.total_spend}));u(l);let r=await Z(t);console.log("teamSpend",r),g(r.daily_spend),f(r.teams),v(r.total_spend_per_team)}else"App Owner"==n&&await _(t,s,n,a,T,I).then(async e=>{if(console.log("result from spend logs call",e),"daily_spend"in e){let t=e.daily_spend;console.log("daily spend",t),c(t);let s=e.top_api_keys;m(s)}else{let s=(await S(t,function(e){let t=[];e.forEach(e=>{Object.entries(e).forEach(e=>{let[s,l]=e;"spend"!==s&&"startTime"!==s&&"models"!==s&&"users"!==s&&t.push({key:s,spend:l})})}),t.sort((e,t)=>Number(t.spend)-Number(e.spend));let s=t.slice(0,5).map(e=>e.key);return console.log("topKeys: ".concat(Object.keys(s[0]))),s}(e))).info.map(e=>({key:(e.key_name||e.key_alias||e.token).substring(0,7),spend:e.spend}));m(s),p(function(e){let t={};e.forEach(e=>{Object.entries(e.users).forEach(e=>{let[s,l]=e;""!==s&&null!=s&&"None"!=s&&(t[s]||(t[s]=0),t[s]+=l)})});let s=Object.entries(t).map(e=>{let[t,s]=e;return{user_id:t,spend:s}});s.sort((e,t)=>t.spend-e.spend);let l=s.slice(0,5);return console.log("topKeys: ".concat(Object.values(l[0]))),l}(e)),c(e)}})}catch(e){console.error("There was an error fetching the data",e)}})()},[t,s,n,a,T,I]),(0,l.jsx)("div",{style:{width:"100%"},className:"p-8",children:(0,l.jsxs)(eZ.Z,{children:[(0,l.jsxs)(e_.Z,{className:"mt-2",children:[(0,l.jsx)(ew.Z,{children:"All Up"}),(0,l.jsx)(ew.Z,{children:"Team Based Usage"})]}),(0,l.jsxs)(eb.Z,{children:[(0,l.jsx)(ek.Z,{children:(0,l.jsxs)(O.Z,{numItems:2,className:"gap-2 h-[75vh] w-full",children:[(0,l.jsx)(M.Z,{numColSpan:2,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Monthly Spend"}),(0,l.jsx)(X.Z,{data:i,index:"date",categories:["spend"],colors:["blue"],valueFormatter:e=>"$ ".concat(new Intl.NumberFormat("us").format(e).toString()),yAxisWidth:100,tickGap:5})]})}),(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Top API Keys"}),(0,l.jsx)(X.Z,{className:"mt-4 h-40",data:d,index:"key",categories:["spend"],colors:["blue"],yAxisWidth:80,tickGap:5,layout:"vertical",showXAxis:!1,showLegend:!1})]})}),(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Top Users"}),(0,l.jsx)(X.Z,{className:"mt-4 h-40",data:x,index:"user_id",categories:["spend"],colors:["blue"],yAxisWidth:200,layout:"vertical",showXAxis:!1,showLegend:!1})]})}),(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Top Models"}),(0,l.jsx)(X.Z,{className:"mt-4 h-40",data:h,index:"key",categories:["spend"],colors:["blue"],yAxisWidth:200,layout:"vertical",showXAxis:!1,showLegend:!1})]})})]})}),(0,l.jsx)(ek.Z,{children:(0,l.jsxs)(O.Z,{numItems:2,className:"gap-2 p-10 h-[75vh] 
w-full",children:[(0,l.jsx)(M.Z,{numColSpan:2,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Daily Spend Per Team"}),(0,l.jsx)(X.Z,{className:"h-72",data:j,index:"date",categories:y,yAxisWidth:30,stack:!0})]})}),(0,l.jsx)(M.Z,{numColSpan:2,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Total Spend Per Team"}),(0,l.jsx)(X.Z,{className:"h-72",data:w,index:"team_id",categories:["total_spend"],yAxisWidth:30})]})})]})})]})]})})},eB=()=>{let{Title:e,Paragraph:t}=ep.default,[s,a]=(0,r.useState)(""),[o,i]=(0,r.useState)(null),[d,m]=(0,r.useState)(null),[h,u]=(0,r.useState)(null),[x,p]=(0,r.useState)(!0),j=(0,n.useSearchParams)(),g=j.get("userID"),y=j.get("token"),[f,w]=(0,r.useState)("api-keys"),[Z,_]=(0,r.useState)(null);return(0,r.useEffect)(()=>{if(y){let e=(0,ex.o)(y);if(e){if(console.log("Decoded token:",e),console.log("Decoded key:",e.key),_(e.key),e.user_role){let t=function(e){if(!e)return"Undefined Role";switch(console.log("Received user role: ".concat(e.toLowerCase())),console.log("Received user role length: ".concat(e.toLowerCase().length)),e.toLowerCase()){case"app_owner":case"demo_app_owner":return"App Owner";case"app_admin":case"proxy_admin":return"Admin";case"proxy_admin_viewer":return"Admin Viewer";case"app_user":return"App User";default:return"Unknown Role"}}(e.user_role);console.log("Decoded user_role:",t),a(t),"Admin Viewer"==t&&w("usage")}else console.log("User role not defined");e.user_email?i(e.user_email):console.log("User Email is not set ".concat(e)),e.login_method?p("username_password"==e.login_method):console.log("User Email is not set ".concat(e))}}},[y]),(0,l.jsx)(r.Suspense,{fallback:(0,l.jsx)("div",{children:"Loading..."}),children:(0,l.jsxs)("div",{className:"flex flex-col min-h-screen",children:[(0,l.jsx)(c,{userID:g,userRole:s,userEmail:o,showSSOBanner:x}),(0,l.jsxs)("div",{className:"flex flex-1 overflow-auto",children:[(0,l.jsx)("div",{className:"mt-8",children:(0,l.jsx)(eU,{setPage:w,userRole:s,defaultSelectedKey:null})}),"api-keys"==f?(0,l.jsx)(ej,{userID:g,userRole:s,teams:d,keys:h,setUserRole:a,userEmail:o,setUserEmail:i,setTeams:m,setKeys:u}):"models"==f?(0,l.jsx)(ef,{userID:g,userRole:s,token:y,accessToken:Z}):"llm-playground"==f?(0,l.jsx)(eM,{userID:g,userRole:s,token:y,accessToken:Z}):"users"==f?(0,l.jsx)(eS,{userID:g,userRole:s,token:y,keys:h,accessToken:Z,setKeys:u}):"teams"==f?(0,l.jsx)(eC,{teams:d,setTeams:m,searchParams:j,accessToken:Z,userID:g,userRole:s}):"admin-panel"==f?(0,l.jsx)(eT,{setTeams:m,searchParams:j,accessToken:Z}):(0,l.jsx)(ez,{userID:g,userRole:s,token:y,accessToken:Z})]})]})})}}},function(e){e.O(0,[798,971,69,744],function(){return e(e.s=20661)}),_N_E=e.O()}]);
\ No newline at end of file
diff --git a/litellm/proxy/_experimental/out/index.html b/litellm/proxy/_experimental/out/index.html
index 30858c11d..e69de29bb 100644
--- a/litellm/proxy/_experimental/out/index.html
+++ b/litellm/proxy/_experimental/out/index.html
@@ -1 +0,0 @@
-🚅 LiteLLM
\ No newline at end of file
diff --git a/litellm/proxy/_experimental/out/index.txt b/litellm/proxy/_experimental/out/index.txt
index dbabf4615..e69de29bb 100644
--- a/litellm/proxy/_experimental/out/index.txt
+++ b/litellm/proxy/_experimental/out/index.txt
@@ -1,7 +0,0 @@
-2:I[77831,[],""]
-3:I[90177,["798","static/chunks/798-4baed68da0c5497d.js","931","static/chunks/app/page-37392d6753f8a3d0.js"],""]
-4:I[5613,[],""]
-5:I[31778,[],""]
-0:["L9N6TOWJaqSp22Vj96YE4",[[["",{"children":["__PAGE__",{}]},"$undefined","$undefined",true],["",{"children":["__PAGE__",{},["$L1",["$","$L2",null,{"propsForComponent":{"params":{}},"Component":"$3","isStaticGeneration":true}],null]]},[null,["$","html",null,{"lang":"en","children":["$","body",null,{"className":"__className_c23dc8","children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"loading":"$undefined","loadingStyles":"$undefined","loadingScripts":"$undefined","hasLoading":false,"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be found."}]}]]}]}]],"notFoundStyles":[],"styles":null}]}]}],null]],[[["$","link","0",{"rel":"stylesheet","href":"/ui/_next/static/css/f8da5a6a5b29d249.css","precedence":"next","crossOrigin":""}]],"$L6"]]]]
-6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"🚅 LiteLLM"}],["$","meta","3",{"name":"description","content":"LiteLLM Proxy Admin UI"}],["$","link","4",{"rel":"icon","href":"/ui/favicon.ico","type":"image/x-icon","sizes":"16x16"}],["$","meta","5",{"name":"next-size-adjust"}]]
-1:null
diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml
index 963121c4a..d218fddb1 100644
--- a/litellm/proxy/_new_secret_config.yaml
+++ b/litellm/proxy/_new_secret_config.yaml
@@ -5,10 +5,15 @@ model_list:
api_key: my-fake-key
api_base: https://exampleopenaiendpoint-production.up.railway.app/
+litellm_settings:
+ max_budget: 600020
+ budget_duration: 30d
+
general_settings:
master_key: sk-1234
- proxy_batch_write_at: 5 # 👈 Frequency of batch writing logs to server (in seconds)
+ proxy_batch_write_at: 60 # 👈 Frequency of batch writing logs to server (in seconds)
enable_jwt_auth: True
+ alerting: ["slack"]
litellm_jwtauth:
admin_jwt_scope: "litellm_proxy_admin"
team_jwt_scope: "litellm_team"
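The new `litellm_settings` block turns on a proxy-wide budget, and `alerting: ["slack"]` feeds the `budget_alerts` tasks added in `proxy_server.py` below. A minimal sketch of the resulting runtime state, assuming the yaml keys map one-to-one onto `litellm` module attributes (which is how `auth_checks.py` consumes `litellm.max_budget` in this diff):

```python
# Sketch only: assumes litellm_settings keys surface as litellm attributes,
# consistent with how auth_checks.py reads litellm.max_budget below.
import litellm

litellm.max_budget = 600020      # proxy-wide spend cap in USD
litellm.budget_duration = "30d"  # window after which tracked spend resets

# max_budget > 0 is the switch that activates the new global budget check
assert litellm.max_budget > 0
```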
diff --git a/litellm/proxy/auth/auth_checks.py b/litellm/proxy/auth/auth_checks.py
index 5246fb94d..e3b7df4e9 100644
--- a/litellm/proxy/auth/auth_checks.py
+++ b/litellm/proxy/auth/auth_checks.py
@@ -18,6 +18,7 @@ from litellm.proxy._types import (
from typing import Optional, Literal, Union
from litellm.proxy.utils import PrismaClient
from litellm.caching import DualCache
+import litellm
all_routes = LiteLLMRoutes.openai_routes.value + LiteLLMRoutes.management_routes.value
@@ -26,6 +27,7 @@ def common_checks(
request_body: dict,
team_object: LiteLLM_TeamTable,
end_user_object: Optional[LiteLLM_EndUserTable],
+ global_proxy_spend: Optional[float],
general_settings: dict,
route: str,
) -> bool:
@@ -37,6 +39,7 @@ def common_checks(
3. If team is in budget
4. If end_user ('user' passed to /chat/completions, /embeddings endpoint) is in budget
- 5. [OPTIONAL] If 'enforce_end_user' enabled - did developer pass in 'user' param for openai endpoints
+ 5. [OPTIONAL] If 'enforce_user_param' enabled - did developer pass in 'user' param for openai endpoints
+ 6. [OPTIONAL] If 'litellm.max_budget' is set (>0), is proxy under budget
"""
_model = request_body.get("model", None)
if team_object.blocked == True:
@@ -66,7 +69,7 @@ def common_checks(
end_user_budget = end_user_object.litellm_budget_table.max_budget
if end_user_budget is not None and end_user_object.spend > end_user_budget:
raise Exception(
- f"End User={end_user_object.user_id} over budget. Spend={end_user_object.spend}, Budget={end_user_budget}"
+ f"ExceededBudget: End User={end_user_object.user_id} over budget. Spend={end_user_object.spend}, Budget={end_user_budget}"
)
# 5. [OPTIONAL] If 'enforce_user_param' enabled - did developer pass in 'user' param for openai endpoints
if (
@@ -77,7 +80,12 @@ def common_checks(
raise Exception(
f"'user' param not passed in. 'enforce_user_param'={general_settings['enforce_user_param']}"
)
-
+ # 6. [OPTIONAL] If 'litellm.max_budget' is set (>0), is proxy under budget
+ if litellm.max_budget > 0 and global_proxy_spend is not None:
+ if global_proxy_spend > litellm.max_budget:
+ raise Exception(
+ f"ExceededBudget: LiteLLM Proxy has exceeded its budget. Current spend: {global_proxy_spend}; Max Budget: {litellm.max_budget}"
+ )
return True
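Check 6 in isolation: a self-contained sketch of the new guard with the team/end-user plumbing omitted. Only `litellm.max_budget` and the exception text come from the diff; the wrapper function is illustrative:

```python
import litellm

litellm.max_budget = 100.0  # illustrative cap

def check_proxy_budget(global_proxy_spend):
    """Mirrors check 6 from common_checks: reject once the proxy is over budget."""
    if litellm.max_budget > 0 and global_proxy_spend is not None:
        if global_proxy_spend > litellm.max_budget:
            raise Exception(
                f"ExceededBudget: LiteLLM Proxy has exceeded its budget. "
                f"Current spend: {global_proxy_spend}; Max Budget: {litellm.max_budget}"
            )
    return True

check_proxy_budget(global_proxy_spend=42.0)    # under budget -> passes
# check_proxy_budget(global_proxy_spend=250.0) # over budget -> raises ExceededBudget
```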
diff --git a/litellm/proxy/auth/handle_jwt.py b/litellm/proxy/auth/handle_jwt.py
index 4689ffe7b..9758d52cc 100644
--- a/litellm/proxy/auth/handle_jwt.py
+++ b/litellm/proxy/auth/handle_jwt.py
@@ -114,7 +114,8 @@ class JWTHandler:
public_key: Optional[dict] = None
if len(keys) == 1:
- public_key = keys[0]
+ if kid is None or keys[0].get("kid", None) == kid:
+ public_key = keys[0]
elif len(keys) > 1:
for key in keys:
if kid is not None and key["kid"] == kid:
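The corrected single-key path only trusts the lone JWKS entry when the token carries no `kid` or the `kid` matches (the original `keys["kid"]` indexed the list with a string and would raise a `TypeError`). A standalone sketch of the selection logic, assuming each JWKS entry is a dict that may carry a `kid` field:

```python
from typing import Optional

def select_public_key(keys: list, kid: Optional[str]) -> Optional[dict]:
    """Pick the JWKS entry matching the token's kid; a lone key is used only
    when the token has no kid or the kid matches."""
    if len(keys) == 1:
        if kid is None or keys[0].get("kid", None) == kid:
            return keys[0]
    elif len(keys) > 1:
        for key in keys:
            if kid is not None and key.get("kid") == kid:
                return key
    return None

jwks = [{"kid": "abc", "kty": "RSA"}]
assert select_public_key(jwks, kid=None) == jwks[0]  # token has no kid -> accept lone key
assert select_public_key(jwks, kid="xyz") is None    # kid mismatch -> no key, auth fails
```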
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index f888ce431..e440581d2 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -437,12 +437,49 @@ async def user_api_key_auth(
key=end_user_id, value=end_user_object
)
+ global_proxy_spend = None
+
+ if litellm.max_budget > 0: # user set proxy max budget
+ # check cache
+ global_proxy_spend = await user_api_key_cache.async_get_cache(
+ key="{}:spend".format(litellm_proxy_admin_name)
+ )
+ if global_proxy_spend is None and prisma_client is not None:
+ # get from db
+ sql_query = """SELECT SUM(spend) as total_spend FROM "MonthlyGlobalSpend";"""
+
+ response = await prisma_client.db.query_raw(query=sql_query)
+
+ global_proxy_spend = response[0]["total_spend"]
+
+ await user_api_key_cache.async_set_cache(
+ key="{}:spend".format(litellm_proxy_admin_name),
+ value=global_proxy_spend,
+ ttl=60,
+ )
+ if global_proxy_spend is not None:
+ user_info = {
+ "user_id": litellm_proxy_admin_name,
+ "max_budget": litellm.max_budget,
+ "spend": global_proxy_spend,
+ "user_email": "",
+ }
+ asyncio.create_task(
+ proxy_logging_obj.budget_alerts(
+ user_max_budget=litellm.max_budget,
+ user_current_spend=global_proxy_spend,
+ type="user_and_proxy_budget",
+ user_info=user_info,
+ )
+ )
+
# run through common checks
_ = common_checks(
request_body=request_data,
team_object=team_object,
end_user_object=end_user_object,
general_settings=general_settings,
+ global_proxy_spend=global_proxy_spend,
route=route,
)
# save user object in cache
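The lookup order here is cache first, then the `MonthlyGlobalSpend` aggregate, then re-prime the cache. A hedged sketch of just that flow; `cache` and `db` stand in for `user_api_key_cache` and `prisma_client.db`, and only the shape of the calls is taken from the diff:

```python
# `cache` and `db` are stand-ins for user_api_key_cache and prisma_client.db.
async def get_global_proxy_spend(cache, db, admin_name: str):
    key = "{}:spend".format(admin_name)
    spend = await cache.async_get_cache(key=key)  # 1. try the in-memory cache
    if spend is None and db is not None:
        rows = await db.query_raw(
            query='SELECT SUM(spend) as total_spend FROM "MonthlyGlobalSpend";'
        )
        spend = rows[0]["total_spend"]  # 2. fall back to the DB aggregate
        # 3. re-prime the cache
        await cache.async_set_cache(key=key, value=spend, ttl=60)
    return spend
```

The 60-second TTL bounds the aggregation query to roughly one run per minute per proxy instance, whatever the request rate.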
@@ -656,17 +693,8 @@ async def user_api_key_auth(
)
# Check 2. If user_id for this token is in budget
- ## Check 2.1 If global proxy is in budget
- ## Check 2.2 [OPTIONAL - checked only if litellm.max_user_budget is not None] If 'user' passed in /chat/completions is in budget
if valid_token.user_id is not None:
- user_id_list = [valid_token.user_id, litellm_proxy_budget_name]
- if (
- litellm.max_user_budget is not None
- ): # Check if 'user' passed in /chat/completions is in budget, only checked if litellm.max_user_budget is set
- user_passed_to_chat_completions = request_data.get("user", None)
- if user_passed_to_chat_completions is not None:
- user_id_list.append(user_passed_to_chat_completions)
-
+ user_id_list = [valid_token.user_id]
for id in user_id_list:
value = user_api_key_cache.get_cache(key=id)
if value is not None:
@@ -675,13 +703,12 @@ async def user_api_key_auth(
user_id_information.append(value)
if user_id_information is None or (
isinstance(user_id_information, list)
- and len(user_id_information) < 2
+ and len(user_id_information) < 1
):
if prisma_client is not None:
user_id_information = await prisma_client.get_data(
user_id_list=[
valid_token.user_id,
- litellm_proxy_budget_name,
],
table_name="user",
query_type="find_all",
@@ -881,11 +908,54 @@ async def user_api_key_auth(
blocked=valid_token.team_blocked,
models=valid_token.team_models,
)
+
+ _end_user_object = None
+ if "user" in request_data:
+ _id = "end_user_id:{}".format(request_data["user"])
+ _end_user_object = await user_api_key_cache.async_get_cache(key=_id)
+ if _end_user_object is not None:
+ _end_user_object = LiteLLM_EndUserTable(**_end_user_object)
+
+ global_proxy_spend = None
+ if litellm.max_budget > 0: # user set proxy max budget
+ # check cache
+ global_proxy_spend = await user_api_key_cache.async_get_cache(
+ key="{}:spend".format(litellm_proxy_admin_name)
+ )
+ if global_proxy_spend is None and prisma_client is not None:
+ # get from db
+ sql_query = """SELECT SUM(spend) as total_spend FROM "MonthlyGlobalSpend";"""
+
+ response = await prisma_client.db.query_raw(query=sql_query)
+
+ global_proxy_spend = response[0]["total_spend"]
+ await user_api_key_cache.async_set_cache(
+ key="{}:spend".format(litellm_proxy_admin_name),
+ value=global_proxy_spend,
+ ttl=60,
+ )
+
+ if global_proxy_spend is not None:
+ user_info = {
+ "user_id": litellm_proxy_admin_name,
+ "max_budget": litellm.max_budget,
+ "spend": global_proxy_spend,
+ "user_email": "",
+ }
+ asyncio.create_task(
+ proxy_logging_obj.budget_alerts(
+ user_max_budget=litellm.max_budget,
+ user_current_spend=global_proxy_spend,
+ type="user_and_proxy_budget",
+ user_info=user_info,
+ )
+ )
_ = common_checks(
request_body=request_data,
team_object=_team_obj,
- end_user_object=None,
+ end_user_object=_end_user_object,
general_settings=general_settings,
+ global_proxy_spend=global_proxy_spend,
route=route,
)
# Token passed all checks
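Two patterns recur in this hunk: end-user objects cached under a namespaced `end_user_id:` key and rehydrated into the pydantic model, and budget alerts dispatched without blocking auth. A sketch of both, with `cache` and `proxy_logging_obj` as stand-ins for the proxy's cache and logging objects:

```python
import asyncio
from litellm.proxy._types import LiteLLM_EndUserTable

async def load_end_user(cache, request_data: dict):
    # end-user objects live under "end_user_id:<user>" and are rehydrated
    # into the pydantic model before the budget checks run
    if "user" not in request_data:
        return None
    cached = await cache.async_get_cache(key="end_user_id:{}".format(request_data["user"]))
    return LiteLLM_EndUserTable(**cached) if cached is not None else None

def fire_budget_alert(proxy_logging_obj, max_budget: float, spend: float, user_info: dict):
    # fire-and-forget: the alert runs in the background, so the request
    # path never waits on Slack delivery
    asyncio.create_task(
        proxy_logging_obj.budget_alerts(
            user_max_budget=max_budget,
            user_current_spend=spend,
            type="user_and_proxy_budget",
            user_info=user_info,
        )
    )
```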
@@ -1553,7 +1623,7 @@ async def update_cache(
async def _update_user_cache():
## UPDATE CACHE FOR USER ID + GLOBAL PROXY
- user_ids = [user_id, litellm_proxy_budget_name, end_user_id]
+ user_ids = [user_id]
try:
for _id in user_ids:
# Fetch the existing cost for the given user
@@ -1594,14 +1664,26 @@ async def update_cache(
user_api_key_cache.set_cache(
key=_id, value=existing_spend_obj.json()
)
+ ## UPDATE GLOBAL PROXY ##
+ global_proxy_spend = await user_api_key_cache.async_get_cache(
+ key="{}:spend".format(litellm_proxy_admin_name)
+ )
+ if global_proxy_spend is None:
+ await user_api_key_cache.async_set_cache(
+ key="{}:spend".format(litellm_proxy_admin_name), value=response_cost
+ )
+ elif response_cost is not None and global_proxy_spend is not None:
+ increment = global_proxy_spend + response_cost
+ await user_api_key_cache.async_set_cache(
+ key="{}:spend".format(litellm_proxy_admin_name), value=increment
+ )
except Exception as e:
verbose_proxy_logger.debug(
f"An error occurred updating user cache: {str(e)}\n\n{traceback.format_exc()}"
)
async def _update_end_user_cache():
- ## UPDATE CACHE FOR USER ID + GLOBAL PROXY
- _id = end_user_id
+ _id = "end_user_id:{}".format(end_user_id)
try:
# Fetch the existing cost for the given user
existing_spend_obj = await user_api_key_cache.async_get_cache(key=_id)
@@ -1609,14 +1691,14 @@ async def update_cache(
# if user does not exist in LiteLLM_UserTable, create a new user
existing_spend = 0
- max_user_budget = None
+ max_end_user_budget = None
- if litellm.max_user_budget is not None:
- max_user_budget = litellm.max_user_budget
+ if litellm.max_end_user_budget is not None:
+ max_end_user_budget = litellm.max_end_user_budget
existing_spend_obj = LiteLLM_EndUserTable(
user_id=_id,
spend=0,
blocked=False,
litellm_budget_table=LiteLLM_BudgetTable(
- max_budget=max_user_budget
+ max_budget=max_end_user_budget
),
)
verbose_proxy_logger.debug(
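The new cache bookkeeping in `update_cache` increments the proxy-wide spend counter in place rather than re-reading the DB on every completed call. A sketch of that read-modify-write, with `cache` standing in for `user_api_key_cache`:

```python
# `cache` stands in for user_api_key_cache; the increment mirrors the diff.
async def bump_global_proxy_spend(cache, admin_name: str, response_cost: float):
    key = "{}:spend".format(admin_name)
    current = await cache.async_get_cache(key=key)
    if current is None:
        await cache.async_set_cache(key=key, value=response_cost)
    elif response_cost is not None:
        # read-modify-write; the TTL'd DB read in auth re-syncs any drift
        await cache.async_set_cache(key=key, value=current + response_cost)
```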
@@ -2909,6 +2991,11 @@ def model_list(
dependencies=[Depends(user_api_key_auth)],
tags=["completions"],
)
+@router.post(
+ "/openai/deployments/{model:path}/completions",
+ dependencies=[Depends(user_api_key_auth)],
+ tags=["completions"],
+)
async def completion(
request: Request,
fastapi_response: Response,
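The added route makes the proxy answer Azure-style deployment paths on the legacy completions endpoint. A hedged client-side example, assuming a local proxy on port 4000 with the sample config's `sk-1234` master key, and that the deployment segment in the path is resolved like the `model` param:

```python
import requests

# Assumptions: local proxy on port 4000, master key from the sample config,
# and a standard text-completions request body.
resp = requests.post(
    "http://0.0.0.0:4000/openai/deployments/gpt-3.5-turbo/completions",
    headers={"Authorization": "Bearer sk-1234"},
    json={"prompt": "Say hi", "max_tokens": 5},
)
print(resp.json())
```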
@@ -4049,7 +4136,6 @@ async def generate_key_fn(
)
_budget_id = getattr(_budget, "budget_id", None)
data_json = data.json() # type: ignore
-
# if we get max_budget passed to /key/generate, then use it as key_max_budget. Since generate_key_helper_fn is used to make new users
if "max_budget" in data_json:
data_json["key_max_budget"] = data_json.pop("max_budget", None)
@@ -4108,6 +4194,13 @@ async def update_key_fn(request: Request, data: UpdateKeyRequest):
0,
): # models default to [], spend defaults to 0, we should not reset these values
non_default_values[k] = v
+
+ if "duration" in non_default_values:
+ duration = non_default_values.pop("duration")
+ duration_s = _duration_in_seconds(duration=duration)
+ expires = datetime.utcnow() + timedelta(seconds=duration_s)
+ non_default_values["expires"] = expires
+
response = await prisma_client.update_data(
token=key, data={**non_default_values, "token": key}
)
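`update_key_fn` now converts a `duration` string into an absolute expiry instead of ignoring it. A sketch of the conversion, using a hypothetical stand-in for `_duration_in_seconds` written to match the advertised formats (`30s`, `30h`, `30d`); the real helper may accept more units:

```python
from datetime import datetime, timedelta

# Hypothetical stand-in for the proxy's _duration_in_seconds helper.
def duration_in_seconds(duration: str) -> int:
    units = {"s": 1, "m": 60, "h": 3600, "d": 86400}
    return int(duration[:-1]) * units[duration[-1]]

# What update_key_fn now does with a "duration" field:
expires = datetime.utcnow() + timedelta(seconds=duration_in_seconds("30d"))
print(expires)  # new key expiry, 30 days from now
```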
@@ -6051,7 +6144,7 @@ async def team_member_delete(
-D '{
"team_id": "45e3e396-ee08-4a61-a88e-16b3ce7e0849",
- "member": {"role": "user", "user_id": "krrish247652@berri.ai"}
+ "user_id": "krrish247652@berri.ai"
}'
```
"""
diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py
index f313a7985..f70d67aac 100644
--- a/litellm/proxy/utils.py
+++ b/litellm/proxy/utils.py
@@ -1941,9 +1941,9 @@ async def update_spend(
end_user_id,
response_cost,
) in prisma_client.end_user_list_transactons.items():
- max_user_budget = None
- if litellm.max_user_budget is not None:
- max_user_budget = litellm.max_user_budget
+ max_end_user_budget = None
+ if litellm.max_end_user_budget is not None:
+ max_end_user_budget = litellm.max_end_user_budget
new_user_obj = LiteLLM_EndUserTable(
user_id=end_user_id, spend=response_cost, blocked=False
)
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index c27dba479..cb4ee84b5 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -195,6 +195,48 @@ def test_completion_claude_3_function_call():
pytest.fail(f"Error occurred: {e}")
+def test_parse_xml_params():
+ from litellm.llms.prompt_templates.factory import parse_xml_params
+
+ ## SCENARIO 1 ## - W/ ARRAY
+ xml_content = """return_list_of_str\n\n\n- apple
\n- banana
\n- orange
\n\n"""
+ json_schema = {
+ "properties": {
+ "value": {
+ "items": {"type": "string"},
+ "title": "Value",
+ "type": "array",
+ }
+ },
+ "required": ["value"],
+ "type": "object",
+ }
+ response = parse_xml_params(xml_content=xml_content, json_schema=json_schema)
+
+ print(f"response: {response}")
+ assert response["value"] == ["apple", "banana", "orange"]
+
+ ## SCENARIO 2 ## - W/OUT ARRAY
+ xml_content = """get_current_weather\n\nBoston, MA\nfahrenheit\n"""
+ json_schema = {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA",
+ },
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+ },
+ "required": ["location"],
+ }
+
+ response = parse_xml_params(xml_content=xml_content, json_schema=json_schema)
+
+ print(f"response: {response}")
+ assert response["location"] == "Boston, MA"
+ assert response["unit"] == "fahrenheit"
+
+
def test_completion_claude_3_multi_turn_conversations():
litellm.set_verbose = True
litellm.modify_params = True
diff --git a/litellm/tests/test_key_generate_prisma.py b/litellm/tests/test_key_generate_prisma.py
index 103b344f5..8430aa7fd 100644
--- a/litellm/tests/test_key_generate_prisma.py
+++ b/litellm/tests/test_key_generate_prisma.py
@@ -324,7 +324,7 @@ def test_call_with_end_user_over_budget(prisma_client):
setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
- setattr(litellm, "max_user_budget", 0.00001)
+ setattr(litellm, "max_end_user_budget", 0.00001)
try:
async def test():
@@ -378,7 +378,9 @@ def test_call_with_end_user_over_budget(prisma_client):
"user_api_key_user_id": user,
},
"proxy_server_request": {
- "user": user,
+ "body": {
+ "user": user,
+ }
},
},
"response_cost": 10,
@@ -407,18 +409,20 @@ def test_call_with_proxy_over_budget(prisma_client):
litellm_proxy_budget_name = f"litellm-proxy-budget-{time.time()}"
setattr(
litellm.proxy.proxy_server,
- "litellm_proxy_budget_name",
+ "litellm_proxy_admin_name",
litellm_proxy_budget_name,
)
+ setattr(litellm, "max_budget", 0.00001)
+ from litellm.proxy.proxy_server import user_api_key_cache
+
+ user_api_key_cache.set_cache(
+ key="{}:spend".format(litellm_proxy_budget_name), value=0
+ )
+ setattr(litellm.proxy.proxy_server, "user_api_key_cache", user_api_key_cache)
try:
async def test():
await litellm.proxy.proxy_server.prisma_client.connect()
- ## CREATE PROXY + USER BUDGET ##
- request = NewUserRequest(
- max_budget=0.00001, user_id=litellm_proxy_budget_name
- )
- await new_user(request)
request = NewUserRequest()
key = await new_user(request)
print(key)
@@ -470,6 +474,7 @@ def test_call_with_proxy_over_budget(prisma_client):
start_time=datetime.now(),
end_time=datetime.now(),
)
+
await asyncio.sleep(5)
# use generated key to auth in
result = await user_api_key_auth(request=request, api_key=bearer_token)
@@ -571,9 +576,17 @@ def test_call_with_proxy_over_budget_stream(prisma_client):
litellm_proxy_budget_name = f"litellm-proxy-budget-{time.time()}"
setattr(
litellm.proxy.proxy_server,
- "litellm_proxy_budget_name",
+ "litellm_proxy_admin_name",
litellm_proxy_budget_name,
)
+ setattr(litellm, "max_budget", 0.00001)
+ from litellm.proxy.proxy_server import user_api_key_cache
+
+ user_api_key_cache.set_cache(
+ key="{}:spend".format(litellm_proxy_budget_name), value=0
+ )
+ setattr(litellm.proxy.proxy_server, "user_api_key_cache", user_api_key_cache)
+
from litellm._logging import verbose_proxy_logger
import logging
@@ -584,10 +597,10 @@ def test_call_with_proxy_over_budget_stream(prisma_client):
async def test():
await litellm.proxy.proxy_server.prisma_client.connect()
## CREATE PROXY + USER BUDGET ##
- request = NewUserRequest(
- max_budget=0.00001, user_id=litellm_proxy_budget_name
- )
- await new_user(request)
+ # request = NewUserRequest(
+ # max_budget=0.00001, user_id=litellm_proxy_budget_name
+ # )
+ # await new_user(request)
request = NewUserRequest()
key = await new_user(request)
print(key)
diff --git a/litellm/tests/test_llmonitor_integration.py b/litellm/tests/test_llmonitor_integration.py
deleted file mode 100644
index e88995f3b..000000000
--- a/litellm/tests/test_llmonitor_integration.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# #### What this tests ####
-# # This tests if logging to the llmonitor integration actually works
-# # Adds the parent directory to the system path
-# import sys
-# import os
-
-# sys.path.insert(0, os.path.abspath("../.."))
-
-# from litellm import completion, embedding
-# import litellm
-
-# litellm.success_callback = ["llmonitor"]
-# litellm.failure_callback = ["llmonitor"]
-
-# litellm.set_verbose = True
-
-
-# def test_chat_openai():
-# try:
-# response = completion(
-# model="gpt-3.5-turbo",
-# messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
-# user="ishaan_from_litellm"
-# )
-
-# print(response)
-
-# except Exception as e:
-# print(e)
-
-
-# def test_embedding_openai():
-# try:
-# response = embedding(model="text-embedding-ada-002", input=["test"])
-# # Add any assertions here to check the response
-# print(f"response: {str(response)[:50]}")
-# except Exception as e:
-# print(e)
-
-
-# test_chat_openai()
-# # test_embedding_openai()
-
-
-# def test_llmonitor_logging_function_calling():
-# function1 = [
-# {
-# "name": "get_current_weather",
-# "description": "Get the current weather in a given location",
-# "parameters": {
-# "type": "object",
-# "properties": {
-# "location": {
-# "type": "string",
-# "description": "The city and state, e.g. San Francisco, CA",
-# },
-# "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
-# },
-# "required": ["location"],
-# },
-# }
-# ]
-# try:
-# response = completion(model="gpt-3.5-turbo",
-# messages=[{
-# "role": "user",
-# "content": "what's the weather in boston"
-# }],
-# temperature=0.1,
-# functions=function1,
-# )
-# print(response)
-# except Exception as e:
-# print(e)
-
-# # test_llmonitor_logging_function_calling()
diff --git a/litellm/tests/test_lunary.py b/litellm/tests/test_lunary.py
new file mode 100644
index 000000000..cbf9364af
--- /dev/null
+++ b/litellm/tests/test_lunary.py
@@ -0,0 +1,85 @@
+import sys
+import os
+import io
+
+sys.path.insert(0, os.path.abspath("../.."))
+
+from litellm import completion
+import litellm
+
+litellm.failure_callback = ["lunary"]
+litellm.success_callback = ["lunary"]
+litellm.set_verbose = True
+
+
+def test_lunary_logging():
+ try:
+ response = completion(
+ model="gpt-3.5-turbo",
+ messages=[{"role": "user", "content": "what llm are u"}],
+ max_tokens=10,
+ temperature=0.2,
+ user="test-user",
+ )
+ print(response)
+ except Exception as e:
+ print(e)
+
+
+# test_lunary_logging()
+
+
+def test_lunary_template():
+ import lunary
+
+ try:
+ template = lunary.render_template("test-template", {"question": "Hello!"})
+ response = completion(**template)
+ print(response)
+ except Exception as e:
+ print(e)
+
+
+# test_lunary_template()
+
+
+def test_lunary_logging_with_metadata():
+ try:
+ response = completion(
+ model="gpt-3.5-turbo",
+ messages=[{"role": "user", "content": "what llm are u"}],
+ max_tokens=10,
+ temperature=0.2,
+ metadata={
+ "run_name": "litellmRUN",
+ "project_name": "litellm-completion",
+ },
+ )
+ print(response)
+ except Exception as e:
+ print(e)
+
+
+# test_lunary_logging_with_metadata()
+
+
+def test_lunary_logging_with_streaming_and_metadata():
+ try:
+ response = completion(
+ model="gpt-3.5-turbo",
+ messages=[{"role": "user", "content": "what llm are u"}],
+ max_tokens=10,
+ temperature=0.2,
+ metadata={
+ "run_name": "litellmRUN",
+ "project_name": "litellm-completion",
+ },
+ stream=True,
+ )
+ for chunk in response:
+ continue
+ except Exception as e:
+ print(e)
+
+
+# test_lunary_logging_with_streaming_and_metadata()
diff --git a/litellm/utils.py b/litellm/utils.py
index 0001ba760..3ec882f0f 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -59,7 +59,7 @@ from .integrations.helicone import HeliconeLogger
from .integrations.aispend import AISpendLogger
from .integrations.berrispend import BerriSpendLogger
from .integrations.supabase import Supabase
-from .integrations.llmonitor import LLMonitorLogger
+from .integrations.lunary import LunaryLogger
from .integrations.prompt_layer import PromptLayerLogger
from .integrations.langsmith import LangsmithLogger
from .integrations.weights_biases import WeightsBiasesLogger
@@ -129,7 +129,7 @@ dynamoLogger = None
s3Logger = None
genericAPILogger = None
clickHouseLogger = None
-llmonitorLogger = None
+lunaryLogger = None
aispendLogger = None
berrispendLogger = None
supabaseClient = None
@@ -882,7 +882,7 @@ class CallTypes(Enum):
# Logging function -> log the exact model details + what's being sent | Non-BlockingP
class Logging:
- global supabaseClient, liteDebuggerClient, promptLayerLogger, weightsBiasesLogger, langsmithLogger, capture_exception, add_breadcrumb, llmonitorLogger
+ global supabaseClient, liteDebuggerClient, promptLayerLogger, weightsBiasesLogger, langsmithLogger, capture_exception, add_breadcrumb, lunaryLogger
def __init__(
self,
@@ -1429,27 +1429,37 @@ class Logging:
end_time=end_time,
print_verbose=print_verbose,
)
- if callback == "llmonitor":
- print_verbose("reaches llmonitor for logging!")
+ if callback == "lunary":
+ print_verbose("reaches lunary for logging!")
model = self.model
+ kwargs = self.model_call_details
- input = self.model_call_details.get(
- "messages", self.model_call_details.get("input", None)
+ input = kwargs.get(
+ "messages", kwargs.get("input", None)
)
- # if contains input, it's 'embedding', otherwise 'llm'
type = (
"embed"
if self.call_type == CallTypes.embedding.value
else "llm"
)
- llmonitorLogger.log_event(
+ # only log a streamed call once: "complete_streaming_response" is set only after the stream ends
+ if self.stream:
+ if "complete_streaming_response" not in kwargs:
+ break
+ else:
+ result = kwargs["complete_streaming_response"]
+
+ lunaryLogger.log_event(
type=type,
+ kwargs=kwargs,
event="end",
model=model,
input=input,
- user_id=self.model_call_details.get("user", "default"),
+ user_id=kwargs.get("user", None),
+ #user_props=self.model_call_details.get("user_props", None),
+ extra=kwargs.get("optional_params", {}),
response_obj=result,
start_time=start_time,
end_time=end_time,
@@ -2041,8 +2051,8 @@ class Logging:
                    call_type=self.call_type,
                    stream=self.stream,
                )
-                elif callback == "llmonitor":
-                    print_verbose("reaches llmonitor for logging error!")
+                elif callback == "lunary":
+                    print_verbose("reaches lunary for logging error!")
                    model = self.model
@@ -2054,7 +2064,7 @@
                        else "llm"
                    )
-                    llmonitorLogger.log_event(
+                    lunaryLogger.log_event(
                        type=_type,
                        event="error",
                        user_id=self.model_call_details.get("user", "default"),
@@ -6166,7 +6178,7 @@ def validate_environment(model: Optional[str] = None) -> dict:
def set_callbacks(callback_list, function_id=None):
-    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, athinaLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, langsmithLogger, dynamoLogger, s3Logger, dataDogLogger, prometheusLogger
+    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, athinaLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, lunaryLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, langsmithLogger, dynamoLogger, s3Logger, dataDogLogger, prometheusLogger
    try:
        for callback in callback_list:
            print_verbose(f"callback: {callback}")
@@ -6226,8 +6240,8 @@ def set_callbacks(callback_list, function_id=None):
                print_verbose("Initialized Athina Logger")
            elif callback == "helicone":
                heliconeLogger = HeliconeLogger()
-            elif callback == "llmonitor":
-                llmonitorLogger = LLMonitorLogger()
+            elif callback == "lunary":
+                lunaryLogger = LunaryLogger()
            elif callback == "promptlayer":
                promptLayerLogger = PromptLayerLogger()
            elif callback == "langfuse":
@@ -6270,7 +6284,7 @@
# NOTE: DEPRECATING this in favor of using failure_handler() in Logging:
def handle_failure(exception, traceback_exception, start_time, end_time, args, kwargs):
-    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
+    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, lunaryLogger
    try:
        # print_verbose(f"handle_failure args: {args}")
        # print_verbose(f"handle_failure kwargs: {kwargs}")
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index d24d5a8ee..d3eb93d72 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -1503,7 +1503,7 @@
        "litellm_provider": "bedrock",
        "mode": "chat"
    },
-    "mistral.mixtral-8x7b-instruct": {
+    "mistral.mixtral-8x7b-instruct-v0:1": {
        "max_tokens": 8191,
        "max_input_tokens": 32000,
        "max_output_tokens": 8191,
@@ -1512,7 +1512,7 @@
        "litellm_provider": "bedrock",
        "mode": "chat"
    },
-    "bedrock/us-west-2/mistral.mixtral-8x7b-instruct": {
+    "bedrock/us-west-2/mistral.mixtral-8x7b-instruct-v0:1": {
        "max_tokens": 8191,
        "max_input_tokens": 32000,
        "max_output_tokens": 8191,
diff --git a/proxy_server_config.yaml b/proxy_server_config.yaml
index 30033b28b..089c1e95c 100644
--- a/proxy_server_config.yaml
+++ b/proxy_server_config.yaml
@@ -45,8 +45,8 @@ model_list:
litellm_settings:
  drop_params: True
-  max_budget: 100
-  budget_duration: 30d
+  # max_budget: 100
+  # budget_duration: 30d
  num_retries: 5
  request_timeout: 600
  telemetry: False
diff --git a/pyproject.toml b/pyproject.toml
index f950c246d..8e328412d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
-version = "1.34.14"
+version = "1.34.17"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT"
@@ -80,7 +80,7 @@ requires = ["poetry-core", "wheel"]
build-backend = "poetry.core.masonry.api"
[tool.commitizen]
-version = "1.34.14"
+version = "1.34.17"
version_files = [
"pyproject.toml:^version"
]
diff --git a/tests/test_keys.py b/tests/test_keys.py
index 4a563cc18..33a5f2650 100644
--- a/tests/test_keys.py
+++ b/tests/test_keys.py
@@ -65,7 +65,7 @@ async def update_key(session, get_key):
        "Authorization": f"Bearer sk-1234",
        "Content-Type": "application/json",
    }
-    data = {"key": get_key, "models": ["gpt-4"]}
+    data = {"key": get_key, "models": ["gpt-4"], "duration": "120s"}
    async with session.post(url, headers=headers, json=data) as response:
        status = response.status
diff --git a/tests/test_openai_endpoints.py b/tests/test_openai_endpoints.py
index 432f2f1ab..9535b4842 100644
--- a/tests/test_openai_endpoints.py
+++ b/tests/test_openai_endpoints.py
@@ -2,7 +2,8 @@
## Tests /chat/completions by generating a key and then making a chat completions request
import pytest
import asyncio
-import aiohttp
+import aiohttp, openai
+from openai import OpenAI
async def generate_key(session):
@@ -114,14 +115,14 @@ async def completion(session, key):
    async with session.post(url, headers=headers, json=data) as response:
        status = response.status
-        response_text = await response.text()
-
-        print(response_text)
-        print()
        if status != 200:
            raise Exception(f"Request did not return a 200 status code: {status}")
+        response = await response.json()
+
+        return response
+
@pytest.mark.asyncio
async def test_completion():
@@ -137,7 +138,17 @@ async def test_completion():
        await completion(session=session, key=key)
        key_gen = await new_user(session=session)
        key_2 = key_gen["key"]
-        await completion(session=session, key=key_2)
+        # response = await completion(session=session, key=key_2)
+
+        ## validate openai format ##
+        client = OpenAI(api_key=key_2, base_url="http://0.0.0.0:4000")
+
+        client.completions.create(
+            model="gpt-4",
+            prompt="Say this is a test",
+            max_tokens=7,
+            temperature=0,
+        )
async def embeddings(session, key):
diff --git a/ui/litellm-dashboard/out/404.html b/ui/litellm-dashboard/out/404.html
index 4f087e491..e69de29bb 100644
--- a/ui/litellm-dashboard/out/404.html
+++ b/ui/litellm-dashboard/out/404.html
@@ -1 +0,0 @@
-404: This page could not be found.🚅 LiteLLM404This page could not be found.
\ No newline at end of file
diff --git a/ui/litellm-dashboard/out/_next/static/DptMjzo5xd96cx0b56k4u/_buildManifest.js b/ui/litellm-dashboard/out/_next/static/DptMjzo5xd96cx0b56k4u/_buildManifest.js
new file mode 100644
index 000000000..f779caa02
--- /dev/null
+++ b/ui/litellm-dashboard/out/_next/static/DptMjzo5xd96cx0b56k4u/_buildManifest.js
@@ -0,0 +1 @@
+self.__BUILD_MANIFEST={__rewrites:{afterFiles:[],beforeFiles:[],fallback:[]},"/_error":["static/chunks/pages/_error-d6107f1aac0c574c.js"],sortedPages:["/_app","/_error"]},self.__BUILD_MANIFEST_CB&&self.__BUILD_MANIFEST_CB();
\ No newline at end of file
diff --git a/ui/litellm-dashboard/out/_next/static/DptMjzo5xd96cx0b56k4u/_ssgManifest.js b/ui/litellm-dashboard/out/_next/static/DptMjzo5xd96cx0b56k4u/_ssgManifest.js
new file mode 100644
index 000000000..5b3ff592f
--- /dev/null
+++ b/ui/litellm-dashboard/out/_next/static/DptMjzo5xd96cx0b56k4u/_ssgManifest.js
@@ -0,0 +1 @@
+self.__SSG_MANIFEST=new Set([]);self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB()
\ No newline at end of file
diff --git a/ui/litellm-dashboard/out/_next/static/chunks/app/page-a5a04da2a9356785.js b/ui/litellm-dashboard/out/_next/static/chunks/app/page-a5a04da2a9356785.js
new file mode 100644
index 000000000..77f6db469
--- /dev/null
+++ b/ui/litellm-dashboard/out/_next/static/chunks/app/page-a5a04da2a9356785.js
@@ -0,0 +1 @@
+(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[931],{20661:function(e,t,s){Promise.resolve().then(s.bind(s,90177))},90177:function(e,t,s){"use strict";s.r(t),s.d(t,{default:function(){return eB}});var l=s(3827),r=s(64090),n=s(47907),a=s(8792),o=s(40491),i=s(65270),c=e=>{let{userID:t,userRole:s,userEmail:r,showSSOBanner:n}=e;console.log("User ID:",t),console.log("userEmail:",r);let c=[{key:"1",label:(0,l.jsxs)(l.Fragment,{children:[(0,l.jsxs)("p",{children:["Role: ",s]}),(0,l.jsxs)("p",{children:["ID: ",t]})]})}];return(0,l.jsxs)("nav",{className:"left-0 right-0 top-0 flex justify-between items-center h-12 mb-4",children:[(0,l.jsx)("div",{className:"text-left my-2 absolute top-0 left-0",children:(0,l.jsx)("div",{className:"flex flex-col items-center",children:(0,l.jsx)(a.default,{href:"/",children:(0,l.jsx)("button",{className:"text-gray-800 text-2xl py-1 rounded text-center",children:(0,l.jsx)("img",{src:"/get_image",width:200,height:200,alt:"LiteLLM Brand",className:"mr-2"})})})})}),(0,l.jsxs)("div",{className:"text-right mx-4 my-2 absolute top-0 right-0 flex items-center justify-end space-x-2",children:[n?(0,l.jsx)("a",{href:"https://docs.litellm.ai/docs/proxy/ui#setup-ssoauth-for-ui",target:"_blank",className:"mr-2"}):null,(0,l.jsx)("div",{style:{border:"1px solid #391085",padding:"6px",borderRadius:"8px"},children:(0,l.jsx)(o.Z,{menu:{items:c},children:(0,l.jsx)(i.Z,{children:r})})})]})]})},d=s(80588);let m=async(e,t,s)=>{try{if(console.log("Form Values in keyCreateCall:",s),s.description&&(s.metadata||(s.metadata={}),s.metadata.description=s.description,delete s.description,s.metadata=JSON.stringify(s.metadata)),s.metadata){console.log("formValues.metadata:",s.metadata);try{s.metadata=JSON.parse(s.metadata)}catch(e){throw d.ZP.error("Failed to parse metadata: "+e),Error("Failed to parse metadata: "+e)}}console.log("Form Values after check:",s);let l=await fetch("/key/generate",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({user_id:t,...s})});if(!l.ok){let e=await l.text();throw d.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let r=await l.json();return console.log("API Response:",r),r}catch(e){throw console.error("Failed to create key:",e),e}},h=async(e,t,s)=>{try{if(console.log("Form Values in keyCreateCall:",s),s.description&&(s.metadata||(s.metadata={}),s.metadata.description=s.description,delete s.description,s.metadata=JSON.stringify(s.metadata)),s.metadata){console.log("formValues.metadata:",s.metadata);try{s.metadata=JSON.parse(s.metadata)}catch(e){throw d.ZP.error("Failed to parse metadata: "+e),Error("Failed to parse metadata: "+e)}}console.log("Form Values after check:",s);let l=await fetch("/user/new",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({user_id:t,...s})});if(!l.ok){let e=await l.text();throw d.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let r=await l.json();return console.log("API Response:",r),r}catch(e){throw console.error("Failed to create key:",e),e}},u=async(e,t)=>{try{console.log("in keyDeleteCall:",t);let s=await fetch("/key/delete",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({keys:[t]})});if(!s.ok){let e=await s.text();throw d.ZP.error("Failed to delete key: "+e),Error("Network response 
was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to create key:",e),e}},x=async(e,t)=>{try{console.log("in teamDeleteCall:",t);let s=await fetch("/team/delete",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({team_ids:[t]})});if(!s.ok){let e=await s.text();throw d.ZP.error("Failed to delete team: "+e),Error("Network response was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to delete key:",e),e}},p=async function(e,t,s){let l=arguments.length>3&&void 0!==arguments[3]&&arguments[3];try{let r="/user/info";"App Owner"==s&&t&&(r="".concat(r,"?user_id=").concat(t)),console.log("in userInfoCall viewAll=",l),l&&(r="".concat(r,"?view_all=true"));let n=await fetch(r,{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!n.ok){let e=await n.text();throw d.ZP.error(e),Error("Network response was not ok")}let a=await n.json();return console.log("API Response:",a),a}catch(e){throw console.error("Failed to create key:",e),e}},j=async e=>{try{let t=await fetch("/global/spend",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}return await t.json()}catch(e){throw console.error("Failed to create key:",e),e}},g=async(e,t,s)=>{try{let t=await fetch("/v2/model/info",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}return await t.json()}catch(e){throw console.error("Failed to create key:",e),e}},y=async(e,t,s)=>{try{let t=await fetch("/model/metrics",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}return await t.json()}catch(e){throw console.error("Failed to create key:",e),e}},f=async(e,t,s)=>{try{let t=await fetch("/models",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}return await t.json()}catch(e){throw console.error("Failed to create key:",e),e}},w=async(e,t)=>{try{let s="/global/spend/logs";console.log("in keySpendLogsCall:",s);let l=await fetch("".concat(s,"?api_key=").concat(t),{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!l.ok){let e=await l.text();throw d.ZP.error(e),Error("Network response was not ok")}let r=await l.json();return console.log(r),r}catch(e){throw console.error("Failed to create key:",e),e}},Z=async e=>{try{let t="/global/spend/teams";console.log("in teamSpendLogsCall:",t);let s=await fetch("".concat(t),{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!s.ok){let e=await s.text();throw d.ZP.error(e),Error("Network response was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to create key:",e),e}},_=async(e,t,s,l,r,n)=>{try{console.log("user role in spend logs call: ".concat(s));let t="/spend/logs";t="App Owner"==s?"".concat(t,"?user_id=").concat(l,"&start_date=").concat(r,"&end_date=").concat(n):"".concat(t,"?start_date=").concat(r,"&end_date=").concat(n);let a=await fetch(t,{method:"GET",headers:{Authorization:"Bearer 
".concat(e),"Content-Type":"application/json"}});if(!a.ok){let e=await a.text();throw d.ZP.error(e),Error("Network response was not ok")}let o=await a.json();return console.log(o),o}catch(e){throw console.error("Failed to create key:",e),e}},k=async e=>{try{let t=await fetch("/global/spend/logs",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}let s=await t.json();return console.log(s),s}catch(e){throw console.error("Failed to create key:",e),e}},b=async e=>{try{let t=await fetch("/global/spend/keys?limit=5",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}let s=await t.json();return console.log(s),s}catch(e){throw console.error("Failed to create key:",e),e}},v=async(e,t)=>{try{t&&JSON.stringify({api_key:t});let s={method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}};t&&(s.body=JSON.stringify({api_key:t}));let l=await fetch("/global/spend/end_users",s);if(!l.ok){let e=await l.text();throw d.ZP.error(e),Error("Network response was not ok")}let r=await l.json();return console.log(r),r}catch(e){throw console.error("Failed to create key:",e),e}},N=async e=>{try{let t=await fetch("/global/spend/models?limit=5",{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!t.ok){let e=await t.text();throw d.ZP.error(e),Error("Network response was not ok")}let s=await t.json();return console.log(s),s}catch(e){throw console.error("Failed to create key:",e),e}},S=async(e,t)=>{try{let s=await fetch("/v2/key/info",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({keys:t})});if(!s.ok){let e=await s.text();throw d.ZP.error(e),Error("Network response was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to create key:",e),e}},A=async(e,t,s,l)=>{try{let r=await fetch("/user/request_model",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({models:[t],user_id:s,justification:l})});if(!r.ok){let e=await r.text();throw d.ZP.error("Failed to delete key: "+e),Error("Network response was not ok")}let n=await r.json();return console.log(n),n}catch(e){throw console.error("Failed to create key:",e),e}},C=async e=>{try{let t="/user/get_requests";console.log("in userGetRequesedtModelsCall:",t);let s=await fetch(t,{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!s.ok){let e=await s.text();throw d.ZP.error("Failed to delete key: "+e),Error("Network response was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to get requested models:",e),e}},T=async(e,t)=>{try{let s="/user/get_users?role=".concat(t);console.log("in userGetAllUsersCall:",s);let l=await fetch(s,{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!l.ok){let e=await l.text();throw d.ZP.error("Failed to delete key: "+e),Error("Network response was not ok")}let r=await l.json();return console.log(r),r}catch(e){throw console.error("Failed to get requested models:",e),e}},I=async(e,t)=>{try{console.log("Form Values in teamCreateCall:",t);let s=await fetch("/team/new",{method:"POST",headers:{Authorization:"Bearer 
".concat(e),"Content-Type":"application/json"},body:JSON.stringify({...t})});if(!s.ok){let e=await s.text();throw d.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let l=await s.json();return console.log("API Response:",l),l}catch(e){throw console.error("Failed to create key:",e),e}},P=async(e,t,s)=>{try{console.log("Form Values in teamMemberAddCall:",s);let l=await fetch("/team/member_add",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({team_id:t,member:s})});if(!l.ok){let e=await l.text();throw d.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let r=await l.json();return console.log("API Response:",r),r}catch(e){throw console.error("Failed to create key:",e),e}},E=async(e,t)=>{try{console.log("Form Values in userUpdateUserCall:",t);let s=await fetch("/user/update",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({user_role:"proxy_admin_viewer",...t})});if(!s.ok){let e=await s.text();throw d.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let l=await s.json();return console.log("API Response:",l),l}catch(e){throw console.error("Failed to create key:",e),e}},F=async(e,t)=>{try{let s=await fetch("/global/predict/spend/logs",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({data:t})});if(!s.ok){let e=await s.text();throw d.ZP.error(e),Error("Network response was not ok")}let l=await s.json();return console.log(l),l}catch(e){throw console.error("Failed to create key:",e),e}};var M=s(10384),O=s(46453),R=s(2179),D=s(71801),U=s(42440),z=s(55217),B=s(50670),L=s(12143),K=s(77171),V=s(42539),q=s(88707),J=s(1861);let{Option:G}=B.default;var W=e=>{let{userID:t,team:s,userRole:n,accessToken:a,data:o,setData:i}=e,[c]=L.Z.useForm(),[h,u]=(0,r.useState)(!1),[x,p]=(0,r.useState)(null),[j,g]=(0,r.useState)(null),[y,w]=(0,r.useState)([]),Z=()=>{u(!1),c.resetFields()},_=()=>{u(!1),p(null),c.resetFields()};(0,r.useEffect)(()=>{(async()=>{try{if(null===t||null===n)return;if(null!==a){let e=(await f(a,t,n)).data.map(e=>e.id);console.log("available_model_names:",e),w(e)}}catch(e){console.error("Error fetching user models:",e)}})()},[a,t,n]);let k=async e=>{try{d.ZP.info("Making API Call"),u(!0);let s=await m(a,t,e);console.log("key create Response:",s),i(e=>e?[...e,s]:[s]),p(s.key),g(s.soft_budget),d.ZP.success("API Key Created"),c.resetFields(),localStorage.removeItem("userData"+t)}catch(e){console.error("Error creating the key:",e)}},b=e=>{if(e.includes("all_models")){let e=s?s.models:y;c.setFieldsValue({models:e})}};return(0,l.jsxs)("div",{children:[(0,l.jsx)(R.Z,{className:"mx-auto",onClick:()=>u(!0),children:"+ Create New Key"}),(0,l.jsx)(K.Z,{title:"Create Key",visible:h,width:800,footer:null,onOk:Z,onCancel:_,children:(0,l.jsxs)(L.Z,{form:c,onFinish:k,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:["App Owner"===n||"Admin"===n?(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)(L.Z.Item,{label:"Key Name",name:"key_alias",rules:[{required:!0,message:"Please input a key name"}],help:"required",children:(0,l.jsx)(V.Z,{})}),(0,l.jsx)(L.Z.Item,{label:"Team 
ID",name:"team_id",initialValue:s?s.team_id:null,valuePropName:"team_id",className:"mt-8",children:(0,l.jsx)(V.Z,{value:s?s.team_alias:"",disabled:!0})}),(0,l.jsx)(L.Z.Item,{label:"Models",name:"models",children:(0,l.jsxs)(B.default,{mode:"multiple",placeholder:"Select models",style:{width:"100%"},onChange:e=>b(e),children:[(0,l.jsx)(G,{value:"all_models",children:"All Models"},"all_models"),s&&s.models?s.models.map(e=>(0,l.jsx)(G,{value:e,children:e},e)):y.map(e=>(0,l.jsx)(G,{value:e,children:e},e))]})}),(0,l.jsx)(L.Z.Item,{className:"mt-8",label:"Max Budget (USD)",name:"max_budget",help:"Budget cannot exceed team max budget: $".concat((null==s?void 0:s.max_budget)!==null&&(null==s?void 0:s.max_budget)!==void 0?null==s?void 0:s.max_budget:"unlimited"),rules:[{validator:async(e,t)=>{if(t&&s&&null!==s.max_budget&&t>s.max_budget)throw Error("Budget cannot exceed team max budget: $".concat(s.max_budget))}}],children:(0,l.jsx)(q.Z,{step:.01,precision:2,width:200})}),(0,l.jsx)(L.Z.Item,{className:"mt-8",label:"Reset Budget",name:"budget_duration",help:"Team Reset Budget: ".concat((null==s?void 0:s.budget_duration)!==null&&(null==s?void 0:s.budget_duration)!==void 0?null==s?void 0:s.budget_duration:"None"),children:(0,l.jsxs)(B.default,{defaultValue:null,placeholder:"n/a",children:[(0,l.jsx)(B.default.Option,{value:"24h",children:"daily"}),(0,l.jsx)(B.default.Option,{value:"30d",children:"monthly"})]})}),(0,l.jsx)(L.Z.Item,{className:"mt-8",label:"Tokens per minute Limit (TPM)",name:"tpm_limit",help:"TPM cannot exceed team TPM limit: ".concat((null==s?void 0:s.tpm_limit)!==null&&(null==s?void 0:s.tpm_limit)!==void 0?null==s?void 0:s.tpm_limit:"unlimited"),rules:[{validator:async(e,t)=>{if(t&&s&&null!==s.tpm_limit&&t>s.tpm_limit)throw Error("TPM limit cannot exceed team TPM limit: ".concat(s.tpm_limit))}}],children:(0,l.jsx)(q.Z,{step:1,width:400})}),(0,l.jsx)(L.Z.Item,{className:"mt-8",label:"Requests per minute Limit (RPM)",name:"rpm_limit",help:"RPM cannot exceed team RPM limit: ".concat((null==s?void 0:s.rpm_limit)!==null&&(null==s?void 0:s.rpm_limit)!==void 0?null==s?void 0:s.rpm_limit:"unlimited"),rules:[{validator:async(e,t)=>{if(t&&s&&null!==s.rpm_limit&&t>s.rpm_limit)throw Error("RPM limit cannot exceed team RPM limit: ".concat(s.rpm_limit))}}],children:(0,l.jsx)(q.Z,{step:1,width:400})}),(0,l.jsx)(L.Z.Item,{label:"Expire Key (eg: 30s, 30h, 30d)",name:"duration",className:"mt-8",children:(0,l.jsx)(V.Z,{})}),(0,l.jsx)(L.Z.Item,{label:"Metadata",name:"metadata",children:(0,l.jsx)(V.Z.TextArea,{rows:4,placeholder:"Enter metadata as JSON"})})]}):(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)(L.Z.Item,{label:"Key Name",name:"key_alias",children:(0,l.jsx)(V.Z,{})}),(0,l.jsx)(L.Z.Item,{label:"Team ID (Contact Group)",name:"team_id",children:(0,l.jsx)(V.Z,{placeholder:"default team (create a new team)"})}),(0,l.jsx)(L.Z.Item,{label:"Description",name:"description",children:(0,l.jsx)(V.Z.TextArea,{placeholder:"Enter description",rows:4})})]}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(J.ZP,{htmlType:"submit",children:"Create Key"})})]})}),x&&(0,l.jsx)(K.Z,{visible:h,onOk:Z,onCancel:_,footer:null,children:(0,l.jsxs)(O.Z,{numItems:1,className:"gap-2 w-full",children:[(0,l.jsx)(U.Z,{children:"Save your Key"}),(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsxs)("p",{children:["Please save this secret key somewhere safe and accessible. For security reasons, ",(0,l.jsx)("b",{children:"you will not be able to view it again"})," ","through your LiteLLM account. 
If you lose this secret key, you will need to generate a new one."]})}),(0,l.jsx)(M.Z,{numColSpan:1,children:null!=x?(0,l.jsxs)("div",{children:[(0,l.jsx)(D.Z,{className:"mt-3",children:"API Key:"}),(0,l.jsx)("div",{style:{background:"#f8f8f8",padding:"10px",borderRadius:"5px",marginBottom:"10px"},children:(0,l.jsx)("pre",{style:{wordWrap:"break-word",whiteSpace:"normal"},children:x})}),(0,l.jsx)(z.CopyToClipboard,{text:x,onCopy:()=>{d.ZP.success("API Key copied to clipboard")},children:(0,l.jsx)(R.Z,{className:"mt-3",children:"Copy API Key"})})]}):(0,l.jsx)(D.Z,{children:"Key being created, this might take 30s"})})]})})]})},$=s(9454),Y=s(33393),H=s(5),X=s(9853),Q=s(13810),ee=s(39290),et=s(66952),es=s(61244),el=s(10827),er=s(3851),en=s(2044),ea=s(64167),eo=s(74480),ei=s(7178),ec=e=>{let{userID:t,accessToken:s,selectedTeam:n,data:a,setData:o}=e,[i,c]=(0,r.useState)(!1),[d,m]=(0,r.useState)(!1),[h,x]=(0,r.useState)(null),[p,j]=r.useState(null),[g,y]=(0,r.useState)(null),[f,Z]=(0,r.useState)(null),[_,k]=(0,r.useState)(""),b=async e=>{try{if(null==s||null==e)return;console.log("accessToken: ".concat(s,"; token: ").concat(e.token));let t=await w(s,e.token);console.log("Response:",t),Z(t);let l=await F(s,t);console.log("Response2:",l);let r=[...t,...l.response];Z(r),k(l.predicted_spend),console.log("Combined Data:",r)}catch(e){console.error("There was an error fetching the data",e)}};(0,r.useEffect)(()=>{b(g)},[g]);let v=async e=>{null!=a&&(x(e),localStorage.removeItem("userData"+t),m(!0))},N=async()=>{if(null!=h&&null!=a){try{await u(s,h);let e=a.filter(e=>e.token!==h);o(e)}catch(e){console.error("Error deleting the key:",e)}m(!1),x(null)}};if(null!=a)return console.log("RERENDER TRIGGERED"),(0,l.jsx)("div",{children:(0,l.jsxs)(Q.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh] mb-4 mt-2",children:[(0,l.jsxs)(el.Z,{className:"mt-5",children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"Key Alias"}),(0,l.jsx)(eo.Z,{children:"Secret Key"}),(0,l.jsx)(eo.Z,{children:"Spend (USD)"}),(0,l.jsx)(eo.Z,{children:"Budget (USD)"}),(0,l.jsx)(eo.Z,{children:"Models"}),(0,l.jsx)(eo.Z,{children:"TPM / RPM Limits"})]})}),(0,l.jsx)(er.Z,{children:a.map(e=>(console.log(e),"litellm-dashboard"===e.team_id||n&&e.team_id!=n.team_id)?null:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{style:{maxWidth:"2px",whiteSpace:"pre-wrap",overflow:"hidden"},children:null!=e.key_alias?(0,l.jsx)(D.Z,{children:e.key_alias}):(0,l.jsx)(D.Z,{children:"Not Set"})}),(0,l.jsx)(en.Z,{children:(0,l.jsx)(D.Z,{children:e.key_name})}),(0,l.jsx)(en.Z,{children:(0,l.jsx)(D.Z,{children:(()=>{try{return parseFloat(e.spend).toFixed(4)}catch(t){return e.spend}})()})}),(0,l.jsx)(en.Z,{children:null!=e.max_budget?(0,l.jsx)(D.Z,{children:e.max_budget}):(0,l.jsx)(D.Z,{children:"Unlimited"})}),(0,l.jsx)(en.Z,{children:Array.isArray(e.models)?(0,l.jsx)("div",{style:{display:"flex",flexDirection:"column"},children:0===e.models.length?(0,l.jsx)(l.Fragment,{children:n&&n.models&&n.models.length>0?n.models.map((e,t)=>(0,l.jsx)(H.Z,{size:"xs",className:"mb-1",color:"blue",children:(0,l.jsx)(D.Z,{children:e.length>30?"".concat(e.slice(0,30),"..."):e})},t)):(0,l.jsx)(H.Z,{size:"xs",className:"mb-1",color:"purple",children:(0,l.jsx)(D.Z,{children:"All Models"})})}):e.models.map((e,t)=>(0,l.jsx)(H.Z,{size:"xs",className:"mb-1",color:"blue",children:(0,l.jsx)(D.Z,{children:e.length>30?"".concat(e.slice(0,30),"..."):e})},t))}):null}),(0,l.jsx)(en.Z,{children:(0,l.jsxs)(D.Z,{children:["TPM: 
",e.tpm_limit?e.tpm_limit:"Unlimited"," ",(0,l.jsx)("br",{})," RPM:"," ",e.rpm_limit?e.rpm_limit:"Unlimited"]})}),(0,l.jsxs)(en.Z,{children:[(0,l.jsx)(es.Z,{onClick:()=>{y(e),j(e.id)},icon:$.Z,size:"sm"}),(0,l.jsx)(ee.Z,{open:null!==p,onClose:()=>{j(null),y(null)},children:(0,l.jsx)(et.Z,{children:g&&(0,l.jsxs)(l.Fragment,{children:[(0,l.jsxs)("div",{className:"grid grid-cols-1 gap-6 sm:grid-cols-2 lg:grid-cols-3",children:[(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)("p",{className:"text-tremor-default font-medium text-tremor-content dark:text-dark-tremor-content",children:"Spend"}),(0,l.jsx)("div",{className:"mt-2 flex items-baseline space-x-2.5",children:(0,l.jsx)("p",{className:"text-tremor font-semibold text-tremor-content-strong dark:text-dark-tremor-content-strong",children:(()=>{try{return parseFloat(g.spend).toFixed(4)}catch(e){return g.spend}})()})})]}),(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)("p",{className:"text-tremor-default font-medium text-tremor-content dark:text-dark-tremor-content",children:"Budget"}),(0,l.jsx)("div",{className:"mt-2 flex items-baseline space-x-2.5",children:(0,l.jsx)("p",{className:"text-tremor font-semibold text-tremor-content-strong dark:text-dark-tremor-content-strong",children:null!=g.max_budget?(0,l.jsx)(l.Fragment,{children:g.max_budget}):(0,l.jsx)(l.Fragment,{children:"Unlimited"})})})]},e.name),(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)("p",{className:"text-tremor-default font-medium text-tremor-content dark:text-dark-tremor-content",children:"Expires"}),(0,l.jsx)("div",{className:"mt-2 flex items-baseline space-x-2.5",children:(0,l.jsx)("p",{className:"text-tremor-default font-small text-tremor-content-strong dark:text-dark-tremor-content-strong",children:null!=g.expires?(0,l.jsx)(l.Fragment,{children:new Date(g.expires).toLocaleString(void 0,{day:"numeric",month:"long",year:"numeric",hour:"numeric",minute:"numeric",second:"numeric"})}):(0,l.jsx)(l.Fragment,{children:"Never"})})})]},e.name)]}),(0,l.jsx)(Q.Z,{className:"mt-6 mb-6",children:f&&(0,l.jsx)(X.Z,{className:"mt-6",data:f,colors:["blue","amber"],index:"date",categories:["spend","predicted_spend"],yAxisWidth:80})}),(0,l.jsx)(U.Z,{children:"Metadata"}),(0,l.jsx)(D.Z,{children:JSON.stringify(g.metadata)}),(0,l.jsx)(R.Z,{variant:"light",className:"mx-auto flex items-center",onClick:()=>{j(null),y(null)},children:"Close"})]})})}),(0,l.jsx)(es.Z,{onClick:()=>v(e.token),icon:Y.Z,size:"sm"})]})]},e.token))})]}),d&&(0,l.jsx)("div",{className:"fixed z-10 inset-0 overflow-y-auto",children:(0,l.jsxs)("div",{className:"flex items-end justify-center min-h-screen pt-4 px-4 pb-20 text-center sm:block sm:p-0",children:[(0,l.jsx)("div",{className:"fixed inset-0 transition-opacity","aria-hidden":"true",children:(0,l.jsx)("div",{className:"absolute inset-0 bg-gray-500 opacity-75"})}),(0,l.jsx)("span",{className:"hidden sm:inline-block sm:align-middle sm:h-screen","aria-hidden":"true",children:""}),(0,l.jsxs)("div",{className:"inline-block align-bottom bg-white rounded-lg text-left overflow-hidden shadow-xl transform transition-all sm:my-8 sm:align-middle sm:max-w-lg sm:w-full",children:[(0,l.jsx)("div",{className:"bg-white px-4 pt-5 pb-4 sm:p-6 sm:pb-4",children:(0,l.jsx)("div",{className:"sm:flex sm:items-start",children:(0,l.jsxs)("div",{className:"mt-3 text-center sm:mt-0 sm:ml-4 sm:text-left",children:[(0,l.jsx)("h3",{className:"text-lg leading-6 font-medium text-gray-900",children:"Delete Key"}),(0,l.jsx)("div",{className:"mt-2",children:(0,l.jsx)("p",{className:"text-sm text-gray-500",children:"Are you sure you want to 
delete this key ?"})})]})})}),(0,l.jsxs)("div",{className:"bg-gray-50 px-4 py-3 sm:px-6 sm:flex sm:flex-row-reverse",children:[(0,l.jsx)(R.Z,{onClick:N,color:"red",className:"ml-2",children:"Delete"}),(0,l.jsx)(R.Z,{onClick:()=>{m(!1),x(null)},children:"Cancel"})]})]})]})})]})})},ed=e=>{let{userID:t,userSpendData:s,userRole:n,accessToken:a}=e,[o,i]=(0,r.useState)(null==s?void 0:s.spend),[c,d]=(0,r.useState)((null==s?void 0:s.max_budget)||null);(0,r.useEffect)(()=>{(async()=>{if("Admin"===n)try{let e=await j(a);i(e.spend),d(e.max_budget||null)}catch(e){console.error("Error fetching global spend data:",e)}})()},[n,a]);let m=void 0!==o?o.toFixed(5):null;return(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)("p",{className:"text-tremor-default text-tremor-content dark:text-dark-tremor-content",children:"Total Spend"}),(0,l.jsxs)("p",{className:"text-3xl text-tremor-content-strong dark:text-dark-tremor-content-strong font-semibold",children:["$",m]})]})},em=s(55901),eh=s(27166),eu=e=>{let{teams:t,setSelectedTeam:s}=e,[n,a]=(0,r.useState)("");return(0,l.jsxs)("div",{className:"mt-5 mb-5",children:[(0,l.jsx)(U.Z,{children:"Select Team"}),(0,l.jsx)(D.Z,{children:"If you belong to multiple teams, this setting controls which team is used by default when creating new API Keys."}),t&&t.length>0?(0,l.jsx)(em.Z,{defaultValue:"0",children:t.map((e,t)=>(0,l.jsx)(eh.Z,{value:String(t),onClick:()=>s(e),children:e.team_alias},t))}):(0,l.jsxs)(D.Z,{children:["No team created. ",(0,l.jsx)("b",{children:"Defaulting to personal account."})]})]})},ex=s(37963),ep=s(36083);console.log("isLocal:",!1);var ej=e=>{let{userID:t,userRole:s,teams:a,keys:o,setUserRole:i,userEmail:c,setUserEmail:d,setTeams:m,setKeys:h}=e,[u,x]=(0,r.useState)(null),g=(0,n.useSearchParams)();g.get("viewSpend"),(0,n.useRouter)();let y=g.get("token"),[w,Z]=(0,r.useState)(null),[_,k]=(0,r.useState)([]),[b,v]=(0,r.useState)(a?a[0]:null);if(window.addEventListener("beforeunload",function(){sessionStorage.clear()}),(0,r.useEffect)(()=>{if(y){let e=(0,ex.o)(y);if(e){if(console.log("Decoded token:",e),console.log("Decoded key:",e.key),Z(e.key),e.user_role){let t=function(e){if(!e)return"Undefined Role";switch(console.log("Received user role: ".concat(e)),e.toLowerCase()){case"app_owner":case"demo_app_owner":return"App Owner";case"app_admin":case"proxy_admin":return"Admin";case"proxy_admin_viewer":return"Admin Viewer";case"app_user":return"App User";default:return"Unknown Role"}}(e.user_role);console.log("Decoded user_role:",t),i(t)}else console.log("User role not defined");e.user_email?d(e.user_email):console.log("User Email is not set ".concat(e))}}if(t&&w&&s&&!o&&!u){let e=sessionStorage.getItem("userModels"+t);e?k(JSON.parse(e)):(async()=>{try{let e=await p(w,t,s);if(console.log("received teams in user dashboard: ".concat(Object.keys(e),"; team values: ").concat(Object.entries(e.teams))),"Admin"==s){let e=await j(w);x(e),console.log("globalSpend:",e)}else x(e.user_info);h(e.keys),m(e.teams),v(e.teams?e.teams[0]:null),sessionStorage.setItem("userData"+t,JSON.stringify(e.keys)),sessionStorage.setItem("userSpendData"+t,JSON.stringify(e.user_info));let l=(await f(w,t,s)).data.map(e=>e.id);console.log("available_model_names:",l),k(l),console.log("userModels:",_),sessionStorage.setItem("userModels"+t,JSON.stringify(l))}catch(e){console.error("There was an error fetching the data",e)}})()}},[t,y,w,o,s]),null==t||null==y){let e="/sso/key/generate";return console.log("Full URL:",e),window.location.href=e,null}if(null==w)return null;if(null==s&&i("App 
Owner"),s&&"Admin Viewer"==s){let{Title:e,Paragraph:t}=ep.default;return(0,l.jsxs)("div",{children:[(0,l.jsx)(e,{level:1,children:"Access Denied"}),(0,l.jsx)(t,{children:"Ask your proxy admin for access to create keys"})]})}return console.log("inside user dashboard, selected team",b),(0,l.jsx)("div",{className:"w-full mx-4",children:(0,l.jsx)(O.Z,{numItems:1,className:"gap-2 p-8 h-[75vh] w-full mt-2",children:(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(ed,{userID:t,userSpendData:u,userRole:s,accessToken:w}),(0,l.jsx)(ec,{userID:t,accessToken:w,selectedTeam:b||null,data:o,setData:h}),(0,l.jsx)(W,{userID:t,team:b||null,userRole:s,accessToken:w,data:o,setData:h},b?b.team_id:null),(0,l.jsx)(eu,{teams:a,setSelectedTeam:v})]})})})};let{Option:eg}=B.default;var ey=e=>{let{userModels:t,accessToken:s,userID:n}=e,[a]=L.Z.useForm(),[o,i]=(0,r.useState)(!1),c=async e=>{try{d.ZP.info("Requesting access");let{selectedModel:t,accessReason:l}=e;await A(s,t,n,l),i(!0)}catch(e){console.error("Error requesting access:",e)}};return(0,l.jsxs)("div",{children:[(0,l.jsx)(R.Z,{size:"xs",onClick:()=>i(!0),children:"Request Access"}),(0,l.jsx)(K.Z,{title:"Request Access",visible:o,width:800,footer:null,onOk:()=>{i(!1),a.resetFields()},onCancel:()=>{i(!1),a.resetFields()},children:(0,l.jsxs)(L.Z,{form:a,onFinish:c,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:[(0,l.jsx)(L.Z.Item,{label:"Select Model",name:"selectedModel",children:(0,l.jsx)(B.default,{placeholder:"Select model",style:{width:"100%"},children:t.map(e=>(0,l.jsx)(eg,{value:e,children:e},e))})}),(0,l.jsx)(L.Z.Item,{label:"Reason for Access",name:"accessReason",children:(0,l.jsx)(V.Z.TextArea,{rows:4,placeholder:"Enter reason for access"})}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(R.Z,{children:"Request Access"})})]})})]})},ef=e=>{let{accessToken:t,token:s,userRole:n,userID:a}=e,[o,i]=(0,r.useState)({data:[]}),[c,d]=(0,r.useState)([]),[m,h]=(0,r.useState)([]);if((0,r.useEffect)(()=>{if(!t||!s||!n||!a)return;let e=async()=>{try{let e=await g(t,a,n);console.log("Model data response:",e.data),i(e);let s=await y(t,a,n);if(console.log("Model metrics response:",s),d(s),"Admin"===n&&t){let e=await C(t);console.log("Pending Requests:",m),h(e.requests||[])}}catch(e){console.error("There was an error fetching the model data",e)}};t&&s&&n&&a&&e()},[t,s,n,a]),!o||!t||!s||!n||!a)return(0,l.jsx)("div",{children:"Loading..."});let u=[];for(let e=0;e(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{children:(0,l.jsx)(U.Z,{children:e.model_name})}),(0,l.jsx)(en.Z,{children:e.provider}),"Admin"===n&&(0,l.jsx)(en.Z,{children:e.api_base}),(0,l.jsx)(en.Z,{children:e.user_access?(0,l.jsx)(H.Z,{color:"green",children:"Yes"}):(0,l.jsx)(ey,{userModels:u,accessToken:t,userID:a})}),(0,l.jsx)(en.Z,{children:e.input_cost}),(0,l.jsx)(en.Z,{children:e.output_cost}),(0,l.jsx)(en.Z,{children:e.max_tokens})]},e.model_name))})]})}),(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Model Statistics (Number Requests)"}),(0,l.jsx)(X.Z,{data:c,index:"model",categories:["num_requests"],colors:["blue"],yAxisWidth:400,layout:"vertical",tickGap:5})]}),(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Model Statistics (Latency)"}),(0,l.jsx)(X.Z,{data:c,index:"model",categories:["avg_latency_seconds"],colors:["red"],yAxisWidth:400,layout:"vertical",tickGap:5})]})]})})},ew=s(92836),eZ=s(26734),e_=s(41608),ek=s(32126),eb=s(23682);let{Option:ev}=B.default;var 
eN=e=>{let{userID:t,accessToken:s}=e,[n]=L.Z.useForm(),[a,o]=(0,r.useState)(!1),[i,c]=(0,r.useState)(null),[m,u]=(0,r.useState)([]);(0,r.useEffect)(()=>{(async()=>{try{let e=await f(s,t,"any"),l=[];for(let t=0;t{o(!1),n.resetFields()},p=()=>{o(!1),c(null),n.resetFields()},j=async e=>{try{d.ZP.info("Making API Call"),o(!0),console.log("formValues in create user:",e);let l=await h(s,t,e);console.log("user create Response:",l),c(l.key),d.ZP.success("API user Created"),n.resetFields(),localStorage.removeItem("userData"+t)}catch(e){console.error("Error creating the user:",e)}};return(0,l.jsxs)("div",{children:[(0,l.jsx)(R.Z,{className:"mx-auto",onClick:()=>o(!0),children:"+ Create New User"}),(0,l.jsx)(K.Z,{title:"Create User",visible:a,width:800,footer:null,onOk:x,onCancel:p,children:(0,l.jsxs)(L.Z,{form:n,onFinish:j,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:[(0,l.jsx)(L.Z.Item,{label:"User ID",name:"user_id",children:(0,l.jsx)(V.Z,{placeholder:"Enter User ID"})}),(0,l.jsx)(L.Z.Item,{label:"Team ID",name:"team_id",children:(0,l.jsx)(V.Z,{placeholder:"ai_team"})}),(0,l.jsx)(L.Z.Item,{label:"Models",name:"models",children:(0,l.jsx)(B.default,{mode:"multiple",placeholder:"Select models",style:{width:"100%"},children:m.map(e=>(0,l.jsx)(ev,{value:e,children:e},e))})}),(0,l.jsx)(L.Z.Item,{label:"Max Budget (USD)",name:"max_budget",children:(0,l.jsx)(q.Z,{step:.01,precision:2,width:200})}),(0,l.jsx)(L.Z.Item,{label:"Tokens per minute Limit (TPM)",name:"tpm_limit",children:(0,l.jsx)(q.Z,{step:1,width:400})}),(0,l.jsx)(L.Z.Item,{label:"Requests per minute Limit (RPM)",name:"rpm_limit",children:(0,l.jsx)(q.Z,{step:1,width:400})}),(0,l.jsx)(L.Z.Item,{label:"Duration (eg: 30s, 30h, 30d)",name:"duration",children:(0,l.jsx)(V.Z,{})}),(0,l.jsx)(L.Z.Item,{label:"Metadata",name:"metadata",children:(0,l.jsx)(V.Z.TextArea,{rows:4,placeholder:"Enter metadata as JSON"})}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(J.ZP,{htmlType:"submit",children:"Create User"})})]})}),i&&(0,l.jsxs)(K.Z,{title:"Save Your User",visible:a,onOk:x,onCancel:p,footer:null,children:[(0,l.jsxs)("p",{children:["Please save this secret user somewhere safe and accessible. For security reasons, ",(0,l.jsx)("b",{children:"you will not be able to view it again"})," ","through your LiteLLM account. 
If you lose this secret user, you will need to generate a new one."]}),(0,l.jsx)("p",{children:null!=i?"API user: ".concat(i):"User being created, this might take 30s"})]})]})},eS=e=>{let{accessToken:t,token:s,keys:n,userRole:a,userID:o,setKeys:i}=e,[c,d]=(0,r.useState)(null),[m,h]=(0,r.useState)(null),[u,x]=(0,r.useState)(1);if((0,r.useEffect)(()=>{if(!t||!s||!a||!o)return;let e=async()=>{try{let e=await p(t,null,a,!0);console.log("user data response:",e),d(e)}catch(e){console.error("There was an error fetching the model data",e)}};t&&s&&a&&o&&!c&&e();let l=async()=>{try{let e=await v(t,null);console.log("user data response:",e),h(e)}catch(e){console.error("There was an error fetching the model data",e)}};a&&("Admin"==a||"Admin Viewer"==a)&&!m&&l()},[t,s,a,o]),!c||!t||!s||!a||!o)return(0,l.jsx)("div",{children:"Loading..."});let j=async e=>{try{let s=await v(t,e);console.log("user data response:",s),h(s)}catch(e){console.error("There was an error fetching the model data",e)}};return(0,l.jsx)("div",{style:{width:"100%"},children:(0,l.jsxs)(O.Z,{className:"gap-2 p-2 h-[75vh] w-full mt-8",children:[(0,l.jsx)(eN,{userID:o,accessToken:t}),(0,l.jsx)(Q.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh] mb-4",children:(0,l.jsxs)(eZ.Z,{children:[(0,l.jsxs)(e_.Z,{variant:"line",defaultValue:"1",children:[(0,l.jsx)(ew.Z,{value:"1",children:"Key Owners"}),(0,l.jsx)(ew.Z,{value:"2",children:"End-Users"})]}),(0,l.jsxs)(eb.Z,{children:[(0,l.jsx)(ek.Z,{children:(0,l.jsxs)(el.Z,{className:"mt-5",children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"User ID"}),(0,l.jsx)(eo.Z,{children:"User Role"}),(0,l.jsx)(eo.Z,{children:"User Models"}),(0,l.jsx)(eo.Z,{children:"User Spend ($ USD)"}),(0,l.jsx)(eo.Z,{children:"User Max Budget ($ USD)"})]})}),(0,l.jsx)(er.Z,{children:c.map(e=>(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{children:e.user_id}),(0,l.jsx)(en.Z,{children:e.user_role?e.user_role:"app_owner"}),(0,l.jsx)(en.Z,{children:e.models&&e.models.length>0?e.models:"All Models"}),(0,l.jsx)(en.Z,{children:e.spend?e.spend:0}),(0,l.jsx)(en.Z,{children:e.max_budget?e.max_budget:"Unlimited"})]},e.user_id))})]})}),(0,l.jsxs)(ek.Z,{children:[(0,l.jsxs)("div",{className:"flex items-center",children:[(0,l.jsx)("div",{className:"flex-1"}),(0,l.jsxs)("div",{className:"flex-1 flex justify-between items-center",children:[(0,l.jsx)(D.Z,{className:"w-1/4 mr-2 text-right",children:"Key"}),(0,l.jsx)(em.Z,{defaultValue:"1",className:"w-3/4",children:null==n?void 0:n.map((e,t)=>{if(e&&null!==e.key_name&&e.key_name.length>0)return(0,l.jsx)(eh.Z,{value:String(t),onClick:()=>j(e.token),children:e.key_name},t)})})]})]}),(0,l.jsxs)(el.Z,{children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"End User"}),(0,l.jsx)(eo.Z,{children:"Spend"}),(0,l.jsx)(eo.Z,{children:"Total Events"})]})}),(0,l.jsx)(er.Z,{children:null==m?void 0:m.map((e,t)=>(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{children:e.end_user}),(0,l.jsx)(en.Z,{children:e.total_spend}),(0,l.jsx)(en.Z,{children:e.total_events})]},t))})]})]})]})]})}),function(){if(!c)return null;let e=Math.ceil(c.length/25),t=Math.min(25*u,c.length);return(0,l.jsxs)("div",{className:"flex justify-between items-center",children:[(0,l.jsxs)("div",{children:["Showing ",(u-1)*25+1," – ",t," of ",c.length]}),(0,l.jsxs)("div",{className:"flex",children:[(0,l.jsx)("button",{className:"bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-l focus:outline-none",disabled:1===u,onClick:()=>x(u-1),children:"← 
Prev"}),(0,l.jsx)("button",{className:"bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-r focus:outline-none",disabled:u===e,onClick:()=>x(u+1),children:"Next →"})]})]})}()]})})},eA=s(98941),eC=e=>{let{teams:t,searchParams:s,accessToken:n,setTeams:a,userID:o,userRole:i}=e,[c]=L.Z.useForm(),[m]=L.Z.useForm(),{Title:h,Paragraph:u}=ep.default,[p,j]=(0,r.useState)(""),[g,y]=(0,r.useState)(t?t[0]:null),[w,Z]=(0,r.useState)(!1),[_,k]=(0,r.useState)(!1),[b,v]=(0,r.useState)([]),[N,S]=(0,r.useState)(!1),[A,C]=(0,r.useState)(null),T=async e=>{C(e),S(!0)},E=e=>{if(e.includes("all_models")){let e=b.filter(e=>"all"!==e);c.setFieldsValue({models:e})}},F=async()=>{if(null!=A&&null!=t&&null!=n){try{await x(n,A);let e=t.filter(e=>e.team_id!==A);a(e)}catch(e){console.error("Error deleting the team:",e)}S(!1),C(null)}};(0,r.useEffect)(()=>{(async()=>{try{if(null===o||null===i)return;if(null!==n){let e=(await f(n,o,i)).data.map(e=>e.id);console.log("available_model_names:",e),v(e)}}catch(e){console.error("Error fetching user models:",e)}})()},[n,o,i]);let U=async e=>{try{if(null!=n){d.ZP.info("Creating Team");let s=await I(n,e);null!==t?a([...t,s]):a([s]),console.log("response for team create call: ".concat(s)),d.ZP.success("Team created"),Z(!1)}}catch(e){console.error("Error creating the team:",e),d.ZP.error("Error creating the team: "+e)}},z=async e=>{try{if(null!=n&&null!=t){d.ZP.info("Adding Member");let s={role:"user",user_email:e.user_email,user_id:e.user_id},l=await P(n,g.team_id,s);console.log("response for team create call: ".concat(l.data));let r=t.findIndex(e=>(console.log("team.team_id=".concat(e.team_id,"; response.data.team_id=").concat(l.data.team_id)),e.team_id===l.data.team_id));if(console.log("foundIndex: ".concat(r)),-1!==r){let e=[...t];e[r]=l.data,a(e),y(l.data)}k(!1)}}catch(e){console.error("Error creating the team:",e)}};return console.log("received teams ".concat(t)),(0,l.jsx)("div",{className:"w-full mx-4",children:(0,l.jsxs)(O.Z,{numItems:1,className:"gap-2 p-8 h-[75vh] w-full mt-2",children:[(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(h,{level:4,children:"All Teams"}),(0,l.jsxs)(Q.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh]",children:[(0,l.jsxs)(el.Z,{children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"Team Name"}),(0,l.jsx)(eo.Z,{children:"Spend (USD)"}),(0,l.jsx)(eo.Z,{children:"Budget (USD)"}),(0,l.jsx)(eo.Z,{children:"Models"}),(0,l.jsx)(eo.Z,{children:"TPM / RPM Limits"})]})}),(0,l.jsx)(er.Z,{children:t&&t.length>0?t.map(e=>(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{style:{maxWidth:"4px",whiteSpace:"pre-wrap",overflow:"hidden"},children:e.team_alias}),(0,l.jsx)(en.Z,{style:{maxWidth:"4px",whiteSpace:"pre-wrap",overflow:"hidden"},children:e.spend}),(0,l.jsx)(en.Z,{style:{maxWidth:"4px",whiteSpace:"pre-wrap",overflow:"hidden"},children:e.max_budget?e.max_budget:"No limit"}),(0,l.jsx)(en.Z,{style:{maxWidth:"8-x",whiteSpace:"pre-wrap",overflow:"hidden"},children:Array.isArray(e.models)?(0,l.jsx)("div",{style:{display:"flex",flexDirection:"column"},children:0===e.models.length?(0,l.jsx)(H.Z,{size:"xs",className:"mb-1",color:"purple",children:(0,l.jsx)(D.Z,{children:"All Models"})}):e.models.map((e,t)=>(0,l.jsx)(H.Z,{size:"xs",className:"mb-1",color:"blue",children:(0,l.jsx)(D.Z,{children:e.length>30?"".concat(e.slice(0,30),"..."):e})},t))}):null}),(0,l.jsx)(en.Z,{style:{maxWidth:"4px",whiteSpace:"pre-wrap",overflow:"hidden"},children:(0,l.jsxs)(D.Z,{children:["TPM:"," 
",e.tpm_limit?e.tpm_limit:"Unlimited"," ",(0,l.jsx)("br",{}),"RPM:"," ",e.rpm_limit?e.rpm_limit:"Unlimited"]})}),(0,l.jsxs)(en.Z,{children:[(0,l.jsx)(es.Z,{icon:eA.Z,size:"sm"}),(0,l.jsx)(es.Z,{onClick:()=>T(e.team_id),icon:Y.Z,size:"sm"})]})]},e.team_id)):null})]}),N&&(0,l.jsx)("div",{className:"fixed z-10 inset-0 overflow-y-auto",children:(0,l.jsxs)("div",{className:"flex items-end justify-center min-h-screen pt-4 px-4 pb-20 text-center sm:block sm:p-0",children:[(0,l.jsx)("div",{className:"fixed inset-0 transition-opacity","aria-hidden":"true",children:(0,l.jsx)("div",{className:"absolute inset-0 bg-gray-500 opacity-75"})}),(0,l.jsx)("span",{className:"hidden sm:inline-block sm:align-middle sm:h-screen","aria-hidden":"true",children:""}),(0,l.jsxs)("div",{className:"inline-block align-bottom bg-white rounded-lg text-left overflow-hidden shadow-xl transform transition-all sm:my-8 sm:align-middle sm:max-w-lg sm:w-full",children:[(0,l.jsx)("div",{className:"bg-white px-4 pt-5 pb-4 sm:p-6 sm:pb-4",children:(0,l.jsx)("div",{className:"sm:flex sm:items-start",children:(0,l.jsxs)("div",{className:"mt-3 text-center sm:mt-0 sm:ml-4 sm:text-left",children:[(0,l.jsx)("h3",{className:"text-lg leading-6 font-medium text-gray-900",children:"Delete Team"}),(0,l.jsx)("div",{className:"mt-2",children:(0,l.jsx)("p",{className:"text-sm text-gray-500",children:"Are you sure you want to delete this team ?"})})]})})}),(0,l.jsxs)("div",{className:"bg-gray-50 px-4 py-3 sm:px-6 sm:flex sm:flex-row-reverse",children:[(0,l.jsx)(R.Z,{onClick:F,color:"red",className:"ml-2",children:"Delete"}),(0,l.jsx)(R.Z,{onClick:()=>{S(!1),C(null)},children:"Cancel"})]})]})]})})]})]}),(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(R.Z,{className:"mx-auto",onClick:()=>Z(!0),children:"+ Create New Team"}),(0,l.jsx)(K.Z,{title:"Create Team",visible:w,width:800,footer:null,onOk:()=>{Z(!1),c.resetFields()},onCancel:()=>{Z(!1),c.resetFields()},children:(0,l.jsxs)(L.Z,{form:c,onFinish:U,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:[(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)(L.Z.Item,{label:"Team Name",name:"team_alias",rules:[{required:!0,message:"Please input a team name"}],children:(0,l.jsx)(V.Z,{})}),(0,l.jsx)(L.Z.Item,{label:"Models",name:"models",children:(0,l.jsxs)(B.default,{mode:"multiple",placeholder:"Select models",style:{width:"100%"},onChange:e=>E(e),children:[(0,l.jsx)(B.default.Option,{value:"all_models",children:"All Models"},"all_models"),b.map(e=>(0,l.jsx)(B.default.Option,{value:e,children:e},e))]})}),(0,l.jsx)(L.Z.Item,{label:"Max Budget (USD)",name:"max_budget",children:(0,l.jsx)(q.Z,{step:.01,precision:2,width:200})}),(0,l.jsx)(L.Z.Item,{label:"Tokens per minute Limit (TPM)",name:"tpm_limit",children:(0,l.jsx)(q.Z,{step:1,width:400})}),(0,l.jsx)(L.Z.Item,{label:"Requests per minute Limit (RPM)",name:"rpm_limit",children:(0,l.jsx)(q.Z,{step:1,width:400})})]}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(J.ZP,{htmlType:"submit",children:"Create Team"})})]})})]}),(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(h,{level:4,children:"Team Members"}),(0,l.jsx)(u,{children:"If you belong to multiple teams, this setting controls which teams members you see."}),t&&t.length>0?(0,l.jsx)(em.Z,{defaultValue:"0",children:t.map((e,t)=>(0,l.jsx)(eh.Z,{value:String(t),onClick:()=>{y(e)},children:e.team_alias},t))}):(0,l.jsxs)(u,{children:["No team created. 
",(0,l.jsx)("b",{children:"Defaulting to personal account."})]})]}),(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsx)(Q.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh]",children:(0,l.jsxs)(el.Z,{children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"Member Name"}),(0,l.jsx)(eo.Z,{children:"Role"})]})}),(0,l.jsx)(er.Z,{children:g?g.members_with_roles.map((e,t)=>(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{children:e.user_email?e.user_email:e.user_id?e.user_id:null}),(0,l.jsx)(en.Z,{children:e.role})]},t)):null})]})})}),(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(R.Z,{className:"mx-auto mb-5",onClick:()=>k(!0),children:"+ Add member"}),(0,l.jsx)(K.Z,{title:"Add member",visible:_,width:800,footer:null,onOk:()=>{k(!1),m.resetFields()},onCancel:()=>{k(!1),m.resetFields()},children:(0,l.jsxs)(L.Z,{form:c,onFinish:z,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:[(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)(L.Z.Item,{label:"Email",name:"user_email",className:"mb-4",children:(0,l.jsx)(V.Z,{name:"user_email",className:"px-3 py-2 border rounded-md w-full"})}),(0,l.jsx)("div",{className:"text-center mb-4",children:"OR"}),(0,l.jsx)(L.Z.Item,{label:"User ID",name:"user_id",className:"mb-4",children:(0,l.jsx)(V.Z,{name:"user_id",className:"px-3 py-2 border rounded-md w-full"})})]}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(J.ZP,{htmlType:"submit",children:"Add member"})})]})})]})]})})},eT=e=>{let{searchParams:t,accessToken:s}=e,[n]=L.Z.useForm(),[a]=L.Z.useForm(),{Title:o,Paragraph:i}=ep.default,[c,m]=(0,r.useState)(""),[h,u]=(0,r.useState)(null),[x,p]=(0,r.useState)(!1);(0,r.useEffect)(()=>{(async()=>{if(null!=s){let e=[],t=await T(s,"proxy_admin_viewer");t.forEach(t=>{e.push({user_role:t.user_role,user_id:t.user_id,user_email:t.user_email})}),console.log("proxy viewers: ".concat(t));let l=await T(s,"proxy_admin");l.forEach(t=>{e.push({user_role:t.user_role,user_id:t.user_id,user_email:t.user_email})}),console.log("proxy admins: ".concat(l)),console.log("combinedList: ".concat(e)),u(e)}})()},[s]);let j=async e=>{try{if(null!=s&&null!=h){d.ZP.info("Making API Call"),e.user_email,e.user_id;let t=await E(s,e);console.log("response for team create call: ".concat(t));let l=h.findIndex(e=>(console.log("user.user_id=".concat(e.user_id,"; response.user_id=").concat(t.user_id)),e.user_id===t.user_id));console.log("foundIndex: ".concat(l)),-1==l&&(console.log("updates admin with new user"),h.push(t),u(h)),p(!1)}}catch(e){console.error("Error creating the key:",e)}};return console.log("admins: ".concat(null==h?void 0:h.length)),(0,l.jsxs)("div",{className:"w-full m-2 mt-2 p-8",children:[(0,l.jsx)(o,{level:4,children:"Restricted Access"}),(0,l.jsxs)(i,{children:["Add other people to just view spend. 
They cannot create keys, teams or grant users access to new models."," ",(0,l.jsx)("a",{href:"https://docs.litellm.ai/docs/proxy/ui#restrict-ui-access",children:"Requires SSO Setup"})]}),(0,l.jsxs)(O.Z,{numItems:1,className:"gap-2 p-2 w-full",children:[(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsx)(Q.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh]",children:(0,l.jsxs)(el.Z,{children:[(0,l.jsx)(ea.Z,{children:(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(eo.Z,{children:"Member Name"}),(0,l.jsx)(eo.Z,{children:"Role"})]})}),(0,l.jsx)(er.Z,{children:h?h.map((e,t)=>(0,l.jsxs)(ei.Z,{children:[(0,l.jsx)(en.Z,{children:e.user_email?e.user_email:e.user_id?e.user_id:null}),(0,l.jsx)(en.Z,{children:e.user_role})]},t)):null})]})})}),(0,l.jsxs)(M.Z,{numColSpan:1,children:[(0,l.jsx)(R.Z,{className:"mx-auto mb-5",onClick:()=>p(!0),children:"+ Add viewer"}),(0,l.jsx)(K.Z,{title:"Add viewer",visible:x,width:800,footer:null,onOk:()=>{p(!1),a.resetFields()},onCancel:()=>{p(!1),a.resetFields()},children:(0,l.jsxs)(L.Z,{form:n,onFinish:j,labelCol:{span:8},wrapperCol:{span:16},labelAlign:"left",children:[(0,l.jsxs)(l.Fragment,{children:[(0,l.jsx)(L.Z.Item,{label:"Email",name:"user_email",className:"mb-4",children:(0,l.jsx)(V.Z,{name:"user_email",className:"px-3 py-2 border rounded-md w-full"})}),(0,l.jsx)("div",{className:"text-center mb-4",children:"OR"}),(0,l.jsx)(L.Z.Item,{label:"User ID",name:"user_id",className:"mb-4",children:(0,l.jsx)(V.Z,{name:"user_id",className:"px-3 py-2 border rounded-md w-full"})})]}),(0,l.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,l.jsx)(J.ZP,{htmlType:"submit",children:"Add member"})})]})})]})]})]})},eI=s(52273),eP=s(12968),eE=s(67951);async function eF(e,t,s,l){console.log("isLocal:",!1);let r=window.location.origin,n=new eP.ZP.OpenAI({apiKey:l,baseURL:r,dangerouslyAllowBrowser:!0});try{for await(let l of(await n.chat.completions.create({model:s,stream:!0,messages:[{role:"user",content:e}]})))console.log(l),l.choices[0].delta.content&&t(l.choices[0].delta.content)}catch(e){d.ZP.error("Error occurred while generating model response. Please try again. 
Error: ".concat(e))}}var eM=e=>{let{accessToken:t,token:s,userRole:n,userID:a}=e,[o,i]=(0,r.useState)(""),[c,d]=(0,r.useState)(""),[m,h]=(0,r.useState)([]),[u,x]=(0,r.useState)(void 0),[p,j]=(0,r.useState)([]);(0,r.useEffect)(()=>{t&&s&&n&&a&&(async()=>{try{let e=await f(t,a,n);if(console.log("model_info:",e),(null==e?void 0:e.data.length)>0){let t=e.data.map(e=>({value:e.id,label:e.id}));console.log(t),j(t),x(e.data[0].id)}}catch(e){console.error("Error fetching model info:",e)}})()},[t,a,n]);let g=(e,t)=>{h(s=>{let l=s[s.length-1];return l&&l.role===e?[...s.slice(0,s.length-1),{role:e,content:l.content+t}]:[...s,{role:e,content:t}]})},y=async()=>{if(""!==c.trim()&&o&&s&&n&&a){h(e=>[...e,{role:"user",content:c}]);try{u&&await eF(c,e=>g("assistant",e),u,o)}catch(e){console.error("Error fetching model response",e),g("assistant","Error fetching model response")}d("")}};if(n&&"Admin Viewer"==n){let{Title:e,Paragraph:t}=ep.default;return(0,l.jsxs)("div",{children:[(0,l.jsx)(e,{level:1,children:"Access Denied"}),(0,l.jsx)(t,{children:"Ask your proxy admin for access to test models"})]})}return(0,l.jsx)("div",{style:{width:"100%",position:"relative"},children:(0,l.jsx)(O.Z,{className:"gap-2 p-8 h-[80vh] w-full mt-2",children:(0,l.jsx)(Q.Z,{children:(0,l.jsxs)(eZ.Z,{children:[(0,l.jsxs)(e_.Z,{children:[(0,l.jsx)(ew.Z,{children:"Chat"}),(0,l.jsx)(ew.Z,{children:"API Reference"})]}),(0,l.jsxs)(eb.Z,{children:[(0,l.jsxs)(ek.Z,{children:[(0,l.jsx)("div",{className:"sm:max-w-2xl",children:(0,l.jsxs)(O.Z,{numItems:2,children:[(0,l.jsxs)(M.Z,{children:[(0,l.jsx)(D.Z,{children:"API Key"}),(0,l.jsx)(eI.Z,{placeholder:"Type API Key here",type:"password",onValueChange:i,value:o})]}),(0,l.jsxs)(M.Z,{className:"mx-2",children:[(0,l.jsx)(D.Z,{children:"Select Model:"}),(0,l.jsx)(B.default,{placeholder:"Select a Model",onChange:e=>{console.log("selected ".concat(e)),x(e)},options:p,style:{width:"200px"}})]})]})}),(0,l.jsxs)(el.Z,{className:"mt-5",style:{display:"block",maxHeight:"60vh",overflowY:"auto"},children:[(0,l.jsx)(ea.Z,{children:(0,l.jsx)(ei.Z,{children:(0,l.jsx)(en.Z,{})})}),(0,l.jsx)(er.Z,{children:m.map((e,t)=>(0,l.jsx)(ei.Z,{children:(0,l.jsx)(en.Z,{children:"".concat(e.role,": ").concat(e.content)})},t))})]}),(0,l.jsx)("div",{className:"mt-3",style:{position:"absolute",bottom:5,width:"95%"},children:(0,l.jsxs)("div",{className:"flex",children:[(0,l.jsx)(eI.Z,{type:"text",value:c,onChange:e=>d(e.target.value),placeholder:"Type your message..."}),(0,l.jsx)(R.Z,{onClick:y,className:"ml-2",children:"Send"})]})})]}),(0,l.jsx)(ek.Z,{children:(0,l.jsxs)(eZ.Z,{children:[(0,l.jsxs)(e_.Z,{children:[(0,l.jsx)(ew.Z,{children:"OpenAI Python SDK"}),(0,l.jsx)(ew.Z,{children:"LlamaIndex"}),(0,l.jsx)(ew.Z,{children:"Langchain Py"})]}),(0,l.jsxs)(eb.Z,{children:[(0,l.jsx)(ek.Z,{children:(0,l.jsx)(eE.Z,{language:"python",children:'\nimport openai\nclient = openai.OpenAI(\n api_key="your_api_key",\n base_url="http://0.0.0.0:4000" # proxy base url\n)\n\nresponse = client.chat.completions.create(\n model="gpt-3.5-turbo", # model to use from Models Tab\n messages = [\n {\n "role": "user",\n "content": "this is a test request, write a short poem"\n }\n ],\n extra_body={\n "metadata": {\n "generation_name": "ishaan-generation-openai-client",\n "generation_id": "openai-client-gen-id22",\n "trace_id": "openai-client-trace-id22",\n "trace_user_id": "openai-client-user-id2"\n }\n }\n)\n\nprint(response)\n '})}),(0,l.jsx)(ek.Z,{children:(0,l.jsx)(eE.Z,{language:"python",children:'\nimport os, dotenv\n\nfrom llama_index.llms 
import AzureOpenAI\nfrom llama_index.embeddings import AzureOpenAIEmbedding\nfrom llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n\nllm = AzureOpenAI(\n engine="azure-gpt-3.5", # model_name on litellm proxy\n temperature=0.0,\n azure_endpoint="http://0.0.0.0:4000", # litellm proxy endpoint\n api_key="sk-1234", # litellm proxy API Key\n api_version="2023-07-01-preview",\n)\n\nembed_model = AzureOpenAIEmbedding(\n deployment_name="azure-embedding-model",\n azure_endpoint="http://0.0.0.0:4000",\n api_key="sk-1234",\n api_version="2023-07-01-preview",\n)\n\n\ndocuments = SimpleDirectoryReader("llama_index_data").load_data()\nservice_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)\nindex = VectorStoreIndex.from_documents(documents, service_context=service_context)\n\nquery_engine = index.as_query_engine()\nresponse = query_engine.query("What did the author do growing up?")\nprint(response)\n\n '})}),(0,l.jsx)(ek.Z,{children:(0,l.jsx)(eE.Z,{language:"python",children:'\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n SystemMessagePromptTemplate,\n)\nfrom langchain.schema import HumanMessage, SystemMessage\n\nchat = ChatOpenAI(\n openai_api_base="http://0.0.0.0:8000",\n model = "gpt-3.5-turbo",\n temperature=0.1,\n extra_body={\n "metadata": {\n "generation_name": "ishaan-generation-langchain-client",\n "generation_id": "langchain-client-gen-id22",\n "trace_id": "langchain-client-trace-id22",\n "trace_user_id": "langchain-client-user-id2"\n }\n }\n)\n\nmessages = [\n SystemMessage(\n content="You are a helpful assistant that im using to make a test request to."\n ),\n HumanMessage(\n content="test from litellm. tell me why it\'s amazing in 1 sentence"\n ),\n]\nresponse = chat(messages)\n\nprint(response)\n\n '})})]})]})})]})]})})})})},eO=s(33509),eR=s(95781);let{Sider:eD}=eO.default;var eU=e=>{let{setPage:t,userRole:s,defaultSelectedKey:r}=e;return"Admin Viewer"==s?(0,l.jsx)(eO.default,{style:{minHeight:"100vh",maxWidth:"120px"},children:(0,l.jsx)(eD,{width:120,children:(0,l.jsxs)(eR.Z,{mode:"inline",defaultSelectedKeys:r||["4"],style:{height:"100%",borderRight:0},children:[(0,l.jsx)(eR.Z.Item,{onClick:()=>t("api-keys"),children:"API Keys"},"4"),(0,l.jsx)(eR.Z.Item,{onClick:()=>t("models"),children:"Models"},"2"),(0,l.jsx)(eR.Z.Item,{onClick:()=>t("llm-playground"),children:"Chat UI"},"3"),(0,l.jsx)(eR.Z.Item,{onClick:()=>t("usage"),children:"Usage"},"1")]})})}):(0,l.jsx)(eO.default,{style:{minHeight:"100vh",maxWidth:"100px"},children:(0,l.jsx)(eD,{width:100,children:(0,l.jsxs)(eR.Z,{mode:"inline",defaultSelectedKeys:r||["1"],style:{height:"100%",borderRight:0},children:[(0,l.jsx)(eR.Z.Item,{onClick:()=>t("api-keys"),children:(0,l.jsx)(D.Z,{children:"API Keys"})},"1"),(0,l.jsx)(eR.Z.Item,{onClick:()=>t("llm-playground"),children:(0,l.jsx)(D.Z,{children:"Test Key"})},"3"),"Admin"==s?(0,l.jsx)(eR.Z.Item,{onClick:()=>t("teams"),children:(0,l.jsx)(D.Z,{children:"Teams"})},"6"):null,(0,l.jsx)(eR.Z.Item,{onClick:()=>t("usage"),children:(0,l.jsx)(D.Z,{children:"Usage"})},"4"),"Admin"==s?(0,l.jsx)(eR.Z.Item,{onClick:()=>t("users"),children:(0,l.jsx)(D.Z,{children:"Users"})},"5"):null,(0,l.jsx)(eR.Z.Item,{onClick:()=>t("models"),children:(0,l.jsx)(D.Z,{children:"Models"})},"2"),"Admin"==s?(0,l.jsx)(eR.Z.Item,{onClick:()=>t("admin-panel"),children:(0,l.jsx)(D.Z,{children:"Admin"})},"7"):null]})})})},ez=e=>{let{accessToken:t,token:s,userRole:n,userID:a}=e,o=new 
Date,[i,c]=(0,r.useState)([]),[d,m]=(0,r.useState)([]),[h,u]=(0,r.useState)([]),[x,p]=(0,r.useState)([]),[j,g]=(0,r.useState)([]),[y,f]=(0,r.useState)([]),[w,v]=(0,r.useState)([]),A=new Date(o.getFullYear(),o.getMonth(),1),C=new Date(o.getFullYear(),o.getMonth()+1,0),T=P(A),I=P(C);function P(e){let t=e.getFullYear(),s=e.getMonth()+1,l=e.getDate();return"".concat(t,"-").concat(s<10?"0"+s:s,"-").concat(l<10?"0"+l:l)}return console.log("Start date is ".concat(T)),console.log("End date is ".concat(I)),(0,r.useEffect)(()=>{t&&s&&n&&a&&(async()=>{try{if(console.log("user role: ".concat(n)),"Admin"==n||"Admin Viewer"==n){let e=await k(t);c(e);let s=(await b(t)).map(e=>({key:(e.key_name||e.key_alias||e.api_key).substring(0,7),spend:e.total_spend}));m(s);let l=(await N(t)).map(e=>({key:e.model,spend:e.total_spend}));u(l);let r=await Z(t);console.log("teamSpend",r),g(r.daily_spend),f(r.teams),v(r.total_spend_per_team)}else"App Owner"==n&&await _(t,s,n,a,T,I).then(async e=>{if(console.log("result from spend logs call",e),"daily_spend"in e){let t=e.daily_spend;console.log("daily spend",t),c(t);let s=e.top_api_keys;m(s)}else{let s=(await S(t,function(e){let t=[];e.forEach(e=>{Object.entries(e).forEach(e=>{let[s,l]=e;"spend"!==s&&"startTime"!==s&&"models"!==s&&"users"!==s&&t.push({key:s,spend:l})})}),t.sort((e,t)=>Number(t.spend)-Number(e.spend));let s=t.slice(0,5).map(e=>e.key);return console.log("topKeys: ".concat(Object.keys(s[0]))),s}(e))).info.map(e=>({key:(e.key_name||e.key_alias||e.token).substring(0,7),spend:e.spend}));m(s),p(function(e){let t={};e.forEach(e=>{Object.entries(e.users).forEach(e=>{let[s,l]=e;""!==s&&null!=s&&"None"!=s&&(t[s]||(t[s]=0),t[s]+=l)})});let s=Object.entries(t).map(e=>{let[t,s]=e;return{user_id:t,spend:s}});s.sort((e,t)=>t.spend-e.spend);let l=s.slice(0,5);return console.log("topKeys: ".concat(Object.values(l[0]))),l}(e)),c(e)}})}catch(e){console.error("There was an error fetching the data",e)}})()},[t,s,n,a,T,I]),(0,l.jsx)("div",{style:{width:"100%"},className:"p-8",children:(0,l.jsxs)(eZ.Z,{children:[(0,l.jsxs)(e_.Z,{className:"mt-2",children:[(0,l.jsx)(ew.Z,{children:"All Up"}),(0,l.jsx)(ew.Z,{children:"Team Based Usage"})]}),(0,l.jsxs)(eb.Z,{children:[(0,l.jsx)(ek.Z,{children:(0,l.jsxs)(O.Z,{numItems:2,className:"gap-2 h-[75vh] w-full",children:[(0,l.jsx)(M.Z,{numColSpan:2,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Monthly Spend"}),(0,l.jsx)(X.Z,{data:i,index:"date",categories:["spend"],colors:["blue"],valueFormatter:e=>"$ ".concat(new Intl.NumberFormat("us").format(e).toString()),yAxisWidth:100,tickGap:5})]})}),(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Top API Keys"}),(0,l.jsx)(X.Z,{className:"mt-4 h-40",data:d,index:"key",categories:["spend"],colors:["blue"],yAxisWidth:80,tickGap:5,layout:"vertical",showXAxis:!1,showLegend:!1})]})}),(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Top Users"}),(0,l.jsx)(X.Z,{className:"mt-4 h-40",data:x,index:"user_id",categories:["spend"],colors:["blue"],yAxisWidth:200,layout:"vertical",showXAxis:!1,showLegend:!1})]})}),(0,l.jsx)(M.Z,{numColSpan:1,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Top Models"}),(0,l.jsx)(X.Z,{className:"mt-4 h-40",data:h,index:"key",categories:["spend"],colors:["blue"],yAxisWidth:200,layout:"vertical",showXAxis:!1,showLegend:!1})]})})]})}),(0,l.jsx)(ek.Z,{children:(0,l.jsxs)(O.Z,{numItems:2,className:"gap-2 p-10 h-[75vh] 
w-full",children:[(0,l.jsx)(M.Z,{numColSpan:2,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Daily Spend Per Team"}),(0,l.jsx)(X.Z,{className:"h-72",data:j,index:"date",categories:y,yAxisWidth:30,stack:!0})]})}),(0,l.jsx)(M.Z,{numColSpan:2,children:(0,l.jsxs)(Q.Z,{children:[(0,l.jsx)(U.Z,{children:"Total Spend Per Team"}),(0,l.jsx)(X.Z,{className:"h-72",data:w,index:"team_id",categories:["total_spend"],yAxisWidth:30})]})})]})})]})]})})},eB=()=>{let{Title:e,Paragraph:t}=ep.default,[s,a]=(0,r.useState)(""),[o,i]=(0,r.useState)(null),[d,m]=(0,r.useState)(null),[h,u]=(0,r.useState)(null),[x,p]=(0,r.useState)(!0),j=(0,n.useSearchParams)(),g=j.get("userID"),y=j.get("token"),[f,w]=(0,r.useState)("api-keys"),[Z,_]=(0,r.useState)(null);return(0,r.useEffect)(()=>{if(y){let e=(0,ex.o)(y);if(e){if(console.log("Decoded token:",e),console.log("Decoded key:",e.key),_(e.key),e.user_role){let t=function(e){if(!e)return"Undefined Role";switch(console.log("Received user role: ".concat(e.toLowerCase())),console.log("Received user role length: ".concat(e.toLowerCase().length)),e.toLowerCase()){case"app_owner":case"demo_app_owner":return"App Owner";case"app_admin":case"proxy_admin":return"Admin";case"proxy_admin_viewer":return"Admin Viewer";case"app_user":return"App User";default:return"Unknown Role"}}(e.user_role);console.log("Decoded user_role:",t),a(t),"Admin Viewer"==t&&w("usage")}else console.log("User role not defined");e.user_email?i(e.user_email):console.log("User Email is not set ".concat(e)),e.login_method?p("username_password"==e.login_method):console.log("User Email is not set ".concat(e))}}},[y]),(0,l.jsx)(r.Suspense,{fallback:(0,l.jsx)("div",{children:"Loading..."}),children:(0,l.jsxs)("div",{className:"flex flex-col min-h-screen",children:[(0,l.jsx)(c,{userID:g,userRole:s,userEmail:o,showSSOBanner:x}),(0,l.jsxs)("div",{className:"flex flex-1 overflow-auto",children:[(0,l.jsx)("div",{className:"mt-8",children:(0,l.jsx)(eU,{setPage:w,userRole:s,defaultSelectedKey:null})}),"api-keys"==f?(0,l.jsx)(ej,{userID:g,userRole:s,teams:d,keys:h,setUserRole:a,userEmail:o,setUserEmail:i,setTeams:m,setKeys:u}):"models"==f?(0,l.jsx)(ef,{userID:g,userRole:s,token:y,accessToken:Z}):"llm-playground"==f?(0,l.jsx)(eM,{userID:g,userRole:s,token:y,accessToken:Z}):"users"==f?(0,l.jsx)(eS,{userID:g,userRole:s,token:y,keys:h,accessToken:Z,setKeys:u}):"teams"==f?(0,l.jsx)(eC,{teams:d,setTeams:m,searchParams:j,accessToken:Z,userID:g,userRole:s}):"admin-panel"==f?(0,l.jsx)(eT,{setTeams:m,searchParams:j,accessToken:Z}):(0,l.jsx)(ez,{userID:g,userRole:s,token:y,accessToken:Z})]})]})})}}},function(e){e.O(0,[798,971,69,744],function(){return e(e.s=20661)}),_N_E=e.O()}]);
\ No newline at end of file
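For reviewability, here is a de-minified TypeScript sketch of the streaming chat helper embedded in the elided bundle (minified as `eF`). Identifier names are reconstructed, and it assumes the `openai` v4 JS client that the bundle wraps; the compiled artifact remains authoritative.

```typescript
import OpenAI from "openai";

// Reconstructed sketch of the playground's streaming helper. The dashboard
// is served from the proxy itself, so the page origin doubles as the
// OpenAI-compatible base URL.
async function generateModelResponse(
  prompt: string,
  onToken: (delta: string) => void,
  model: string,
  apiKey: string,
): Promise<void> {
  const client = new OpenAI({
    apiKey,
    baseURL: window.location.origin,
    dangerouslyAllowBrowser: true, // key is typed into the UI, per the bundle
  });

  try {
    const stream = await client.chat.completions.create({
      model,
      stream: true,
      messages: [{ role: "user", content: prompt }],
    });
    for await (const chunk of stream) {
      const delta = chunk.choices[0]?.delta?.content;
      if (delta) onToken(delta); // caller appends each delta to the transcript
    }
  } catch (err) {
    // the bundle surfaces this via an antd toast; console.error is a stand-in
    console.error(`Error occurred while generating model response: ${err}`);
  }
}
```

In the bundle, `onToken` appends each streamed delta to the last assistant message in the chat table, creating the incremental-typing effect in the playground tab.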
diff --git a/ui/litellm-dashboard/out/index.html b/ui/litellm-dashboard/out/index.html
index 30858c11d..e69de29bb 100644
--- a/ui/litellm-dashboard/out/index.html
+++ b/ui/litellm-dashboard/out/index.html
@@ -1 +0,0 @@
-🚅 LiteLLM
\ No newline at end of file
diff --git a/ui/litellm-dashboard/out/index.txt b/ui/litellm-dashboard/out/index.txt
index dbabf4615..e69de29bb 100644
--- a/ui/litellm-dashboard/out/index.txt
+++ b/ui/litellm-dashboard/out/index.txt
@@ -1,7 +0,0 @@
-2:I[77831,[],""]
-3:I[90177,["798","static/chunks/798-4baed68da0c5497d.js","931","static/chunks/app/page-37392d6753f8a3d0.js"],""]
-4:I[5613,[],""]
-5:I[31778,[],""]
-0:["L9N6TOWJaqSp22Vj96YE4",[[["",{"children":["__PAGE__",{}]},"$undefined","$undefined",true],["",{"children":["__PAGE__",{},["$L1",["$","$L2",null,{"propsForComponent":{"params":{}},"Component":"$3","isStaticGeneration":true}],null]]},[null,["$","html",null,{"lang":"en","children":["$","body",null,{"className":"__className_c23dc8","children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"loading":"$undefined","loadingStyles":"$undefined","loadingScripts":"$undefined","hasLoading":false,"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be found."}]}]]}]}]],"notFoundStyles":[],"styles":null}]}]}],null]],[[["$","link","0",{"rel":"stylesheet","href":"/ui/_next/static/css/f8da5a6a5b29d249.css","precedence":"next","crossOrigin":""}]],"$L6"]]]]
-6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"🚅 LiteLLM"}],["$","meta","3",{"name":"description","content":"LiteLLM Proxy Admin UI"}],["$","link","4",{"rel":"icon","href":"/ui/favicon.ico","type":"image/x-icon","sizes":"16x16"}],["$","meta","5",{"name":"next-size-adjust"}]]
-1:null
diff --git a/ui/litellm-dashboard/src/components/create_key_button.tsx b/ui/litellm-dashboard/src/components/create_key_button.tsx
index 30fcc8432..17476232c 100644
--- a/ui/litellm-dashboard/src/components/create_key_button.tsx
+++ b/ui/litellm-dashboard/src/components/create_key_button.tsx
@@ -91,16 +91,17 @@ const CreateKey: React.FC<CreateKeyProps> = ({
     }
   };
-  const sendSlackAlert = async () => {
-    try {
-      console.log("Sending Slack alert...");
-      const response = await slackBudgetAlertsHealthCheck(accessToken);
-      console.log("slackBudgetAlertsHealthCheck Response:", response);
-      console.log("Testing Slack alert successful");
-    } catch (error) {
-      console.error("Error sending Slack alert:", error);
+
+  const handleModelSelection = (selectedModels: string[]) => {
+    if (selectedModels.includes("all_models")) {
+      // Select all models except "All Models"
+      const allModelsExceptAll = team ? team.models : userModels;
+      form.setFieldsValue({
+        models: allModelsExceptAll
+      });
     }
   };
+
   const handleCopy = () => {
@@ -153,8 +154,11 @@ const CreateKey: React.FC<CreateKeyProps> = ({
                 mode="multiple"
                 placeholder="Select models"
                 style={{ width: "100%" }}
+                onChange={(selectedModels) => handleModelSelection(selectedModels)}
               >
-
+
                 {team && team.models ? (
                   team.models.map((model: string) => (