refactor location of proxy

This commit is contained in:
Ishaan Jaff 2025-04-23 14:38:44 -07:00
parent baa5564f95
commit ce58c53ff1
413 changed files with 2087 additions and 2088 deletions

View file

@ -61,7 +61,7 @@ from litellm.constants import (
DEFAULT_ALLOWED_FAILS,
)
from litellm.types.guardrails import GuardrailItem
from litellm.proxy._types import (
from litellm_proxy._types import (
KeyManagementSystem,
KeyManagementSettings,
LiteLLM_UpperboundKeyGenerateParams,
@ -1048,7 +1048,6 @@ from .exceptions import (
MockException,
)
from .budget_manager import BudgetManager
from .proxy.proxy_cli import run_server
from .router import Router
from .assistants.main import *
from .batches.main import *

View file

@ -4,7 +4,7 @@ from typing import TYPE_CHECKING, Any, Optional, Union
import litellm
from litellm._logging import verbose_logger
from litellm.proxy._types import UserAPIKeyAuth
from litellm_proxy._types import UserAPIKeyAuth
from .integrations.custom_logger import CustomLogger
from .integrations.datadog.datadog import DataDogLogger
@ -143,7 +143,7 @@ class ServiceLogging(CustomLogger):
event_metadata=event_metadata,
)
elif callback == "otel" or isinstance(callback, OpenTelemetry):
from litellm.proxy.proxy_server import open_telemetry_logger
from litellm_proxy.proxy_server import open_telemetry_logger
await self.init_otel_logger_if_none()
@ -188,7 +188,7 @@ class ServiceLogging(CustomLogger):
initializes otel_logger if it is None or no attribute exists on ServiceLogging Object
"""
from litellm.proxy.proxy_server import open_telemetry_logger
from litellm_proxy.proxy_server import open_telemetry_logger
if not hasattr(self, "otel_logger"):
if open_telemetry_logger is not None and isinstance(
@ -251,7 +251,7 @@ class ServiceLogging(CustomLogger):
event_metadata=event_metadata,
)
elif callback == "otel" or isinstance(callback, OpenTelemetry):
from litellm.proxy.proxy_server import open_telemetry_logger
from litellm_proxy.proxy_server import open_telemetry_logger
await self.init_otel_logger_if_none()

View file

@ -281,7 +281,7 @@ class QdrantSemanticCache(BaseCache):
async def async_set_cache(self, key, value, **kwargs):
import uuid
from litellm.proxy.proxy_server import llm_model_list, llm_router
from litellm_proxy.proxy_server import llm_model_list, llm_router
print_verbose(f"async qdrant semantic-cache set_cache, kwargs: {kwargs}")
@ -344,7 +344,7 @@ class QdrantSemanticCache(BaseCache):
async def async_get_cache(self, key, **kwargs):
print_verbose(f"async qdrant semantic-cache get_cache, kwargs: {kwargs}")
from litellm.proxy.proxy_server import llm_model_list, llm_router
from litellm_proxy.proxy_server import llm_model_list, llm_router
# get the messages
messages = kwargs["messages"]

View file

@ -279,7 +279,7 @@ class RedisSemanticCache(BaseCache):
Returns:
List[float]: The embedding vector
"""
from litellm.proxy.proxy_server import llm_model_list, llm_router
from litellm_proxy.proxy_server import llm_model_list, llm_router
# Route the embedding request through the proxy if appropriate
router_model_names = (

View file

@ -26,8 +26,8 @@ from litellm.llms.custom_httpx.http_handler import (
get_async_httpx_client,
httpxSpecialProvider,
)
from litellm.proxy._types import AlertType, CallInfo, VirtualKeyEvent, WebhookEvent
from litellm.types.integrations.slack_alerting import *
from litellm_proxy._types import AlertType, CallInfo, VirtualKeyEvent, WebhookEvent
from ..email_templates.templates import *
from .batching_handler import send_to_webhook, squash_payloads
@ -823,9 +823,9 @@ class SlackAlerting(CustomBatchLogger):
### UNIQUE CACHE KEY ###
cache_key = provider + region_name
outage_value: Optional[
ProviderRegionOutageModel
] = await self.internal_usage_cache.async_get_cache(key=cache_key)
outage_value: Optional[ProviderRegionOutageModel] = (
await self.internal_usage_cache.async_get_cache(key=cache_key)
)
if (
getattr(exception, "status_code", None) is None
@ -1148,7 +1148,7 @@ Model Info:
email_logo_url: Optional[str] = None,
email_support_contact: Optional[str] = None,
):
from litellm.proxy.proxy_server import CommonProxyErrors, premium_user
from litellm_proxy.proxy_server import CommonProxyErrors, premium_user
if premium_user is not True:
if email_logo_url is not None or email_support_contact is not None:
@ -1161,7 +1161,7 @@ Model Info:
self, webhook_event: WebhookEvent
) -> bool:
try:
from litellm.proxy.utils import send_email
from litellm_proxy.utils import send_email
if self.alerting is None or "email" not in self.alerting:
# do nothing if user does not want email alerts
@ -1170,7 +1170,7 @@ Model Info:
self.alerting,
)
return False
from litellm.proxy.proxy_server import premium_user, prisma_client
from litellm_proxy.proxy_server import premium_user, prisma_client
email_logo_url = os.getenv(
"SMTP_SENDER_LOGO", os.getenv("EMAIL_LOGO_URL", None)
@ -1271,8 +1271,8 @@ Model Info:
Returns -> True if sent, False if not.
"""
from litellm.proxy.proxy_server import premium_user
from litellm.proxy.utils import send_email
from litellm_proxy.proxy_server import premium_user
from litellm_proxy.utils import send_email
email_logo_url = os.getenv(
"SMTP_SENDER_LOGO", os.getenv("EMAIL_LOGO_URL", None)
@ -1406,9 +1406,9 @@ Model Info:
self.alert_to_webhook_url is not None
and alert_type in self.alert_to_webhook_url
):
slack_webhook_url: Optional[
Union[str, List[str]]
] = self.alert_to_webhook_url[alert_type]
slack_webhook_url: Optional[Union[str, List[str]]] = (
self.alert_to_webhook_url[alert_type]
)
elif self.default_webhook_url is not None:
slack_webhook_url = self.default_webhook_url
else:
@ -1598,7 +1598,7 @@ Model Info:
return
try:
from litellm.proxy.spend_tracking.spend_management_endpoints import (
from litellm_proxy.spend_tracking.spend_management_endpoints import (
_get_spend_report_for_time_range,
)
@ -1662,7 +1662,7 @@ Model Info:
try:
from calendar import monthrange
from litellm.proxy.spend_tracking.spend_management_endpoints import (
from litellm_proxy.spend_tracking.spend_management_endpoints import (
_get_spend_report_for_time_range,
)

View file

@ -5,8 +5,8 @@ Utils used for slack alerting
import asyncio
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from litellm.proxy._types import AlertType
from litellm.secret_managers.main import get_secret
from litellm_proxy._types import AlertType
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as _Logging
@ -17,7 +17,7 @@ else:
def process_slack_alerting_variables(
alert_to_webhook_url: Optional[Dict[AlertType, Union[List[str], str]]]
alert_to_webhook_url: Optional[Dict[AlertType, Union[List[str], str]]],
) -> Optional[Dict[AlertType, Union[List[str], str]]]:
"""
process alert_to_webhook_url

View file

@ -321,7 +321,7 @@ class AzureBlobStorageLogger(CustomBatchLogger):
"""
Checks if the user is a premium user, raises an error if not
"""
from litellm.proxy.proxy_server import CommonProxyErrors, premium_user
from litellm_proxy.proxy_server import CommonProxyErrors, premium_user
if premium_user is not True:
raise ValueError(

View file

@ -165,7 +165,7 @@ class CustomGuardrail(CustomLogger):
"""
Returns True if the user is a premium user
"""
from litellm.proxy.proxy_server import CommonProxyErrors, premium_user
from litellm_proxy.proxy_server import CommonProxyErrors, premium_user
if premium_user is not True:
verbose_logger.warning(
@ -183,7 +183,7 @@ class CustomGuardrail(CustomLogger):
"""
Builds `StandardLoggingGuardrailInformation` and adds it to the request metadata so it can be used for logging to DataDog, Langfuse, etc.
"""
from litellm.proxy.proxy_server import premium_user
from litellm_proxy.proxy_server import premium_user
if premium_user is not True:
verbose_logger.warning(

View file

@ -15,7 +15,6 @@ from typing import (
from pydantic import BaseModel
from litellm.caching.caching import DualCache
from litellm.proxy._types import UserAPIKeyAuth
from litellm.types.integrations.argilla import ArgillaItem
from litellm.types.llms.openai import AllMessageValues, ChatCompletionRequest
from litellm.types.utils import (
@ -26,6 +25,7 @@ from litellm.types.utils import (
StandardCallbackDynamicParams,
StandardLoggingPayload,
)
from litellm_proxy._types import UserAPIKeyAuth
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span

View file

@ -6,7 +6,7 @@ import os
from typing import List, Optional
from litellm._logging import verbose_logger, verbose_proxy_logger
from litellm.proxy._types import WebhookEvent
from litellm_proxy._types import WebhookEvent
# we use this for the email header, please send a test email if you change this. verify it looks good on email
LITELLM_LOGO_URL = "https://litellm-listing.s3.amazonaws.com/litellm_logo.png"
@ -19,7 +19,7 @@ async def get_all_team_member_emails(team_id: Optional[str] = None) -> list:
)
if team_id is None:
return []
from litellm.proxy.proxy_server import prisma_client
from litellm_proxy.proxy_server import prisma_client
if prisma_client is None:
raise Exception("Not connected to DB!")
@ -71,7 +71,7 @@ async def send_team_budget_alert(webhook_event: WebhookEvent) -> bool:
Send an Email Alert to All Team Members when the Team Budget is crossed
Returns -> True if sent, False if not.
"""
from litellm.proxy.utils import send_email
from litellm_proxy.utils import send_email
_team_id = webhook_event.team_id
team_alias = webhook_event.team_alias

View file

@ -9,10 +9,10 @@ from urllib.parse import quote
from litellm._logging import verbose_logger
from litellm.integrations.additional_logging_utils import AdditionalLoggingUtils
from litellm.integrations.gcs_bucket.gcs_bucket_base import GCSBucketBase
from litellm.proxy._types import CommonProxyErrors
from litellm.types.integrations.base_health_check import IntegrationHealthCheckStatus
from litellm.types.integrations.gcs_bucket import *
from litellm.types.utils import StandardLoggingPayload
from litellm_proxy._types import CommonProxyErrors
if TYPE_CHECKING:
from litellm.llms.vertex_ai.vertex_llm_base import VertexBase
@ -22,7 +22,7 @@ else:
class GCSBucketLogger(GCSBucketBase, AdditionalLoggingUtils):
def __init__(self, bucket_name: Optional[str] = None) -> None:
from litellm.proxy.proxy_server import premium_user
from litellm_proxy.proxy_server import premium_user
super().__init__(bucket_name=bucket_name)
@ -48,7 +48,7 @@ class GCSBucketLogger(GCSBucketBase, AdditionalLoggingUtils):
#### ASYNC ####
async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
from litellm.proxy.proxy_server import premium_user
from litellm_proxy.proxy_server import premium_user
if premium_user is not True:
raise ValueError(

View file

@ -15,7 +15,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from litellm.types.utils import StandardLoggingPayload
if TYPE_CHECKING:
from litellm.proxy._types import SpendLogsPayload
from litellm_proxy._types import SpendLogsPayload
else:
SpendLogsPayload = Any
@ -44,7 +44,7 @@ class GcsPubSubLogger(CustomBatchLogger):
topic_id (str): Pub/Sub topic ID
credentials_path (str, optional): Path to Google Cloud credentials JSON file
"""
from litellm.proxy.utils import _premium_user_check
from litellm_proxy.utils import _premium_user_check
_premium_user_check()
@ -108,10 +108,10 @@ class GcsPubSubLogger(CustomBatchLogger):
Raises:
Raises a NON Blocking verbose_logger.exception if an error occurs
"""
from litellm.proxy.spend_tracking.spend_tracking_utils import (
from litellm_proxy.spend_tracking.spend_tracking_utils import (
get_logging_payload,
)
from litellm.proxy.utils import _premium_user_check
from litellm_proxy.utils import _premium_user_check
_premium_user_check()

View file

@ -1,7 +1,7 @@
import json
from typing import TYPE_CHECKING, Any, Union
from litellm.proxy._types import SpanAttributes
from litellm_proxy._types import SpanAttributes
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span

View file

@ -18,10 +18,10 @@ if TYPE_CHECKING:
from opentelemetry.sdk.trace.export import SpanExporter as _SpanExporter
from opentelemetry.trace import Span as _Span
from litellm.proxy._types import (
from litellm_proxy._types import (
ManagementEndpointLoggingPayload as _ManagementEndpointLoggingPayload,
)
from litellm.proxy.proxy_server import UserAPIKeyAuth as _UserAPIKeyAuth
from litellm_proxy.proxy_server import UserAPIKeyAuth as _UserAPIKeyAuth
Span = Union[_Span, Any]
SpanExporter = Union[_SpanExporter, Any]
@ -350,9 +350,9 @@ class OpenTelemetry(CustomLogger):
"""
from opentelemetry import trace
standard_callback_dynamic_params: Optional[
StandardCallbackDynamicParams
] = kwargs.get("standard_callback_dynamic_params")
standard_callback_dynamic_params: Optional[StandardCallbackDynamicParams] = (
kwargs.get("standard_callback_dynamic_params")
)
if not standard_callback_dynamic_params:
return
@ -406,7 +406,7 @@ class OpenTelemetry(CustomLogger):
def set_tools_attributes(self, span: Span, tools):
import json
from litellm.proxy._types import SpanAttributes
from litellm_proxy._types import SpanAttributes
if not tools:
return
@ -460,7 +460,7 @@ class OpenTelemetry(CustomLogger):
def _tool_calls_kv_pair(
tool_calls: List[ChatCompletionMessageToolCall],
) -> Dict[str, Any]:
from litellm.proxy._types import SpanAttributes
from litellm_proxy._types import SpanAttributes
kv_pairs: Dict[str, Any] = {}
for idx, tool_call in enumerate(tool_calls):
@ -496,7 +496,7 @@ class OpenTelemetry(CustomLogger):
span, kwargs, response_obj
)
return
from litellm.proxy._types import SpanAttributes
from litellm_proxy._types import SpanAttributes
optional_params = kwargs.get("optional_params", {})
litellm_params = kwargs.get("litellm_params", {}) or {}

View file

@ -19,7 +19,6 @@ from litellm.llms.custom_httpx.http_handler import (
get_async_httpx_client,
httpxSpecialProvider,
)
from litellm.proxy._types import UserAPIKeyAuth
from litellm.types.integrations.pagerduty import (
AlertingConfig,
PagerDutyInternalEvent,
@ -30,6 +29,7 @@ from litellm.types.utils import (
StandardLoggingPayload,
StandardLoggingPayloadErrorInformation,
)
from litellm_proxy._types import UserAPIKeyAuth
PAGERDUTY_DEFAULT_FAILURE_THRESHOLD = 60
PAGERDUTY_DEFAULT_FAILURE_THRESHOLD_WINDOW_SECONDS = 60
@ -46,7 +46,7 @@ class PagerDutyAlerting(SlackAlerting):
def __init__(
self, alerting_args: Optional[Union[AlertingConfig, dict]] = None, **kwargs
):
from litellm.proxy.proxy_server import CommonProxyErrors, premium_user
from litellm_proxy.proxy_server import CommonProxyErrors, premium_user
super().__init__()
_api_key = os.getenv("PAGERDUTY_API_KEY")

View file

@ -18,10 +18,10 @@ from typing import (
import litellm
from litellm._logging import print_verbose, verbose_logger
from litellm.integrations.custom_logger import CustomLogger
from litellm.proxy._types import LiteLLM_TeamTable, UserAPIKeyAuth
from litellm.types.integrations.prometheus import *
from litellm.types.utils import StandardLoggingPayload
from litellm.utils import get_end_user_id_for_cost_tracking
from litellm_proxy._types import LiteLLM_TeamTable, UserAPIKeyAuth
if TYPE_CHECKING:
from apscheduler.schedulers.asyncio import AsyncIOScheduler
@ -38,7 +38,7 @@ class PrometheusLogger(CustomLogger):
try:
from prometheus_client import Counter, Gauge, Histogram
from litellm.proxy.proxy_server import CommonProxyErrors, premium_user
from litellm_proxy.proxy_server import CommonProxyErrors, premium_user
if premium_user is not True:
verbose_logger.warning(
@ -456,7 +456,7 @@ class PrometheusLogger(CustomLogger):
and isinstance(user_api_key, str)
and user_api_key.startswith("sk-")
):
from litellm.proxy.utils import hash_token
from litellm_proxy.utils import hash_token
user_api_key = hash_token(user_api_key)
@ -661,7 +661,7 @@ class PrometheusLogger(CustomLogger):
kwargs: dict,
metadata: dict,
):
from litellm.proxy.common_utils.callback_utils import (
from litellm_proxy.common_utils.callback_utils import (
get_model_group_from_litellm_kwargs,
)
@ -1363,7 +1363,7 @@ class PrometheusLogger(CustomLogger):
set_metrics_function: Function to set metrics for the fetched data.
data_type: String representing the type of data ("teams" or "keys") for logging purposes.
"""
from litellm.proxy.proxy_server import prisma_client
from litellm_proxy.proxy_server import prisma_client
if prisma_client is None:
return
@ -1398,10 +1398,10 @@ class PrometheusLogger(CustomLogger):
"""
Initialize team budget metrics by reusing the generic pagination logic.
"""
from litellm.proxy.management_endpoints.team_endpoints import (
from litellm_proxy.management_endpoints.team_endpoints import (
get_paginated_teams,
)
from litellm.proxy.proxy_server import prisma_client
from litellm_proxy.proxy_server import prisma_client
if prisma_client is None:
verbose_logger.debug(
@ -1432,10 +1432,10 @@ class PrometheusLogger(CustomLogger):
from typing import Union
from litellm.constants import UI_SESSION_TOKEN_TEAM_ID
from litellm.proxy.management_endpoints.key_management_endpoints import (
from litellm_proxy.management_endpoints.key_management_endpoints import (
_list_key_helper,
)
from litellm.proxy.proxy_server import prisma_client
from litellm_proxy.proxy_server import prisma_client
if prisma_client is None:
verbose_logger.debug(
@ -1480,7 +1480,7 @@ class PrometheusLogger(CustomLogger):
- If redis cache is not available, we initialize the metrics directly.
"""
from litellm.constants import PROMETHEUS_EMIT_BUDGET_METRICS_JOB_NAME
from litellm.proxy.proxy_server import proxy_logging_obj
from litellm_proxy.proxy_server import proxy_logging_obj
pod_lock_manager = proxy_logging_obj.db_spend_update_writer.pod_lock_manager
@ -1561,8 +1561,8 @@ class PrometheusLogger(CustomLogger):
Fields not available in metadata:
- `budget_reset_at`
"""
from litellm.proxy.auth.auth_checks import get_team_object
from litellm.proxy.proxy_server import prisma_client, user_api_key_cache
from litellm_proxy.auth.auth_checks import get_team_object
from litellm_proxy.proxy_server import prisma_client, user_api_key_cache
_total_team_spend = (spend or 0) + response_cost
team_object = LiteLLM_TeamTable(
@ -1711,8 +1711,8 @@ class PrometheusLogger(CustomLogger):
"""
Assemble a UserAPIKeyAuth object
"""
from litellm.proxy.auth.auth_checks import get_key_object
from litellm.proxy.proxy_server import prisma_client, user_api_key_cache
from litellm_proxy.auth.auth_checks import get_key_object
from litellm_proxy.proxy_server import prisma_client, user_api_key_cache
_total_key_spend = (key_spend or 0) + response_cost
user_api_key_dict = UserAPIKeyAuth(
@ -1803,8 +1803,8 @@ class PrometheusLogger(CustomLogger):
from prometheus_client import make_asgi_app
from litellm._logging import verbose_proxy_logger
from litellm.proxy._types import CommonProxyErrors
from litellm.proxy.proxy_server import app
from litellm_proxy._types import CommonProxyErrors
from litellm_proxy.proxy_server import app
if premium_user is not True:
verbose_proxy_logger.warning(

View file

@ -28,7 +28,6 @@ from litellm._logging import _is_debugging_on, verbose_logger
from litellm.batches.batch_utils import _handle_completed_batch
from litellm.caching.caching import DualCache, InMemoryCache
from litellm.caching.caching_handler import LLMCachingHandler
from litellm.constants import (
DEFAULT_MOCK_RESPONSE_COMPLETION_TOKEN_COUNT,
DEFAULT_MOCK_RESPONSE_PROMPT_TOKEN_COUNT,
@ -2886,7 +2885,7 @@ def _init_custom_logger_compatible_class( # noqa: PLR0915
_in_memory_loggers.append(_otel_logger)
return _otel_logger # type: ignore
elif logging_integration == "dynamic_rate_limiter":
from litellm.proxy.hooks.dynamic_rate_limiter import (
from litellm_proxy.hooks.dynamic_rate_limiter import (
_PROXY_DynamicRateLimitHandler,
)
@ -3074,7 +3073,7 @@ def get_custom_logger_compatible_class( # noqa: PLR0915
return callback # type: ignore
elif logging_integration == "dynamic_rate_limiter":
from litellm.proxy.hooks.dynamic_rate_limiter import (
from litellm_proxy.hooks.dynamic_rate_limiter import (
_PROXY_DynamicRateLimitHandler,
)
@ -3130,7 +3129,7 @@ def _get_custom_logger_settings_from_proxy_server(callback_name: str) -> Dict:
otel:
message_logging: False
"""
from litellm.proxy.proxy_server import callback_settings
from litellm_proxy.proxy_server import callback_settings
if callback_settings:
return dict(callback_settings.get(callback_name, {}))

View file

@ -342,7 +342,7 @@ def get_format_from_file_id(file_id: Optional[str]) -> Optional[str]:
unified_file_id = litellm_proxy:{};unified_id,{}
If not a unified file id, returns 'file' as default format
"""
from litellm.proxy.hooks.managed_files import _PROXY_LiteLLMManagedFiles
from litellm_proxy.hooks.managed_files import _PROXY_LiteLLMManagedFiles
if not file_id:
return None

View file

@ -37,15 +37,15 @@ class AnthropicMessagesHandler:
"""Helper function to handle Anthropic streaming responses using the existing logging handlers"""
from datetime import datetime
from litellm.proxy.pass_through_endpoints.streaming_handler import (
PassThroughStreamingHandler,
)
from litellm.proxy.pass_through_endpoints.success_handler import (
PassThroughEndpointLogging,
)
from litellm.types.passthrough_endpoints.pass_through_endpoints import (
EndpointType,
)
from litellm_proxy.pass_through_endpoints.streaming_handler import (
PassThroughStreamingHandler,
)
from litellm_proxy.pass_through_endpoints.success_handler import (
PassThroughEndpointLogging,
)
# Create success handler object
passthrough_success_handler_obj = PassThroughEndpointLogging()

View file

@ -1,2 +0,0 @@
.env
secrets.toml

View file

@ -1,44 +0,0 @@
# litellm-proxy
A local, fast, and lightweight **OpenAI-compatible server** to call 100+ LLM APIs.
## usage
```shell
$ pip install litellm
```
```shell
$ litellm --model ollama/codellama
#INFO: Ollama running on http://0.0.0.0:8000
```
## replace openai base
```python
import openai # openai v1.0.0+
client = openai.OpenAI(api_key="anything",base_url="http://0.0.0.0:8000") # set proxy to base_url
# request sent to model set on litellm proxy, `litellm --model`
response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
{
"role": "user",
"content": "this is a test request, write a short poem"
}
])
print(response)
```
[**See how to call Huggingface, Bedrock, TogetherAI, Anthropic, etc.**](https://docs.litellm.ai/docs/simple_proxy)
---
### Folder Structure
**Routes**
- `proxy_server.py` - all openai-compatible routes - `/v1/chat/completion`, `/v1/embedding` + model info routes - `/v1/models`, `/v1/model/info`, `/v1/model_group_info` routes.
- `health_endpoints/` - `/health`, `/health/liveliness`, `/health/readiness`
- `management_endpoints/key_management_endpoints.py` - all `/key/*` routes
- `management_endpoints/team_endpoints.py` - all `/team/*` routes
- `management_endpoints/internal_user_endpoints.py` - all `/user/*` routes
- `management_endpoints/ui_sso.py` - all `/sso/*` routes

View file

@ -1 +0,0 @@
from . import *

View file

@ -1,153 +0,0 @@
"""
MCP Client Manager
This class is responsible for managing MCP SSE clients.
It acts as a proxy layer between LiteLLM and the configured MCP SSE servers.
"""
import asyncio
import json
from typing import Any, Dict, List, Optional
from mcp import ClientSession
from mcp.client.sse import sse_client
from mcp.types import Tool as MCPTool
from litellm._logging import verbose_logger
from litellm.types.mcp_server.mcp_server_manager import MCPInfo, MCPSSEServer
class MCPServerManager:
    """Registry and router for configured MCP SSE servers.

    Loads server definitions from the proxy config, aggregates the tools
    each server exposes, and dispatches tool calls to the server that
    owns the requested tool.
    """

    def __init__(self):
        # All MCP servers loaded from config via load_servers_from_config().
        self.mcp_servers: List[MCPSSEServer] = []
        """
        eg.
        [
            {
                "name": "zapier_mcp_server",
                "url": "https://actions.zapier.com/mcp/sk-ak-2ew3bofIeQIkNoeKIdXrF1Hhhp/sse"
            },
            {
                "name": "google_drive_mcp_server",
                "url": "https://actions.zapier.com/mcp/sk-ak-2ew3bofIeQIkNoeKIdXrF1Hhhp/sse"
            }
        ]
        """

        # Maps a tool name to the name of the MCP server that provides it;
        # populated by _get_tools_from_server() as tools are listed.
        self.tool_name_to_mcp_server_name_mapping: Dict[str, str] = {}
        """
        {
            "gmail_send_email": "zapier_mcp_server",
        }
        """

    def load_servers_from_config(self, mcp_servers_config: Dict[str, Any]):
        """
        Load the MCP Servers from the config

        Args:
            mcp_servers_config: mapping of server name -> server settings;
                each entry must contain "url" and may contain "mcp_info".
        """
        for server_name, server_config in mcp_servers_config.items():
            _mcp_info: dict = server_config.get("mcp_info", None) or {}
            mcp_info = MCPInfo(**_mcp_info)
            mcp_info["server_name"] = server_name
            self.mcp_servers.append(
                MCPSSEServer(
                    name=server_name,
                    url=server_config["url"],
                    mcp_info=mcp_info,
                )
            )
        verbose_logger.debug(
            f"Loaded MCP Servers: {json.dumps(self.mcp_servers, indent=4, default=str)}"
        )
        # Eagerly build the tool -> server mapping (requires a running event loop).
        self.initialize_tool_name_to_mcp_server_name_mapping()

    async def list_tools(self) -> List[MCPTool]:
        """
        List all tools available across all MCP Servers.

        Returns:
            List[MCPTool]: Combined list of tools from all servers
        """
        list_tools_result: List[MCPTool] = []
        verbose_logger.debug("SSE SERVER MANAGER LISTING TOOLS")
        for server in self.mcp_servers:
            tools = await self._get_tools_from_server(server)
            list_tools_result.extend(tools)
        return list_tools_result

    async def _get_tools_from_server(self, server: MCPSSEServer) -> List[MCPTool]:
        """
        Helper method to get tools from a single MCP server.

        Opens a fresh SSE session to the server on every call.

        Args:
            server (MCPSSEServer): The server to query tools from

        Returns:
            List[MCPTool]: List of tools available on the server
        """
        verbose_logger.debug(f"Connecting to url: {server.url}")
        async with sse_client(url=server.url) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()
                tools_result = await session.list_tools()
                verbose_logger.debug(f"Tools from {server.name}: {tools_result}")
                # Update tool to server mapping
                for tool in tools_result.tools:
                    self.tool_name_to_mcp_server_name_mapping[tool.name] = server.name
                return tools_result.tools

    def initialize_tool_name_to_mcp_server_name_mapping(self):
        """
        On startup, initialize the tool name to MCP server name mapping
        """
        try:
            # Only schedule the async refresh when an event loop is running;
            # get_running_loop() raises RuntimeError otherwise.
            if asyncio.get_running_loop():
                asyncio.create_task(
                    self._initialize_tool_name_to_mcp_server_name_mapping()
                )
        except RuntimeError as e:  # no running event loop
            verbose_logger.exception(
                f"No running event loop - skipping tool name to MCP server name mapping initialization: {str(e)}"
            )

    async def _initialize_tool_name_to_mcp_server_name_mapping(self):
        """
        Call list_tools for each server and update the tool name to MCP server name mapping
        """
        for server in self.mcp_servers:
            tools = await self._get_tools_from_server(server)
            for tool in tools:
                self.tool_name_to_mcp_server_name_mapping[tool.name] = server.name

    async def call_tool(self, name: str, arguments: Dict[str, Any]):
        """
        Call a tool with the given name and arguments

        Raises:
            ValueError: if no managed server is mapped to this tool name.
        """
        mcp_server = self._get_mcp_server_from_tool_name(name)
        if mcp_server is None:
            raise ValueError(f"Tool {name} not found")
        async with sse_client(url=mcp_server.url) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()
                return await session.call_tool(name, arguments)

    def _get_mcp_server_from_tool_name(self, tool_name: str) -> Optional[MCPSSEServer]:
        """
        Get the MCP Server from the tool name

        Returns None when the tool name is not in the mapping.
        """
        if tool_name in self.tool_name_to_mcp_server_name_mapping:
            for server in self.mcp_servers:
                if server.name == self.tool_name_to_mcp_server_name_mapping[tool_name]:
                    return server
        return None
global_mcp_server_manager: MCPServerManager = MCPServerManager()

View file

@ -1,309 +0,0 @@
"""
LiteLLM MCP Server Routes
"""
import asyncio
from typing import Any, Dict, List, Optional, Union
from anyio import BrokenResourceError
from fastapi import APIRouter, Depends, HTTPException, Request
from fastapi.responses import StreamingResponse
from pydantic import ConfigDict, ValidationError
from litellm._logging import verbose_logger
from litellm.constants import MCP_TOOL_NAME_PREFIX
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.proxy._types import UserAPIKeyAuth
from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
from litellm.types.mcp_server.mcp_server_manager import MCPInfo
from litellm.types.utils import StandardLoggingMCPToolCall
from litellm.utils import client
# Check if MCP is available
# "mcp" requires python 3.10 or higher, but several litellm users use python 3.8
# We're making this conditional import to avoid breaking users who use python 3.8.
try:
from mcp.server import Server
MCP_AVAILABLE = True
except ImportError as e:
verbose_logger.debug(f"MCP module not found: {e}")
MCP_AVAILABLE = False
router = APIRouter(
prefix="/mcp",
tags=["mcp"],
)
if MCP_AVAILABLE:
from mcp.server import NotificationOptions, Server
from mcp.server.models import InitializationOptions
from mcp.types import EmbeddedResource as MCPEmbeddedResource
from mcp.types import ImageContent as MCPImageContent
from mcp.types import TextContent as MCPTextContent
from mcp.types import Tool as MCPTool
from .mcp_server_manager import global_mcp_server_manager
from .sse_transport import SseServerTransport
from .tool_registry import global_mcp_tool_registry
######################################################
############ MCP Tools List REST API Response Object #
# Defined here because we don't want to add `mcp` as a
# required dependency for `litellm` pip package
######################################################
class ListMCPToolsRestAPIResponseObject(MCPTool):
    """
    Object returned by the /tools/list REST API route.

    Extends the protocol-level MCPTool with proxy-side server metadata.
    """

    # Metadata about the MCP server that provides this tool (server name, logo).
    mcp_info: Optional[MCPInfo] = None
    model_config = ConfigDict(arbitrary_types_allowed=True)
########################################################
############ Initialize the MCP Server #################
########################################################
router = APIRouter(
prefix="/mcp",
tags=["mcp"],
)
server: Server = Server("litellm-mcp-server")
sse: SseServerTransport = SseServerTransport("/mcp/sse/messages")
########################################################
############### MCP Server Routes #######################
########################################################
@server.list_tools()
async def list_tools() -> list[MCPTool]:
    """MCP protocol handler: return every tool currently available
    (local registry tools plus tools from managed MCP servers)."""
    available_tools = await _list_mcp_tools()
    return available_tools
async def _list_mcp_tools() -> List[MCPTool]:
    """Collect tools from the local registry and all managed MCP servers.

    Local registry entries are converted to protocol-level MCPTool objects;
    managed-server tools are appended as returned by the server manager.
    """
    tools = [
        MCPTool(
            name=registered_tool.name,
            description=registered_tool.description,
            inputSchema=registered_tool.input_schema,
        )
        for registered_tool in global_mcp_tool_registry.list_tools()
    ]
    verbose_logger.debug(
        "GLOBAL MCP TOOLS: %s", global_mcp_tool_registry.list_tools()
    )
    sse_tools: List[MCPTool] = await global_mcp_server_manager.list_tools()
    verbose_logger.debug("SSE TOOLS: %s", sse_tools)
    if sse_tools is not None:
        tools.extend(sse_tools)
    return tools
@server.call_tool()
async def mcp_server_tool_call(
    name: str, arguments: Dict[str, Any] | None
) -> List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]:
    """MCP protocol handler for tool invocation.

    Args:
        name (str): Name of the tool to call
        arguments (Dict[str, Any] | None): Arguments to pass to the tool

    Returns:
        List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]: Tool execution results

    Raises:
        HTTPException: If tool not found or arguments missing
    """
    # Argument validation is delegated to call_mcp_tool.
    return await call_mcp_tool(name=name, arguments=arguments)
@client
async def call_mcp_tool(
    name: str, arguments: Optional[Dict[str, Any]] = None, **kwargs: Any
) -> List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]:
    """Execute a tool by name, preferring managed MCP servers over the
    local tool registry, and record call metadata on the logging object.

    Raises:
        HTTPException: 400 when no arguments are supplied.
    """
    if arguments is None:
        raise HTTPException(
            status_code=400, detail="Request arguments are required"
        )

    tool_call_metadata: StandardLoggingMCPToolCall = (
        _get_standard_logging_mcp_tool_call(name=name, arguments=arguments)
    )

    logging_obj: Optional[LiteLLMLoggingObj] = kwargs.get(
        "litellm_logging_obj", None
    )
    if logging_obj:
        # Attach MCP call metadata so downstream loggers can attribute the call.
        details = logging_obj.model_call_details
        details["mcp_tool_call_metadata"] = tool_call_metadata
        details["model"] = (
            f"{MCP_TOOL_NAME_PREFIX}: {tool_call_metadata.get('name') or ''}"
        )
        details["custom_llm_provider"] = tool_call_metadata.get("mcp_server_name")

    # Managed-server tools take priority over the local registry.
    if name in global_mcp_server_manager.tool_name_to_mcp_server_name_mapping:
        return await _handle_managed_mcp_tool(name, arguments)
    return await _handle_local_mcp_tool(name, arguments)
def _get_standard_logging_mcp_tool_call(
    name: str,
    arguments: Dict[str, Any],
) -> StandardLoggingMCPToolCall:
    """
    Build the StandardLoggingMCPToolCall payload for a tool invocation.

    When the tool belongs to a managed MCP server, the payload is enriched
    with that server's name and logo URL from its `mcp_info`.
    """
    matched_server = global_mcp_server_manager._get_mcp_server_from_tool_name(name)
    if not matched_server:
        # Tool not tied to a managed server — log name/arguments only.
        return StandardLoggingMCPToolCall(
            name=name,
            arguments=arguments,
        )

    server_info = matched_server.mcp_info or {}
    return StandardLoggingMCPToolCall(
        name=name,
        arguments=arguments,
        mcp_server_name=server_info.get("server_name"),
        mcp_server_logo_url=server_info.get("logo_url"),
    )
async def _handle_managed_mcp_tool(
    name: str, arguments: Dict[str, Any]
) -> List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]:
    """Execute a tool hosted on a managed MCP server and return its content."""
    tool_result = await global_mcp_server_manager.call_tool(
        name=name,
        arguments=arguments,
    )
    verbose_logger.debug("CALL TOOL RESULT: %s", tool_result)
    return tool_result.content
async def _handle_local_mcp_tool(
    name: str, arguments: Dict[str, Any]
) -> List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]:
    """Execute a tool registered in the local MCP tool registry."""
    local_tool = global_mcp_tool_registry.get_tool(name)
    if not local_tool:
        raise HTTPException(status_code=404, detail=f"Tool '{name}' not found")

    try:
        handler_output = local_tool.handler(**arguments)
        return [MCPTextContent(text=str(handler_output), type="text")]
    except Exception as e:
        # Handler failures are surfaced to the client as tool text output.
        return [MCPTextContent(text=f"Error: {str(e)}", type="text")]
@router.get("/", response_class=StreamingResponse)
async def handle_sse(request: Request):
    """Establish a new SSE connection and run the MCP server over it."""
    verbose_logger.info("new incoming SSE connection established")
    async with sse.connect_sse(request) as streams:
        try:
            await server.run(streams[0], streams[1], options)
        except (BrokenResourceError, asyncio.CancelledError, ValidationError):
            # Client disconnects, cancelled tasks, and malformed frames end
            # the stream quietly; any other exception propagates.
            pass
    await request.close()
@router.post("/sse/messages")
async def handle_messages(request: Request):
    """Receive a message POSTed by an SSE client and forward it to its session."""
    verbose_logger.info("incoming SSE message received")
    # The transport resolves the target session from the `session_id` query
    # parameter and pushes the parsed message onto that session's stream.
    await sse.handle_post_message(request.scope, request.receive, request._send)
    await request.close()
########################################################
############ MCP Server REST API Routes #################
########################################################
@router.get("/tools/list", dependencies=[Depends(user_api_key_auth)])
async def list_tool_rest_api() -> List[ListMCPToolsRestAPIResponseObject]:
    """
    List all available tools with information about the server they belong to.

    Example response:
    Tools:
    [
        {
            "name": "create_zap",
            "description": "Create a new zap",
            "inputSchema": "tool_input_schema",
            "mcp_info": {
                "server_name": "zapier",
                "logo_url": "https://www.zapier.com/logo.png",
            }
        },
        {
            "name": "fetch_data",
            "description": "Fetch data from a URL",
            "inputSchema": "tool_input_schema",
            "mcp_info": {
                "server_name": "fetch",
                "logo_url": "https://www.fetch.com/logo.png",
            }
        }
    ]
    """
    list_tools_result: List[ListMCPToolsRestAPIResponseObject] = []
    # Loop variable deliberately named `mcp_server` (not `server`) so it does
    # not shadow the module-level MCP `server` instance used by other handlers.
    for mcp_server in global_mcp_server_manager.mcp_servers:
        try:
            server_tools = await global_mcp_server_manager._get_tools_from_server(
                mcp_server
            )
            for tool in server_tools:
                list_tools_result.append(
                    ListMCPToolsRestAPIResponseObject(
                        name=tool.name,
                        description=tool.description,
                        inputSchema=tool.inputSchema,
                        mcp_info=mcp_server.mcp_info,
                    )
                )
        except Exception as e:
            # Best-effort aggregation: a failing server is logged and skipped
            # so the remaining servers' tools are still returned.
            # Lazy %-style args avoid formatting when the log level is off.
            verbose_logger.exception(
                "Error getting tools from %s: %s", mcp_server.name, e
            )
            continue
    return list_tools_result
@router.post("/tools/call", dependencies=[Depends(user_api_key_auth)])
async def call_tool_rest_api(
    request: Request,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    REST API to call a specific MCP tool with the provided arguments.

    Enriches the request body with litellm proxy metadata before dispatching
    to `call_mcp_tool`.
    """
    # NOTE: proxy_server now lives in the top-level `litellm_proxy` package
    # (the old `litellm.proxy` path is stale after the package relocation).
    from litellm_proxy.proxy_server import add_litellm_data_to_request, proxy_config

    data = await request.json()
    data = await add_litellm_data_to_request(
        data=data,
        request=request,
        user_api_key_dict=user_api_key_dict,
        proxy_config=proxy_config,
    )
    return await call_mcp_tool(**data)
# Static MCP initialization options advertised to clients during the
# protocol handshake; capabilities are derived from the module-level server.
options = InitializationOptions(
    server_name="litellm-mcp-server",
    server_version="0.1.0",
    capabilities=server.get_capabilities(
        notification_options=NotificationOptions(),
        experimental_capabilities={},
    ),
)

View file

@ -1,150 +0,0 @@
"""
This is a modification of code from: https://github.com/SecretiveShell/MCP-Bridge/blob/master/mcp_bridge/mcp_server/sse_transport.py
Credit to the maintainers of SecretiveShell for their SSE Transport implementation
"""
from contextlib import asynccontextmanager
from typing import Any
from urllib.parse import quote
from uuid import UUID, uuid4
import anyio
import mcp.types as types
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from fastapi.requests import Request
from fastapi.responses import Response
from pydantic import ValidationError
from sse_starlette import EventSourceResponse
from starlette.types import Receive, Scope, Send
from litellm._logging import verbose_logger
class SseServerTransport:
    """
    SSE server transport for MCP. This class provides _two_ ASGI applications,
    suitable to be used with a framework like Starlette and a server like Hypercorn:
    1. connect_sse() is an ASGI application which receives incoming GET requests,
    and sets up a new SSE stream to send server messages to the client.
    2. handle_post_message() is an ASGI application which receives incoming POST
    requests, which should contain client messages that link to a
    previously-established SSE session.
    """

    # URL (relative or absolute) clients must POST their messages to.
    _endpoint: str
    # Maps a session UUID to the send-side of that session's read stream;
    # handle_post_message() uses this to route POSTed messages to the session.
    _read_stream_writers: dict[
        UUID, MemoryObjectSendStream[types.JSONRPCMessage | Exception]
    ]

    def __init__(self, endpoint: str) -> None:
        """
        Creates a new SSE server transport, which will direct the client to POST
        messages to the relative or absolute URL given.
        """
        super().__init__()
        self._endpoint = endpoint
        self._read_stream_writers = {}
        verbose_logger.debug(
            f"SseServerTransport initialized with endpoint: {endpoint}"
        )

    @asynccontextmanager
    async def connect_sse(self, request: Request):
        # Set up a new SSE session: two unbuffered in-memory stream pairs
        # (client->server "read" and server->client "write"), plus an SSE
        # response task that drains the write stream to the wire.
        if request.scope["type"] != "http":
            verbose_logger.error("connect_sse received non-HTTP request")
            raise ValueError("connect_sse can only handle HTTP requests")

        verbose_logger.debug("Setting up SSE connection")
        read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception]
        read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception]

        write_stream: MemoryObjectSendStream[types.JSONRPCMessage]
        write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage]

        # Buffer size 0: senders block until a receiver is ready (rendezvous).
        read_stream_writer, read_stream = anyio.create_memory_object_stream(0)
        write_stream, write_stream_reader = anyio.create_memory_object_stream(0)

        # Register the session so handle_post_message() can find its writer.
        session_id = uuid4()
        session_uri = f"{quote(self._endpoint)}?session_id={session_id.hex}"
        self._read_stream_writers[session_id] = read_stream_writer
        verbose_logger.debug(f"Created new session with ID: {session_id}")

        sse_stream_writer: MemoryObjectSendStream[dict[str, Any]]
        sse_stream_reader: MemoryObjectReceiveStream[dict[str, Any]]
        sse_stream_writer, sse_stream_reader = anyio.create_memory_object_stream(
            0, dict[str, Any]
        )

        async def sse_writer():
            # Pump outgoing JSON-RPC messages from the server's write stream
            # into the SSE event stream. The first event tells the client
            # where to POST its messages (the session-specific endpoint URI).
            verbose_logger.debug("Starting SSE writer")
            async with sse_stream_writer, write_stream_reader:
                await sse_stream_writer.send({"event": "endpoint", "data": session_uri})
                verbose_logger.debug(f"Sent endpoint event: {session_uri}")

                async for message in write_stream_reader:
                    verbose_logger.debug(f"Sending message via SSE: {message}")
                    await sse_stream_writer.send(
                        {
                            "event": "message",
                            "data": message.model_dump_json(
                                by_alias=True, exclude_none=True
                            ),
                        }
                    )

        async with anyio.create_task_group() as tg:
            response = EventSourceResponse(
                content=sse_stream_reader, data_sender_callable=sse_writer
            )
            verbose_logger.debug("Starting SSE response task")
            # Run the ASGI response concurrently; the yielded streams stay
            # valid only while this task group is alive.
            tg.start_soon(response, request.scope, request.receive, request._send)

            verbose_logger.debug("Yielding read and write streams")
            yield (read_stream, write_stream)

    async def handle_post_message(
        self, scope: Scope, receive: Receive, send: Send
    ) -> Response:
        # Accept a POSTed JSON-RPC message and forward it into the read
        # stream of the SSE session identified by the `session_id` query param.
        verbose_logger.debug("Handling POST message")
        request = Request(scope, receive)

        session_id_param = request.query_params.get("session_id")
        if session_id_param is None:
            verbose_logger.warning("Received request without session_id")
            response = Response("session_id is required", status_code=400)
            return response

        try:
            session_id = UUID(hex=session_id_param)
            verbose_logger.debug(f"Parsed session ID: {session_id}")
        except ValueError:
            verbose_logger.warning(f"Received invalid session ID: {session_id_param}")
            response = Response("Invalid session ID", status_code=400)
            return response

        writer = self._read_stream_writers.get(session_id)
        if not writer:
            verbose_logger.warning(f"Could not find session for ID: {session_id}")
            response = Response("Could not find session", status_code=404)
            return response

        json = await request.json()
        verbose_logger.debug(f"Received JSON: {json}")

        try:
            message = types.JSONRPCMessage.model_validate(json)
            verbose_logger.debug(f"Validated client message: {message}")
        except ValidationError as err:
            verbose_logger.error(f"Failed to parse message: {err}")
            response = Response("Could not parse message", status_code=400)
            # Deliver the validation error to the session's reader so the
            # server side sees the failure too.
            await writer.send(err)
            return response

        verbose_logger.debug(f"Sending message to writer: {message}")
        response = Response("Accepted", status_code=202)
        await writer.send(message)
        return response

View file

@ -1,103 +0,0 @@
import json
from typing import Any, Callable, Dict, List, Optional
from litellm._logging import verbose_logger
from litellm.proxy.types_utils.utils import get_instance_fn
from litellm.types.mcp_server.tool_registry import MCPTool
class MCPToolRegistry:
    """
    A registry for managing MCP tools
    """

    def __init__(self):
        # name -> MCPTool mapping of every registered tool
        self.tools: Dict[str, MCPTool] = {}

    def register_tool(
        self,
        name: str,
        description: str,
        input_schema: Dict[str, Any],
        handler: Callable,
    ) -> None:
        """
        Register a new tool in the registry
        """
        self.tools[name] = MCPTool(
            name=name,
            description=description,
            input_schema=input_schema,
            handler=handler,
        )
        verbose_logger.debug(f"Registered tool: {name}")

    def get_tool(self, name: str) -> Optional[MCPTool]:
        """
        Get a tool from the registry by name
        """
        return self.tools.get(name)

    def list_tools(self) -> List[MCPTool]:
        """
        List all registered tools
        """
        return list(self.tools.values())

    def load_tools_from_config(
        self, mcp_tools_config: Optional[Dict[str, Any]] = None
    ) -> None:
        """
        Load and register tools from the proxy config

        Args:
            mcp_tools_config: The mcp_tools config from the proxy config
        """
        if mcp_tools_config is None:
            raise ValueError(
                "mcp_tools_config is required, please set `mcp_tools` in your proxy config"
            )

        for entry in mcp_tools_config:
            if not isinstance(entry, dict):
                raise ValueError("mcp_tools_config must be a list of dictionaries")

            tool_name = entry.get("name")
            tool_description = entry.get("description")
            tool_input_schema = entry.get("input_schema", {})
            handler_path = entry.get("handler")

            # Skip partially-specified tool entries.
            if not all([tool_name, tool_description, handler_path]):
                continue

            # Resolve the handler via its module path
            # (e.g. "module.submodule.function").
            if handler_path is None:
                raise ValueError(f"handler is required for tool {tool_name}")
            resolved_handler = get_instance_fn(handler_path)

            if resolved_handler is None:
                verbose_logger.warning(
                    f"Warning: Could not find handler {handler_path} for tool {tool_name}"
                )
                continue

            # Narrowing checks for the type checker; unreachable after the
            # all() guard above.
            if tool_name is None:
                raise ValueError(f"name is required for tool {tool_name}")
            if tool_description is None:
                raise ValueError(f"description is required for tool {tool_name}")

            self.register_tool(
                name=tool_name,
                description=tool_description,
                input_schema=tool_input_schema,
                handler=resolved_handler,
            )
        verbose_logger.debug(
            "all registered tools: %s", json.dumps(self.tools, indent=4, default=str)
        )


global_mcp_tool_registry = MCPToolRegistry()

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1 +0,0 @@
"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[665],{84566:function(e,t,s){s.d(t,{GH$:function(){return l}});var c=s(2265);let l=({color:e="currentColor",size:t=24,className:s,...l})=>c.createElement("svg",{viewBox:"0 0 24 24",xmlns:"http://www.w3.org/2000/svg",width:t,height:t,fill:e,...l,className:"remixicon "+(s||"")},c.createElement("path",{d:"M12 22C6.47715 22 2 17.5228 2 12C2 6.47715 6.47715 2 12 2C17.5228 2 22 6.47715 22 12C22 17.5228 17.5228 22 12 22ZM12 20C16.4183 20 20 16.4183 20 12C20 7.58172 16.4183 4 12 4C7.58172 4 4 7.58172 4 12C4 16.4183 7.58172 20 12 20ZM11.0026 16L6.75999 11.7574L8.17421 10.3431L11.0026 13.1716L16.6595 7.51472L18.0737 8.92893L11.0026 16Z"}))}}]);

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1 +0,0 @@
(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[409],{67589:function(e,t,n){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_not-found/page",function(){return n(83634)}])},83634:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return s}}),n(47043);let i=n(57437);n(2265);let o={fontFamily:'system-ui,"Segoe UI",Roboto,Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji"',height:"100vh",textAlign:"center",display:"flex",flexDirection:"column",alignItems:"center",justifyContent:"center"},l={display:"inline-block"},r={display:"inline-block",margin:"0 20px 0 0",padding:"0 23px 0 0",fontSize:24,fontWeight:500,verticalAlign:"top",lineHeight:"49px"},d={fontSize:14,fontWeight:400,lineHeight:"49px",margin:0};function s(){return(0,i.jsxs)(i.Fragment,{children:[(0,i.jsx)("title",{children:"404: This page could not be found."}),(0,i.jsx)("div",{style:o,children:(0,i.jsxs)("div",{children:[(0,i.jsx)("style",{dangerouslySetInnerHTML:{__html:"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}),(0,i.jsx)("h1",{className:"next-error-h1",style:r,children:"404"}),(0,i.jsx)("div",{style:l,children:(0,i.jsx)("h2",{style:d,children:"This page could not be found."})})]})})]})}("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)}},function(e){e.O(0,[971,117,744],function(){return e(e.s=67589)}),_N_E=e.O()}]);

View file

@ -1 +0,0 @@
(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[185],{35677:function(n,e,t){Promise.resolve().then(t.t.bind(t,39974,23)),Promise.resolve().then(t.t.bind(t,2778,23))},2778:function(){},39974:function(n){n.exports={style:{fontFamily:"'__Inter_cf7686', '__Inter_Fallback_cf7686'",fontStyle:"normal"},className:"__className_cf7686"}}},function(n){n.O(0,[919,986,971,117,744],function(){return n(n.s=35677)}),_N_E=n.O()}]);

View file

@ -1 +0,0 @@
(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[418],{56668:function(e,n,u){Promise.resolve().then(u.bind(u,52829))},52829:function(e,n,u){"use strict";u.r(n),u.d(n,{default:function(){return f}});var t=u(57437),s=u(2265),r=u(99376),c=u(92699);function f(){let e=(0,r.useSearchParams)().get("key"),[n,u]=(0,s.useState)(null);return(0,s.useEffect)(()=>{e&&u(e)},[e]),(0,t.jsx)(c.Z,{accessToken:n,publicPage:!0,premiumUser:!1})}}},function(e){e.O(0,[42,261,250,699,971,117,744],function(){return e(e.s=56668)}),_N_E=e.O()}]);

View file

@ -1 +0,0 @@
(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[461],{23781:function(e,t,n){Promise.resolve().then(n.bind(n,12011))},12011:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return S}});var s=n(57437),o=n(2265),a=n(99376),c=n(20831),i=n(94789),l=n(12514),r=n(49804),u=n(67101),m=n(84264),d=n(49566),h=n(96761),x=n(84566),p=n(19250),f=n(14474),k=n(13634),g=n(73002),j=n(3914);function S(){let[e]=k.Z.useForm(),t=(0,a.useSearchParams)();(0,j.e)("token");let n=t.get("invitation_id"),[S,w]=(0,o.useState)(null),[Z,_]=(0,o.useState)(""),[N,b]=(0,o.useState)(""),[T,y]=(0,o.useState)(null),[E,v]=(0,o.useState)(""),[C,U]=(0,o.useState)("");return(0,o.useEffect)(()=>{n&&(0,p.W_)(n).then(e=>{let t=e.login_url;console.log("login_url:",t),v(t);let n=e.token,s=(0,f.o)(n);U(n),console.log("decoded:",s),w(s.key),console.log("decoded user email:",s.user_email),b(s.user_email),y(s.user_id)})},[n]),(0,s.jsx)("div",{className:"mx-auto w-full max-w-md mt-10",children:(0,s.jsxs)(l.Z,{children:[(0,s.jsx)(h.Z,{className:"text-sm mb-5 text-center",children:"\uD83D\uDE85 LiteLLM"}),(0,s.jsx)(h.Z,{className:"text-xl",children:"Sign up"}),(0,s.jsx)(m.Z,{children:"Claim your user account to login to Admin UI."}),(0,s.jsx)(i.Z,{className:"mt-4",title:"SSO",icon:x.GH$,color:"sky",children:(0,s.jsxs)(u.Z,{numItems:2,className:"flex justify-between items-center",children:[(0,s.jsx)(r.Z,{children:"SSO is under the Enterprise Tier."}),(0,s.jsx)(r.Z,{children:(0,s.jsx)(c.Z,{variant:"primary",className:"mb-2",children:(0,s.jsx)("a",{href:"https://forms.gle/W3U4PZpJGFHWtHyA9",target:"_blank",children:"Get Free Trial"})})})]})}),(0,s.jsxs)(k.Z,{className:"mt-10 mb-5 mx-auto",layout:"vertical",onFinish:e=>{console.log("in handle submit. 
accessToken:",S,"token:",C,"formValues:",e),S&&C&&(e.user_email=N,T&&n&&(0,p.m_)(S,n,T,e.password).then(e=>{let t="/ui/";t+="?login=success",document.cookie="token="+C,console.log("redirecting to:",t),window.location.href=t}))},children:[(0,s.jsxs)(s.Fragment,{children:[(0,s.jsx)(k.Z.Item,{label:"Email Address",name:"user_email",children:(0,s.jsx)(d.Z,{type:"email",disabled:!0,value:N,defaultValue:N,className:"max-w-md"})}),(0,s.jsx)(k.Z.Item,{label:"Password",name:"password",rules:[{required:!0,message:"password required to sign up"}],help:"Create a password for your account",children:(0,s.jsx)(d.Z,{placeholder:"",type:"password",className:"max-w-md"})})]}),(0,s.jsx)("div",{className:"mt-10",children:(0,s.jsx)(g.ZP,{htmlType:"submit",children:"Sign Up"})})]})]})})}},3914:function(e,t,n){"use strict";function s(){let e=window.location.hostname,t=["Lax","Strict","None"];["/","/ui"].forEach(n=>{document.cookie="token=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=".concat(n,";"),document.cookie="token=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=".concat(n,"; domain=").concat(e,";"),t.forEach(t=>{let s="None"===t?" Secure;":"";document.cookie="token=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=".concat(n,"; SameSite=").concat(t,";").concat(s),document.cookie="token=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=".concat(n,"; domain=").concat(e,"; SameSite=").concat(t,";").concat(s)})}),console.log("After clearing cookies:",document.cookie)}function o(e){let t=document.cookie.split("; ").find(t=>t.startsWith(e+"="));return t?t.split("=")[1]:null}n.d(t,{b:function(){return s},e:function(){return o}})}},function(e){e.O(0,[665,42,899,250,971,117,744],function(){return e(e.s=23781)}),_N_E=e.O()}]);

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1 +0,0 @@
(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[744],{35618:function(e,n,t){Promise.resolve().then(t.t.bind(t,12846,23)),Promise.resolve().then(t.t.bind(t,19107,23)),Promise.resolve().then(t.t.bind(t,61060,23)),Promise.resolve().then(t.t.bind(t,4707,23)),Promise.resolve().then(t.t.bind(t,80,23)),Promise.resolve().then(t.t.bind(t,36423,23))}},function(e){var n=function(n){return e(e.s=n)};e.O(0,[971,117],function(){return n(54278),n(35618)}),_N_E=e.O()}]);

View file

@ -1 +0,0 @@
(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[888],{41597:function(n,_,u){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_app",function(){return u(48141)}])}},function(n){var _=function(_){return n(n.s=_)};n.O(0,[774,179],function(){return _(41597),_(37253)}),_N_E=n.O()}]);

View file

@ -1 +0,0 @@
(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[820],{81981:function(n,_,u){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_error",function(){return u(18529)}])}},function(n){n.O(0,[888,774,179],function(){return n(n.s=81981)}),_N_E=n.O()}]);

File diff suppressed because one or more lines are too long

View file

@ -1 +0,0 @@
!function(){"use strict";var e,t,n,r,o,u,i,c,f,a={},l={};function d(e){var t=l[e];if(void 0!==t)return t.exports;var n=l[e]={id:e,loaded:!1,exports:{}},r=!0;try{a[e].call(n.exports,n,n.exports,d),r=!1}finally{r&&delete l[e]}return n.loaded=!0,n.exports}d.m=a,e=[],d.O=function(t,n,r,o){if(n){o=o||0;for(var u=e.length;u>0&&e[u-1][2]>o;u--)e[u]=e[u-1];e[u]=[n,r,o];return}for(var i=1/0,u=0;u<e.length;u++){for(var n=e[u][0],r=e[u][1],o=e[u][2],c=!0,f=0;f<n.length;f++)i>=o&&Object.keys(d.O).every(function(e){return d.O[e](n[f])})?n.splice(f--,1):(c=!1,o<i&&(i=o));if(c){e.splice(u--,1);var a=r();void 0!==a&&(t=a)}}return t},d.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return d.d(t,{a:t}),t},n=Object.getPrototypeOf?function(e){return Object.getPrototypeOf(e)}:function(e){return e.__proto__},d.t=function(e,r){if(1&r&&(e=this(e)),8&r||"object"==typeof e&&e&&(4&r&&e.__esModule||16&r&&"function"==typeof e.then))return e;var o=Object.create(null);d.r(o);var u={};t=t||[null,n({}),n([]),n(n)];for(var i=2&r&&e;"object"==typeof i&&!~t.indexOf(i);i=n(i))Object.getOwnPropertyNames(i).forEach(function(t){u[t]=function(){return e[t]}});return u.default=function(){return e},d.d(o,u),o},d.d=function(e,t){for(var n in t)d.o(t,n)&&!d.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:t[n]})},d.f={},d.e=function(e){return Promise.all(Object.keys(d.f).reduce(function(t,n){return d.f[n](e,t),t},[]))},d.u=function(e){},d.miniCssF=function(e){},d.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||Function("return this")()}catch(e){if("object"==typeof window)return window}}(),d.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r={},o="_N_E:",d.l=function(e,t,n,u){if(r[e]){r[e].push(t);return}if(void 0!==n)for(var i,c,f=document.getElementsByTagName("script"),a=0;a<f.length;a++){var 
l=f[a];if(l.getAttribute("src")==e||l.getAttribute("data-webpack")==o+n){i=l;break}}i||(c=!0,(i=document.createElement("script")).charset="utf-8",i.timeout=120,d.nc&&i.setAttribute("nonce",d.nc),i.setAttribute("data-webpack",o+n),i.src=d.tu(e)),r[e]=[t];var s=function(t,n){i.onerror=i.onload=null,clearTimeout(p);var o=r[e];if(delete r[e],i.parentNode&&i.parentNode.removeChild(i),o&&o.forEach(function(e){return e(n)}),t)return t(n)},p=setTimeout(s.bind(null,void 0,{type:"timeout",target:i}),12e4);i.onerror=s.bind(null,i.onerror),i.onload=s.bind(null,i.onload),c&&document.head.appendChild(i)},d.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},d.nmd=function(e){return e.paths=[],e.children||(e.children=[]),e},d.tt=function(){return void 0===u&&(u={createScriptURL:function(e){return e}},"undefined"!=typeof trustedTypes&&trustedTypes.createPolicy&&(u=trustedTypes.createPolicy("nextjs#bundler",u))),u},d.tu=function(e){return d.tt().createScriptURL(e)},d.p="/ui/_next/",i={272:0,919:0,986:0},d.f.j=function(e,t){var n=d.o(i,e)?i[e]:void 0;if(0!==n){if(n)t.push(n[2]);else if(/^(272|919|986)$/.test(e))i[e]=0;else{var r=new Promise(function(t,r){n=i[e]=[t,r]});t.push(n[2]=r);var o=d.p+d.u(e),u=Error();d.l(o,function(t){if(d.o(i,e)&&(0!==(n=i[e])&&(i[e]=void 0),n)){var r=t&&("load"===t.type?"missing":t.type),o=t&&t.target&&t.target.src;u.message="Loading chunk "+e+" failed.\n("+r+": "+o+")",u.name="ChunkLoadError",u.type=r,u.request=o,n[1](u)}},"chunk-"+e,e)}}},d.O.j=function(e){return 0===i[e]},c=function(e,t){var n,r,o=t[0],u=t[1],c=t[2],f=0;if(o.some(function(e){return 0!==i[e]})){for(n in u)d.o(u,n)&&(d.m[n]=u[n]);if(c)var a=c(d)}for(e&&e(t);f<o.length;f++)r=o[f],d.o(i,r)&&i[r]&&i[r][0](),i[r]=0;return d.O(a)},(f=self.webpackChunk_N_E=self.webpackChunk_N_E||[]).forEach(c.bind(null,0)),f.push=c.bind(null,f.push.bind(f))}();

File diff suppressed because one or more lines are too long

View file

@ -1 +0,0 @@
@font-face{font-family:__Inter_cf7686;font-style:normal;font-weight:100 900;font-display:swap;src:url(/ui/_next/static/media/55c55f0601d81cf3-s.woff2) format("woff2");unicode-range:u+0460-052f,u+1c80-1c8a,u+20b4,u+2de0-2dff,u+a640-a69f,u+fe2e-fe2f}@font-face{font-family:__Inter_cf7686;font-style:normal;font-weight:100 900;font-display:swap;src:url(/ui/_next/static/media/26a46d62cd723877-s.woff2) format("woff2");unicode-range:u+0301,u+0400-045f,u+0490-0491,u+04b0-04b1,u+2116}@font-face{font-family:__Inter_cf7686;font-style:normal;font-weight:100 900;font-display:swap;src:url(/ui/_next/static/media/97e0cb1ae144a2a9-s.woff2) format("woff2");unicode-range:u+1f??}@font-face{font-family:__Inter_cf7686;font-style:normal;font-weight:100 900;font-display:swap;src:url(/ui/_next/static/media/581909926a08bbc8-s.woff2) format("woff2");unicode-range:u+0370-0377,u+037a-037f,u+0384-038a,u+038c,u+038e-03a1,u+03a3-03ff}@font-face{font-family:__Inter_cf7686;font-style:normal;font-weight:100 900;font-display:swap;src:url(/ui/_next/static/media/df0a9ae256c0569c-s.woff2) format("woff2");unicode-range:u+0102-0103,u+0110-0111,u+0128-0129,u+0168-0169,u+01a0-01a1,u+01af-01b0,u+0300-0301,u+0303-0304,u+0308-0309,u+0323,u+0329,u+1ea0-1ef9,u+20ab}@font-face{font-family:__Inter_cf7686;font-style:normal;font-weight:100 900;font-display:swap;src:url(/ui/_next/static/media/6d93bde91c0c2823-s.woff2) format("woff2");unicode-range:u+0100-02ba,u+02bd-02c5,u+02c7-02cc,u+02ce-02d7,u+02dd-02ff,u+0304,u+0308,u+0329,u+1d00-1dbf,u+1e00-1e9f,u+1ef2-1eff,u+2020,u+20a0-20ab,u+20ad-20c0,u+2113,u+2c60-2c7f,u+a720-a7ff}@font-face{font-family:__Inter_cf7686;font-style:normal;font-weight:100 900;font-display:swap;src:url(/ui/_next/static/media/a34f9d1faa5f3315-s.p.woff2) 
format("woff2");unicode-range:u+00??,u+0131,u+0152-0153,u+02bb-02bc,u+02c6,u+02da,u+02dc,u+0304,u+0308,u+0329,u+2000-206f,u+20ac,u+2122,u+2191,u+2193,u+2212,u+2215,u+feff,u+fffd}@font-face{font-family:__Inter_Fallback_cf7686;src:local("Arial");ascent-override:90.49%;descent-override:22.56%;line-gap-override:0.00%;size-adjust:107.06%}.__className_cf7686{font-family:__Inter_cf7686,__Inter_Fallback_cf7686;font-style:normal}

View file

@ -1 +0,0 @@
self.__BUILD_MANIFEST={__rewrites:{afterFiles:[],beforeFiles:[],fallback:[]},"/_error":["static/chunks/pages/_error-28b803cb2479b966.js"],sortedPages:["/_app","/_error"]},self.__BUILD_MANIFEST_CB&&self.__BUILD_MANIFEST_CB();

View file

@ -1 +0,0 @@
self.__SSG_MANIFEST=new Set([]);self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB()

View file

@ -1,5 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<svg width="46" height="46" viewBox="0 0 46 46" fill="none" xmlns="http://www.w3.org/2000/svg">
<circle cx="23" cy="23" r="23" fill="white"/>
<path d="M32.73 7h-6.945L38.45 39h6.945L32.73 7ZM12.665 7 0 39h7.082l2.59-6.72h13.25l2.59 6.72h7.082L19.929 7h-7.264Zm-.702 19.337 4.334-11.246 4.334 11.246h-8.668Z" fill="#000000"></path>
</svg>

Before

Width:  |  Height:  |  Size: 381 B

Binary file not shown.

Before

Width:  |  Height:  |  Size: 414 B

View file

@ -1,34 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 26.0.3, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.0" id="katman_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 600 450" style="enable-background:new 0 0 600 450;" xml:space="preserve">
<style type="text/css">
.st0{fill:none;}
.st1{fill-rule:evenodd;clip-rule:evenodd;fill:#343B45;}
.st2{fill-rule:evenodd;clip-rule:evenodd;fill:#F4981A;}
</style>
<g id="_x31__stroke">
<g id="Amazon_1_">
<rect x="161.2" y="86.5" class="st0" width="277.8" height="277.8"/>
<g id="Amazon">
<path class="st1" d="M315,163.7c-8,0.6-17.2,1.2-26.4,2.4c-14.1,1.9-28.2,4.3-39.8,9.8c-22.7,9.2-38,28.8-38,57.6
c0,36.2,23.3,54.6,52.7,54.6c9.8,0,17.8-1.2,25.1-3.1c11.7-3.7,21.5-10.4,33.1-22.7c6.7,9.2,8.6,13.5,20.2,23.3
c3.1,1.2,6.1,1.2,8.6-0.6c7.4-6.1,20.3-17.2,27-23.3c3.1-2.5,2.5-6.1,0.6-9.2c-6.7-8.6-13.5-16-13.5-32.5V165
c0-23.3,1.9-44.8-15.3-60.7c-14.1-12.9-36.2-17.8-53.4-17.8h-7.4c-31.2,1.8-64.3,15.3-71.7,54c-1.2,4.9,2.5,6.8,4.9,7.4l34.3,4.3
c3.7-0.6,5.5-3.7,6.1-6.7c3.1-13.5,14.1-20.2,26.3-21.5h2.5c7.4,0,15.3,3.1,19.6,9.2c4.9,7.4,4.3,17.2,4.3,25.8L315,163.7
L315,163.7z M308.2,236.7c-4.3,8.6-11.7,14.1-19.6,16c-1.2,0-3.1,0.6-4.9,0.6c-13.5,0-21.4-10.4-21.4-25.8
c0-19.6,11.6-28.8,26.3-33.1c8-1.8,17.2-2.5,26.4-2.5v7.4C315,213.4,315.6,224.4,308.2,236.7z"/>
<path class="st2" d="M398.8,311.4c-1.4,0-2.8,0.3-4.1,0.9c-1.5,0.6-3,1.3-4.4,1.9l-2.1,0.9l-2.7,1.1v0
c-29.8,12.1-61.1,19.2-90.1,19.8c-1.1,0-2.1,0-3.2,0c-45.6,0-82.8-21.1-120.3-42c-1.3-0.7-2.7-1-4-1c-1.7,0-3.4,0.6-4.7,1.8
c-1.3,1.2-2,2.9-2,4.7c0,2.3,1.2,4.4,2.9,5.7c35.2,30.6,73.8,59,125.7,59c1,0,2,0,3.1,0c33-0.7,70.3-11.9,99.3-30.1l0.2-0.1
c3.8-2.3,7.6-4.9,11.2-7.7c2.2-1.6,3.8-4.2,3.8-6.9C407.2,314.6,403.2,311.4,398.8,311.4z M439,294.5L439,294.5
c-0.1-2.9-0.7-5.1-1.9-6.9l-0.1-0.2l-0.1-0.2c-1.2-1.3-2.4-1.8-3.7-2.4c-3.8-1.5-9.3-2.3-16-2.3c-4.8,0-10.1,0.5-15.4,1.6l0-0.4
l-5.3,1.8l-0.1,0l-3,1v0.1c-3.5,1.5-6.8,3.3-9.8,5.5c-1.9,1.4-3.4,3.2-3.5,6.1c0,1.5,0.7,3.3,2,4.3c1.3,1,2.8,1.4,4.1,1.4
c0.3,0,0.6,0,0.9-0.1l0.3,0l0.2,0c2.6-0.6,6.4-0.9,10.9-1.6c3.8-0.4,7.9-0.7,11.4-0.7c2.5,0,4.7,0.2,6.3,0.5
c0.8,0.2,1.3,0.4,1.6,0.5c0.1,0,0.2,0.1,0.2,0.1c0.1,0.2,0.2,0.8,0.1,1.5c0,2.9-1.2,8.4-2.9,13.7c-1.7,5.3-3.7,10.7-5,14.2
c-0.3,0.8-0.5,1.7-0.5,2.7c0,1.4,0.6,3.2,1.8,4.3c1.2,1.1,2.8,1.6,4.1,1.6h0.1c2,0,3.6-0.8,5.1-1.9
c13.6-12.2,18.3-31.7,18.5-42.6L439,294.5z"/>
</g>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 2.5 KiB

View file

@ -1 +0,0 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Bedrock</title><defs><linearGradient id="lobe-icons-bedrock-fill" x1="80%" x2="20%" y1="20%" y2="80%"><stop offset="0%" stop-color="#6350FB"></stop><stop offset="50%" stop-color="#3D8FFF"></stop><stop offset="100%" stop-color="#9AD8F8"></stop></linearGradient></defs><path d="M13.05 15.513h3.08c.214 0 .389.177.389.394v1.82a1.704 1.704 0 011.296 1.661c0 .943-.755 1.708-1.685 1.708-.931 0-1.686-.765-1.686-1.708 0-.807.554-1.484 1.297-1.662v-1.425h-2.69v4.663a.395.395 0 01-.188.338l-2.69 1.641a.385.385 0 01-.405-.002l-4.926-3.086a.395.395 0 01-.185-.336V16.3L2.196 14.87A.395.395 0 012 14.555L2 14.528V9.406c0-.14.073-.27.192-.34l2.465-1.462V4.448c0-.129.062-.249.165-.322l.021-.014L9.77 1.058a.385.385 0 01.407 0l2.69 1.675a.395.395 0 01.185.336V7.6h3.856V5.683a1.704 1.704 0 01-1.296-1.662c0-.943.755-1.708 1.685-1.708.931 0 1.685.765 1.685 1.708 0 .807-.553 1.484-1.296 1.662v2.311a.391.391 0 01-.389.394h-4.245v1.806h6.624a1.69 1.69 0 011.64-1.313c.93 0 1.685.764 1.685 1.707 0 .943-.754 1.708-1.685 1.708a1.69 1.69 0 01-1.64-1.314H13.05v1.937h4.953l.915 1.18a1.66 1.66 0 01.84-.227c.931 0 1.685.764 1.685 1.707 0 .943-.754 1.708-1.685 1.708-.93 0-1.685-.765-1.685-1.708 0-.346.102-.668.276-.937l-.724-.935H13.05v1.806zM9.973 1.856L7.93 3.122V6.09h-.778V3.604L5.435 4.669v2.945l2.11 1.36L9.712 7.61V5.334h.778V7.83c0 .136-.07.263-.184.335L7.963 9.638v2.081l1.422 1.009-.446.646-1.406-.998-1.53 1.005-.423-.66 1.605-1.055v-1.99L5.038 8.29l-2.26 1.34v1.676l1.972-1.189.398.677-2.37 1.429V14.3l2.166 1.258 2.27-1.368.397.677-2.176 1.311V19.3l1.876 1.175 2.365-1.426.398.678-2.017 1.216 1.918 1.201 2.298-1.403v-5.78l-4.758 2.893-.4-.675 5.158-3.136V3.289L9.972 1.856zM16.13 18.47a.913.913 0 00-.908.92c0 .507.406.918.908.918a.913.913 0 00.907-.919.913.913 0 00-.907-.92zm3.63-3.81a.913.913 0 00-.908.92c0 .508.406.92.907.92a.913.913 0 00.908-.92.913.913 0 
00-.908-.92zm1.555-4.99a.913.913 0 00-.908.92c0 .507.407.918.908.918a.913.913 0 00.907-.919.913.913 0 00-.907-.92zM17.296 3.1a.913.913 0 00-.907.92c0 .508.406.92.907.92a.913.913 0 00.908-.92.913.913 0 00-.908-.92z" fill="url(#lobe-icons-bedrock-fill)" fill-rule="nonzero"></path></svg>

Before

Width:  |  Height:  |  Size: 2.2 KiB

View file

@ -1,89 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 26.0.3, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.0" id="katman_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 800 600" style="enable-background:new 0 0 800 600;" xml:space="preserve">
<style type="text/css">
.st0{fill-rule:evenodd;clip-rule:evenodd;fill:#F05A28;}
.st1{fill-rule:evenodd;clip-rule:evenodd;fill:#231F20;}
</style>
<g id="Contact">
<g id="Contact-us" transform="translate(-234.000000, -1114.000000)">
<g id="map" transform="translate(-6.000000, 1027.000000)">
<g id="Contact-box" transform="translate(190.000000, 36.000000)">
<g id="Group-26" transform="translate(50.000000, 51.000000)">
<g id="Group-3">
<path id="Fill-1" class="st0" d="M220.9,421c-17,0-33.1-3.4-47.8-9.5c-22-9.2-40.8-24.6-54.1-44c-13.3-19.4-21-42.7-21-67.9
c0-16.8,3.4-32.7,9.7-47.3c9.3-21.8,24.9-40.3,44.5-53.4c19.6-13.1,43.2-20.7,68.7-20.7v-18.3c-19.5,0-38.1,3.9-55.1,11
c-25.4,10.6-47,28.3-62.2,50.6c-15.3,22.3-24.2,49.2-24.2,78.1c0,19.3,4,37.7,11.1,54.4c10.7,25.1,28.7,46.4,51.2,61.5
c22.6,15.1,49.8,23.9,79.1,23.9V421z"/>
<path id="Fill-4" class="st0" d="M157.9,374.1c-11.5-9.6-20.1-21.2-25.9-33.9c-5.8-12.7-8.8-26.4-8.8-40.2
c0-11,1.9-22,5.6-32.5c3.8-10.5,9.4-20.5,17.1-29.6c9.6-11.4,21.3-20,34-25.8c12.7-5.8,26.6-8.7,40.4-8.7
c11,0,22.1,1.9,32.6,5.6c10.6,3.8,20.6,9.4,29.7,17l11.9-14.1c-10.8-9-22.8-15.8-35.4-20.2c-12.6-4.5-25.7-6.7-38.8-6.7
c-16.5,0-32.9,3.5-48.1,10.4c-15.2,6.9-29.1,17.2-40.5,30.7c-9.1,10.8-15.8,22.7-20.3,35.2c-4.5,12.5-6.7,25.6-6.7,38.7
c0,16.4,3.5,32.8,10.4,47.9c6.9,15.1,17.3,29,30.9,40.3L157.9,374.1z"/>
<path id="Fill-6" class="st0" d="M186.4,362.2c-12.1-6.4-21.6-15.7-28.1-26.6c-6.5-10.9-9.9-23.5-9.9-36.2
c0-11.2,2.6-22.5,8.3-33c6.4-12.1,15.8-21.5,26.8-27.9c11-6.5,23.6-9.9,36.4-9.9c11.2,0,22.6,2.6,33.2,8.2l8.6-16.3
c-13.3-7-27.7-10.4-41.9-10.3c-16.1,0-32,4.3-45.8,12.4c-13.8,8.1-25.7,20.1-33.7,35.2c-7,13.3-10.4,27.6-10.4,41.6
c0,16,4.3,31.8,12.5,45.5c8.2,13.8,20.2,25.5,35.4,33.5L186.4,362.2z"/>
<path id="Fill-8" class="st0" d="M221,344.6c-6.3,0-12.3-1.3-17.7-3.6c-8.2-3.4-15.1-9.2-20-16.5c-4.9-7.3-7.8-16-7.8-25.4
c0-6.3,1.3-12.3,3.6-17.7c3.4-8.1,9.2-15.1,16.5-20c7.3-4.9,16-7.8,25.4-7.8v-18.4c-8.8,0-17.2,1.8-24.9,5
c-11.5,4.9-21.2,12.9-28.1,23.1C161,273.6,157,286,157,299.2c0,8.8,1.8,17.2,5,24.9c4.9,11.5,13,21.2,23.2,28.1
C195.4,359,207.7,363,221,363V344.6z"/>
</g>
<g id="Group" transform="translate(22.000000, 13.000000)">
<path id="Fill-10" class="st1" d="M214,271.6c-2.1-2.2-4.4-4-6.7-5.3c-2.3-1.3-4.7-2-7.2-2c-3.4,0-6.3,0.6-9,1.8
c-2.6,1.2-4.9,2.8-6.8,4.9c-1.9,2-3.3,4.4-4.3,7c-1,2.6-1.4,5.4-1.4,8.2c0,2.8,0.5,5.6,1.4,8.2c1,2.6,2.4,5,4.3,7
c1.9,2,4.1,3.7,6.8,4.9c2.6,1.2,5.6,1.8,9,1.8c2.8,0,5.5-0.6,7.9-1.7c2.4-1.2,4.5-2.9,6.2-5.1l12.2,13.1
c-1.8,1.8-3.9,3.4-6.3,4.7c-2.4,1.3-4.8,2.4-7.2,3.2s-4.8,1.4-7,1.7c-2.2,0.4-4.2,0.5-5.8,0.5c-5.5,0-10.7-0.9-15.5-2.7
c-4.9-1.8-9.1-4.4-12.6-7.8c-3.6-3.3-6.4-7.4-8.5-12.1c-2.1-4.7-3.1-10-3.1-15.7c0-5.8,1-11,3.1-15.7
c2.1-4.7,4.9-8.7,8.5-12.1c3.6-3.3,7.8-5.9,12.6-7.8c4.9-1.8,10.1-2.7,15.5-2.7c4.7,0,9.4,0.9,14.1,2.7
c4.7,1.8,8.9,4.6,12.4,8.4L214,271.6z"/>
<path id="Fill-12" class="st1" d="M280.4,278.9c-0.1-5.4-1.8-9.6-5-12.7c-3.3-3.1-7.8-4.6-13.6-4.6c-5.5,0-9.8,1.6-13,4.7
c-3.2,3.1-5.2,7.4-5.9,12.6H280.4z M243,292.6c0.6,5.5,2.7,9.7,6.4,12.8c3.7,3,8.1,4.6,13.3,4.6c4.6,0,8.4-0.9,11.5-2.8
c3.1-1.9,5.8-4.2,8.2-7.1l13.1,9.9c-4.3,5.3-9,9-14.3,11.3c-5.3,2.2-10.8,3.3-16.6,3.3c-5.5,0-10.7-0.9-15.5-2.7
c-4.9-1.8-9.1-4.4-12.6-7.8c-3.6-3.3-6.4-7.4-8.5-12.1c-2.1-4.7-3.1-10-3.1-15.7c0-5.8,1-11,3.1-15.7
c2.1-4.7,4.9-8.7,8.5-12.1c3.6-3.3,7.8-5.9,12.6-7.8c4.9-1.8,10.1-2.7,15.5-2.7c5.1,0,9.7,0.9,13.9,2.7
c4.2,1.8,7.8,4.3,10.8,7.7c3,3.3,5.3,7.5,7,12.4c1.7,4.9,2.5,10.6,2.5,17v5H243z"/>
<path id="Fill-14" class="st1" d="M306.5,249.7h18.3v11.5h0.3c2-4.3,4.9-7.5,8.7-9.9c3.8-2.3,8.1-3.5,12.9-3.5
c1.1,0,2.2,0.1,3.3,0.3c1.1,0.2,2.2,0.5,3.3,0.8v17.6c-1.5-0.4-3-0.7-4.5-1c-1.5-0.3-2.9-0.4-4.3-0.4c-4.3,0-7.7,0.8-10.3,2.4
c-2.6,1.6-4.6,3.4-5.9,5.4c-1.4,2-2.3,4.1-2.7,6.1c-0.5,2-0.7,3.5-0.7,4.6v39h-18.3V249.7z"/>
<path id="Fill-16" class="st1" d="M409,278.9c-0.1-5.4-1.8-9.6-5-12.7c-3.3-3.1-7.8-4.6-13.6-4.6c-5.5,0-9.8,1.6-13,4.7
c-3.2,3.1-5.2,7.4-5.9,12.6H409z M371.6,292.6c0.6,5.5,2.7,9.7,6.4,12.8c3.7,3,8.1,4.6,13.3,4.6c4.6,0,8.4-0.9,11.5-2.8
c3.1-1.9,5.8-4.2,8.2-7.1l13.1,9.9c-4.3,5.3-9,9-14.3,11.3c-5.3,2.2-10.8,3.3-16.6,3.3c-5.5,0-10.7-0.9-15.5-2.7
c-4.9-1.8-9.1-4.4-12.6-7.8c-3.6-3.3-6.4-7.4-8.5-12.1c-2.1-4.7-3.1-10-3.1-15.7c0-5.8,1-11,3.1-15.7
c2.1-4.7,4.9-8.7,8.5-12.1c3.6-3.3,7.8-5.9,12.6-7.8c4.9-1.8,10.1-2.7,15.5-2.7c5.1,0,9.7,0.9,13.9,2.7
c4.2,1.8,7.8,4.3,10.8,7.7c3,3.3,5.3,7.5,7,12.4c1.7,4.9,2.5,10.6,2.5,17v5H371.6z"/>
<path id="Fill-18" class="st1" d="M494.6,286.2c0-2.8-0.5-5.6-1.5-8.2c-1-2.6-2.4-5-4.3-7c-1.9-2-4.2-3.7-6.9-4.9
c-2.7-1.2-5.7-1.8-9.1-1.8c-3.4,0-6.4,0.6-9.1,1.8c-2.7,1.2-5,2.8-6.9,4.9c-1.9,2-3.3,4.4-4.3,7c-1,2.6-1.5,5.4-1.5,8.2
c0,2.8,0.5,5.6,1.5,8.2c1,2.6,2.4,5,4.3,7c1.9,2,4.2,3.7,6.9,4.9c2.7,1.2,5.7,1.8,9.1,1.8c3.4,0,6.4-0.6,9.1-1.8
c2.7-1.2,5-2.8,6.9-4.9c1.9-2,3.3-4.4,4.3-7C494.1,291.8,494.6,289,494.6,286.2L494.6,286.2z M433.2,207.6h18.5v51.3h0.5
c0.9-1.2,2.1-2.5,3.5-3.7c1.4-1.3,3.2-2.5,5.2-3.6c2.1-1.1,4.4-2,7.1-2.7c2.7-0.7,5.8-1.1,9.3-1.1c5.2,0,10.1,1,14.5,3
c4.4,2,8.2,4.7,11.3,8.1c3.1,3.5,5.6,7.5,7.3,12.2c1.7,4.7,2.6,9.7,2.6,15.1c0,5.4-0.8,10.4-2.5,15.1
c-1.6,4.7-4.1,8.7-7.2,12.2c-3.2,3.5-7,6.2-11.6,8.1c-4.5,2-9.6,3-15.3,3c-5.2,0-10.1-1-14.7-3c-4.5-2-8.1-5.3-10.8-9.7h-0.3
v11h-17.6V207.6z"/>
<path id="Fill-20" class="st1" d="M520.9,249.7h18.3v11.5h0.3c2-4.3,4.9-7.5,8.7-9.9c3.8-2.3,8.1-3.5,12.9-3.5
c1.1,0,2.2,0.1,3.3,0.3c1.1,0.2,2.2,0.5,3.3,0.8v17.6c-1.5-0.4-3-0.7-4.5-1c-1.5-0.3-2.9-0.4-4.3-0.4c-4.3,0-7.7,0.8-10.3,2.4
c-2.6,1.6-4.6,3.4-5.9,5.4c-1.4,2-2.3,4.1-2.7,6.1c-0.5,2-0.7,3.5-0.7,4.6v39h-18.3V249.7z"/>
<path id="Fill-22" class="st1" d="M616,290h-3.9c-2.6,0-5.5,0.1-8.7,0.3c-3.2,0.2-6.2,0.7-9.1,1.4c-2.8,0.8-5.2,1.9-7.2,3.3
c-2,1.5-2.9,3.5-2.9,6.2c0,1.7,0.4,3.2,1.2,4.3c0.8,1.2,1.8,2.2,3,3c1.2,0.8,2.6,1.4,4.2,1.8c1.5,0.4,3.1,0.5,4.6,0.5
c6.4,0,11.1-1.5,14.2-4.5c3-3,4.6-7.1,4.6-12.2V290z M617.1,312.7h-0.5c-2.7,4.2-6.1,7.2-10.2,9.1c-4.1,1.9-8.7,2.8-13.6,2.8
c-3.4,0-6.7-0.5-10-1.4s-6.1-2.3-8.7-4.1c-2.5-1.8-4.6-4.1-6.1-6.8s-2.3-5.9-2.3-9.6c0-4,0.7-7.3,2.2-10.1
c1.4-2.8,3.4-5.1,5.8-7c2.4-1.9,5.2-3.4,8.4-4.5c3.2-1.1,6.5-2,10-2.5c3.5-0.6,6.9-0.9,10.5-1.1c3.5-0.2,6.8-0.2,9.9-0.2h4.6
v-2c0-4.6-1.6-8-4.8-10.3c-3.2-2.3-7.3-3.4-12.2-3.4c-3.9,0-7.6,0.7-11,2.1c-3.4,1.4-6.4,3.2-8.8,5.6l-9.8-9.6
c4.1-4.2,9-7.1,14.5-9c5.5-1.8,11.2-2.7,17.1-2.7c5.3,0,9.7,0.6,13.3,1.7c3.6,1.2,6.6,2.7,9,4.5c2.4,1.8,4.2,3.9,5.5,6.3
c1.3,2.4,2.2,4.8,2.8,7.2c0.6,2.4,0.9,4.8,1,7.1c0.1,2.3,0.2,4.3,0.2,6v42h-16.7V312.7z"/>
<path id="Fill-24" class="st1" d="M683.6,269.9c-3.6-5-8.4-7.5-14.4-7.5c-2.5,0-4.9,0.6-7.2,1.8c-2.4,1.2-3.5,3.2-3.5,5.9
c0,2.2,1,3.9,2.9,4.9c1.9,1,4.4,1.9,7.4,2.6c3,0.7,6.2,1.4,9.6,2.2c3.4,0.8,6.6,1.9,9.6,3.5c3,1.6,5.4,3.7,7.4,6.5
c1.9,2.7,2.9,6.5,2.9,11.3c0,4.4-0.9,8-2.8,11c-1.9,3-4.3,5.4-7.4,7.2c-3,1.8-6.4,3.1-10.2,4c-3.8,0.8-7.6,1.2-11.3,1.2
c-5.7,0-11-0.8-15.8-2.4c-4.8-1.6-9.1-4.6-12.9-8.8l12.3-11.4c2.4,2.6,4.9,4.8,7.6,6.5c2.7,1.7,6,2.5,9.9,2.5
c1.3,0,2.7-0.2,4.1-0.5c1.4-0.3,2.8-0.8,4-1.5c1.2-0.7,2.2-1.6,3-2.7c0.8-1.1,1.1-2.3,1.1-3.7c0-2.5-1-4.4-2.9-5.6
c-1.9-1.2-4.4-2.2-7.4-3c-3-0.8-6.2-1.5-9.6-2.1c-3.4-0.7-6.6-1.7-9.6-3.2c-3-1.5-5.4-3.5-7.4-6.2c-1.9-2.6-2.9-6.3-2.9-11
c0-4.1,0.8-7.6,2.5-10.6c1.7-3,3.9-5.4,6.7-7.4c2.8-1.9,5.9-3.3,9.5-4.3c3.6-0.9,7.2-1.4,10.9-1.4c4.9,0,9.8,0.8,14.6,2.5
c4.8,1.7,8.7,4.5,11.7,8.6L683.6,269.9z"/>
</g>
</g>
</g>
</g>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 8 KiB

View file

@ -1 +0,0 @@
<svg xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg" xml:space="preserve" style="enable-background:new 0 0 75 75" viewBox="0 0 75 75" width="75" height="75" ><path d="M24.3 44.7c2 0 6-.1 11.6-2.4 6.5-2.7 19.3-7.5 28.6-12.5 6.5-3.5 9.3-8.1 9.3-14.3C73.8 7 66.9 0 58.3 0h-36C10 0 0 10 0 22.3s9.4 22.4 24.3 22.4z" style="fill-rule:evenodd;clip-rule:evenodd;fill:#39594d"/><path d="M30.4 60c0-6 3.6-11.5 9.2-13.8l11.3-4.7C62.4 36.8 75 45.2 75 57.6 75 67.2 67.2 75 57.6 75H45.3c-8.2 0-14.9-6.7-14.9-15z" style="fill-rule:evenodd;clip-rule:evenodd;fill:#d18ee2"/><path d="M12.9 47.6C5.8 47.6 0 53.4 0 60.5v1.7C0 69.2 5.8 75 12.9 75c7.1 0 12.9-5.8 12.9-12.9v-1.7c-.1-7-5.8-12.8-12.9-12.8z" style="fill:#ff7759"/></svg>

Before

Width:  |  Height:  |  Size: 742 B

View file

@ -1 +0,0 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>DBRX</title><path d="M21.821 9.894l-9.81 5.595L1.505 9.511 1 9.787v4.34l11.01 6.256 9.811-5.574v2.297l-9.81 5.596-10.506-5.979L1 17v.745L12.01 24 23 17.745v-4.34l-.505-.277-10.484 5.957-9.832-5.574v-2.298l9.832 5.574L23 10.532V6.255l-.547-.319-10.442 5.936-9.327-5.276 9.327-5.298 7.663 4.362.673-.383v-.532L12.011 0 1 6.255v.681l11.01 6.255 9.811-5.595z" fill="#EE3D2C" fill-rule="nonzero"></path></svg>

Before

Width:  |  Height:  |  Size: 528 B

View file

@ -1,25 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 25.4.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 292.6 215.3" style="enable-background:new 0 0 292.6 215.3;" xml:space="preserve">
<style type="text/css">
.st0{fill:#566AB2;}
</style>
<path class="st0" d="M191.3,123.7c-2.4,1-4.9,1.8-7.2,1.9c-3.6,0.2-7.6-1.3-9.7-3.1c-3.3-2.8-5.7-4.4-6.7-9.2
c-0.4-2.1-0.2-5.3,0.2-7.2c0.9-4-0.1-6.5-2.9-8.9c-2.3-1.9-5.2-2.4-8.4-2.4s-2.3-0.5-3.1-1c-1.3-0.7-2.4-2.3-1.4-4.4
c0.3-0.7,2-2.3,2.3-2.5c4.3-2.5,9.4-1.7,14,0.2c4.3,1.7,7.5,5,12.2,9.5c4.8,5.5,5.6,7,8.4,11.1c2.1,3.2,4.1,6.6,5.4,10.4
C195.2,120.5,194.2,122.4,191.3,123.7L191.3,123.7z M153.4,104.3c0-2.1,1.7-3.7,3.8-3.7s0.9,0.1,1.3,0.2c0.5,0.2,1,0.5,1.4,0.9
c0.7,0.7,1.1,1.6,1.1,2.6c0,2.1-1.7,3.8-3.8,3.8s-3.7-1.7-3.7-3.8H153.4z M141.2,182.8c-25.5-20-37.8-26.6-42.9-26.3
c-4.8,0.3-3.9,5.7-2.8,9.3c1.1,3.5,2.5,5.9,4.5,9c1.4,2,2.3,5.1-1.4,7.3c-8.2,5.1-22.5-1.7-23.1-2c-16.6-9.8-30.5-22.7-40.2-40.3
c-9.5-17-14.9-35.2-15.8-54.6c-0.2-4.7,1.1-6.4,5.8-7.2c6.2-1.1,12.5-1.4,18.7-0.5c26,3.8,48.1,15.4,66.7,33.8
c10.6,10.5,18.6,23,26.8,35.2c8.8,13,18.2,25.4,30.2,35.5c4.3,3.6,7.6,6.3,10.9,8.2c-9.8,1.1-26.1,1.3-37.2-7.5L141.2,182.8z
M289.5,18c-3.1-1.5-4.4,1.4-6.3,2.8c-0.6,0.5-1.1,1.1-1.7,1.7c-4.5,4.8-9.8,8-16.8,7.6c-10.1-0.6-18.7,2.6-26.4,10.4
c-1.6-9.5-7-15.2-15.2-18.9c-4.3-1.9-8.6-3.8-11.6-7.9c-2.1-2.9-2.7-6.2-3.7-9.4c-0.7-2-1.3-3.9-3.6-4.3c-2.4-0.4-3.4,1.7-4.3,3.4
c-3.8,7-5.3,14.6-5.2,22.4c0.3,17.5,7.7,31.5,22.4,41.4c1.7,1.1,2.1,2.3,1.6,3.9c-1,3.4-2.2,6.7-3.3,10.1c-0.7,2.2-1.7,2.7-4,1.7
c-8.1-3.4-15-8.4-21.2-14.4c-10.4-10.1-19.9-21.2-31.6-30c-2.8-2.1-5.5-4-8.4-5.7c-12-11.7,1.6-21.3,4.7-22.4
c3.3-1.2,1.2-5.3-9.5-5.2c-10.6,0-20.3,3.6-32.8,8.4c-1.8,0.7-3.7,1.2-5.7,1.7c-11.3-2.1-22.9-2.6-35.1-1.2
c-23,2.5-41.4,13.4-54.8,32C1,68.3-2.8,93.6,1.9,120c4.9,27.8,19.1,50.9,41,68.9c22.6,18.7,48.7,27.8,78.5,26.1
c18.1-1,38.2-3.5,60.9-22.7c5.7,2.8,11.7,4,21.7,4.8c7.7,0.7,15.1-0.4,20.8-1.5c9-1.9,8.4-10.2,5.1-11.7
c-26.3-12.3-20.5-7.3-25.7-11.3c13.3-15.8,33.5-32.2,41.3-85.4c0.6-4.2,0.1-6.9,0-10.3c0-2.1,0.4-2.9,2.8-3.1
c6.6-0.8,13-2.6,18.8-5.8c17-9.3,23.9-24.6,25.5-42.9c0.2-2.8,0-5.7-3-7.2L289.5,18z"/>
</svg>

Before

Width:  |  Height:  |  Size: 2.3 KiB

View file

@ -1 +0,0 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Fireworks</title><path clip-rule="evenodd" d="M14.8 5l-2.801 6.795L9.195 5H7.397l3.072 7.428a1.64 1.64 0 003.038.002L16.598 5H14.8zm1.196 10.352l5.124-5.244-.699-1.669-5.596 5.739a1.664 1.664 0 00-.343 1.807 1.642 1.642 0 001.516 1.012L16 17l8-.02-.699-1.669-7.303.041h-.002zM2.88 10.104l.699-1.669 5.596 5.739c.468.479.603 1.189.343 1.807a1.643 1.643 0 01-1.516 1.012l-8-.018-.002.002.699-1.669 7.303.042-5.122-5.246z" fill="#5019C5" fill-rule="evenodd"></path></svg>

Before

Width:  |  Height:  |  Size: 592 B

View file

@ -1,2 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<svg viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg" fill="none"><path fill="#4285F4" d="M14.9 8.161c0-.476-.039-.954-.121-1.422h-6.64v2.695h3.802a3.24 3.24 0 01-1.407 2.127v1.75h2.269c1.332-1.22 2.097-3.02 2.097-5.15z"/><path fill="#34A853" d="M8.14 15c1.898 0 3.499-.62 4.665-1.69l-2.268-1.749c-.631.427-1.446.669-2.395.669-1.836 0-3.393-1.232-3.952-2.888H1.85v1.803A7.044 7.044 0 008.14 15z"/><path fill="#FBBC04" d="M4.187 9.342a4.17 4.17 0 010-2.68V4.859H1.849a6.97 6.97 0 000 6.286l2.338-1.803z"/><path fill="#EA4335" d="M8.14 3.77a3.837 3.837 0 012.7 1.05l2.01-1.999a6.786 6.786 0 00-4.71-1.82 7.042 7.042 0 00-6.29 3.858L4.186 6.66c.556-1.658 2.116-2.89 3.952-2.89z"/></svg>

Before

Width:  |  Height:  |  Size: 728 B

View file

@ -1,3 +0,0 @@
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 26.3 26.3"><defs><style>.cls-1{fill:#f05237;}.cls-2{fill:#fff;}</style></defs><g id="Layer_2" data-name="Layer 2"><g id="Content"><circle class="cls-1" cx="13.15" cy="13.15" r="13.15"/><path class="cls-2" d="M13.17,6.88a4.43,4.43,0,0,0,0,8.85h1.45V14.07H13.17a2.77,2.77,0,1,1,2.77-2.76v4.07a2.74,2.74,0,0,1-4.67,2L10.1,18.51a4.37,4.37,0,0,0,3.07,1.29h.06a4.42,4.42,0,0,0,4.36-4.4V11.2a4.43,4.43,0,0,0-4.42-4.32"/></g></g></svg>

Before

Width:  |  Height:  |  Size: 619 B

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 7.2 KiB

View file

@ -1 +0,0 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Mistral</title><path d="M3.428 3.4h3.429v3.428H3.428V3.4zm13.714 0h3.43v3.428h-3.43V3.4z" fill="gold"></path><path d="M3.428 6.828h6.857v3.429H3.429V6.828zm10.286 0h6.857v3.429h-6.857V6.828z" fill="#FFAF00"></path><path d="M3.428 10.258h17.144v3.428H3.428v-3.428z" fill="#FF8205"></path><path d="M3.428 13.686h3.429v3.428H3.428v-3.428zm6.858 0h3.429v3.428h-3.429v-3.428zm6.856 0h3.43v3.428h-3.43v-3.428z" fill="#FA500F"></path><path d="M0 17.114h10.286v3.429H0v-3.429zm13.714 0H24v3.429H13.714v-3.429z" fill="#E10500"></path></svg>

Before

Width:  |  Height:  |  Size: 655 B

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 8.4 KiB

View file

@ -1,5 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<svg fill="#000000" viewBox="-2 -2 28 28" role="img" xmlns="http://www.w3.org/2000/svg">
<circle cx="12" cy="12" r="14" fill="white" />
<path d="M22.2819 9.8211a5.9847 5.9847 0 0 0-.5157-4.9108 6.0462 6.0462 0 0 0-6.5098-2.9A6.0651 6.0651 0 0 0 4.9807 4.1818a5.9847 5.9847 0 0 0-3.9977 2.9 6.0462 6.0462 0 0 0 .7427 7.0966 5.98 5.98 0 0 0 .511 4.9107 6.051 6.051 0 0 0 6.5146 2.9001A5.9847 5.9847 0 0 0 13.2599 24a6.0557 6.0557 0 0 0 5.7718-4.2058 5.9894 5.9894 0 0 0 3.9977-2.9001 6.0557 6.0557 0 0 0-.7475-7.0729zm-9.022 12.6081a4.4755 4.4755 0 0 1-2.8764-1.0408l.1419-.0804 4.7783-2.7582a.7948.7948 0 0 0 .3927-.6813v-6.7369l2.02 1.1686a.071.071 0 0 1 .038.052v5.5826a4.504 4.504 0 0 1-4.4945 4.4944zm-9.6607-4.1254a4.4708 4.4708 0 0 1-.5346-3.0137l.142.0852 4.783 2.7582a.7712.7712 0 0 0 .7806 0l5.8428-3.3685v2.3324a.0804.0804 0 0 1-.0332.0615L9.74 19.9502a4.4992 4.4992 0 0 1-6.1408-1.6464zM2.3408 7.8956a4.485 4.485 0 0 1 2.3655-1.9728V11.6a.7664.7664 0 0 0 .3879.6765l5.8144 3.3543-2.0201 1.1685a.0757.0757 0 0 1-.071 0l-4.8303-2.7865A4.504 4.504 0 0 1 2.3408 7.872zm16.5963 3.8558L13.1038 8.364 15.1192 7.2a.0757.0757 0 0 1 .071 0l4.8303 2.7913a4.4944 4.4944 0 0 1-.6765 8.1042v-5.6772a.79.79 0 0 0-.407-.667zm2.0107-3.0231l-.142-.0852-4.7735-2.7818a.7759.7759 0 0 0-.7854 0L9.409 9.2297V6.8974a.0662.0662 0 0 1 .0284-.0615l4.8303-2.7866a4.4992 4.4992 0 0 1 6.6802 4.66zM8.3065 12.863l-2.02-1.1638a.0804.0804 0 0 1-.038-.0567V6.0742a4.4992 4.4992 0 0 1 7.3757-3.4537l-.142.0805L8.704 5.459a.7948.7948 0 0 0-.3927.6813zm1.0976-2.3654l2.602-1.4998 2.6069 1.4998v2.9994l-2.5974 1.4997-2.6067-1.4997Z"/>
</svg>

Before

Width:  |  Height:  |  Size: 1.6 KiB

View file

@ -1,39 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg id="Layer_1" xmlns="http://www.w3.org/2000/svg" version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 300 300">
<!-- Generator: Adobe Illustrator 29.2.1, SVG Export Plug-In . SVG Version: 2.1.0 Build 116) -->
<defs>
<style>
.st0 {
fill: none;
}
.st1 {
stroke-width: 52.7px;
}
.st1, .st2 {
stroke: #000;
stroke-miterlimit: 2.3;
}
.st2 {
stroke-width: .6px;
}
.st3 {
clip-path: url(#clippath);
}
</style>
<clipPath id="clippath">
<rect class="st0" width="300" height="300"/>
</clipPath>
</defs>
<g class="st3">
<g>
<path class="st1" d="M1.8,145.9c8.8,0,42.8-7.6,60.4-17.5s17.6-10,53.9-35.7c46-32.6,78.5-21.7,131.8-21.7"/>
<path class="st2" d="M299.4,71.2l-90.1,52V19.2l90.1,52Z"/>
<path class="st1" d="M0,145.9c8.8,0,42.8,7.6,60.4,17.5s17.6,10,53.9,35.7c46,32.6,78.5,21.7,131.8,21.7"/>
<path class="st2" d="M297.7,220.6l-90.1-52v104l90.1-52Z"/>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 1.1 KiB

View file

@ -1,16 +0,0 @@
<?xml version="1.0" encoding="iso-8859-1"?>
<!-- Generator: Adobe Illustrator 26.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 48 48" style="enable-background:new 0 0 48 48;" xml:space="preserve">
<linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="10.5862" y1="1.61" x2="36.0543" y2="44.1206">
<stop offset="0.002" style="stop-color:#9C55D4"/>
<stop offset="0.003" style="stop-color:#20808D"/>
<stop offset="0.3731" style="stop-color:#218F9B"/>
<stop offset="1" style="stop-color:#22B1BC"/>
</linearGradient>
<path style="fill-rule:evenodd;clip-rule:evenodd;fill:url(#SVGID_1_);" d="M11.469,4l11.39,10.494v-0.002V4.024h2.217v10.517
L36.518,4v11.965h4.697v17.258h-4.683v10.654L25.077,33.813v10.18h-2.217V33.979L11.482,44V33.224H6.785V15.965h4.685V4z
M21.188,18.155H9.002v12.878h2.477v-4.062L21.188,18.155z M13.699,27.943v11.17l9.16-8.068V19.623L13.699,27.943z M25.141,30.938
V19.612l9.163,8.321v5.291h0.012v5.775L25.141,30.938z M36.532,31.033h2.466V18.155H26.903l9.629,8.725V31.033z M34.301,15.965
V9.038l-7.519,6.927H34.301z M21.205,15.965h-7.519V9.038L21.205,15.965z"/>
</svg>

Before

Width:  |  Height:  |  Size: 1.2 KiB

View file

@ -1 +0,0 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>SambaNova</title><path d="M23 23h-1.223V8.028c0-3.118-2.568-5.806-5.744-5.806H8.027c-3.176 0-5.744 2.565-5.744 5.686 0 3.119 2.568 5.684 5.744 5.684h.794c1.346 0 2.445 1.1 2.445 2.444 0 1.346-1.1 2.446-2.445 2.446H1v-1.223h7.761c.671 0 1.223-.551 1.223-1.16 0-.67-.552-1.16-1.223-1.16h-.794C4.177 14.872 1 11.756 1 7.909 1 4.058 4.176 1 8.027 1h8.066C19.88 1 23 4.239 23 8.028V23z" fill="#EE7624"></path><path d="M8.884 12.672c1.71.06 3.361 1.588 3.361 3.422 0 1.833-1.528 3.421-3.421 3.421H1v1.223h7.761c2.568 0 4.705-2.077 4.705-4.644 0-.672-.123-1.283-.43-1.894-.245-.551-.67-1.1-1.099-1.528-.489-.429-1.039-.734-1.65-.977-.525-.175-1.048-.193-1.594-.212-.218-.008-.441-.016-.669-.034-.428 0-1.406-.245-1.956-.61a3.369 3.369 0 01-1.223-1.406c-.183-.489-.305-.977-.305-1.528A3.417 3.417 0 017.96 4.482h8.066c1.895 0 3.422 1.65 3.422 3.483v15.032h1.223V8.027c0-2.568-2.077-4.768-4.645-4.768h-8c-2.568 0-4.705 2.077-4.705 4.646 0 .67.123 1.282.43 1.894a4.45 4.45 0 001.099 1.528c.429.428 1.039.734 1.588.976.306.123.611.183.976.246.857.06 1.406.123 1.466.123h.003z" fill="#EE7624"></path><path d="M1 23h7.761v-.003c3.85 0 7.03-3.116 7.09-7.026 0-3.79-3.117-6.906-6.967-6.906H8.09c-.672 0-1.222-.552-1.222-1.16 0-.608.487-1.16 1.159-1.16h8.069c.608 0 1.159.611 1.159 1.283v14.97h1.223V8.024c0-1.345-1.1-2.505-2.445-2.505H7.967a2.451 2.451 0 00-2.445 2.445 2.45 2.45 0 002.445 2.445h.794c3.176 0 5.744 2.568 5.744 5.684s-2.568 5.684-5.744 5.684H1V23z" fill="#EE7624"></path></svg>

Before

Width:  |  Height:  |  Size: 1.6 KiB

View file

@ -1,14 +0,0 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_542_18748)">
<rect width="32" height="32" rx="5.64706" fill="#F1EFED"/>
<circle cx="22.8233" cy="9.64706" r="5.64706" fill="#D3D1D1"/>
<circle cx="22.8233" cy="22.8238" r="5.64706" fill="#D3D1D1"/>
<circle cx="9.64706" cy="22.8238" r="5.64706" fill="#D3D1D1"/>
<circle cx="9.64706" cy="9.64706" r="5.64706" fill="#0F6FFF"/>
</g>
<defs>
<clipPath id="clip0_542_18748">
<rect width="32" height="32" fill="white"/>
</clipPath>
</defs>
</svg>

Before

Width:  |  Height:  |  Size: 560 B

View file

@ -1,28 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1000 1000">
<defs>
<style>
.cls-1 {
fill: #000;
}
polygon {
fill: #fff;
}
@media ( prefers-color-scheme: dark ) {
.cls-1 {
fill: #fff;
}
polygon {
fill: #000;
}
}
</style>
</defs>
<rect class="cls-1" width="1000" height="1000"/>
<g>
<polygon points="226.83 411.15 501.31 803.15 623.31 803.15 348.82 411.15 226.83 411.15" />
<polygon points="348.72 628.87 226.69 803.15 348.77 803.15 409.76 716.05 348.72 628.87" />
<polygon points="651.23 196.85 440.28 498.12 501.32 585.29 773.31 196.85 651.23 196.85" />
<polygon points="673.31 383.25 673.31 803.15 773.31 803.15 773.31 240.44 673.31 383.25" />
</g>
</svg>

Before

Width:  |  Height:  |  Size: 937 B

Binary file not shown.

Before

Width:  |  Height:  |  Size: 15 KiB

View file

@ -1 +0,0 @@
<!DOCTYPE html><html id="__next_error__"><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width, initial-scale=1"/><link rel="preload" as="script" fetchPriority="low" href="/ui/_next/static/chunks/webpack-75a5453f51d60261.js"/><script src="/ui/_next/static/chunks/fd9d1056-205af899b895cbac.js" async=""></script><script src="/ui/_next/static/chunks/117-1c5bfc45bfc4237d.js" async=""></script><script src="/ui/_next/static/chunks/main-app-2b16cdb7ff4e1af7.js" async=""></script><title>LiteLLM Dashboard</title><meta name="description" content="LiteLLM Proxy Admin UI"/><link rel="icon" href="/ui/favicon.ico" type="image/x-icon" sizes="16x16"/><meta name="next-size-adjust"/><script src="/ui/_next/static/chunks/polyfills-42372ed130431b0a.js" noModule=""></script></head><body><script src="/ui/_next/static/chunks/webpack-75a5453f51d60261.js" async=""></script><script>(self.__next_f=self.__next_f||[]).push([0]);self.__next_f.push([2,null])</script><script>self.__next_f.push([1,"1:HL[\"/ui/_next/static/media/a34f9d1faa5f3315-s.p.woff2\",\"font\",{\"crossOrigin\":\"\",\"type\":\"font/woff2\"}]\n2:HL[\"/ui/_next/static/css/86f6cc749f6b8493.css\",\"style\"]\n3:HL[\"/ui/_next/static/css/005c96178151b9fd.css\",\"style\"]\n"])</script><script>self.__next_f.push([1,"4:I[12846,[],\"\"]\n6:I[19107,[],\"ClientPageRoot\"]\n7:I[76737,[\"665\",\"static/chunks/3014691f-b7b79b78e27792f3.js\",\"990\",\"static/chunks/13b76428-ebdf3012af0e4489.js\",\"42\",\"static/chunks/42-69f5b4e6a9942a9f.js\",\"261\",\"static/chunks/261-ee7f0f1f1c8c22a0.js\",\"899\",\"static/chunks/899-57685cedd1dcbc78.js\",\"466\",\"static/chunks/466-65538e7f331af98e.js\",\"250\",\"static/chunks/250-7d480872c0e251dc.js\",\"699\",\"static/chunks/699-2176ba2273e4676d.js\",\"931\",\"static/chunks/app/page-36914b80c40b5032.js\"],\"default\",1]\n8:I[4707,[],\"\"]\n9:I[36423,[],\"\"]\nb:I[61060,[],\"\"]\nc:[]\n"])</script><script>self.__next_f.push([1,"0:[\"$\",\"$L4\",null,{\"buildId\":\"fzhvjOFL6KeNsWYrLD4y
a\",\"assetPrefix\":\"/ui\",\"urlParts\":[\"\",\"\"],\"initialTree\":[\"\",{\"children\":[\"__PAGE__\",{}]},\"$undefined\",\"$undefined\",true],\"initialSeedData\":[\"\",{\"children\":[\"__PAGE__\",{},[[\"$L5\",[\"$\",\"$L6\",null,{\"props\":{\"params\":{},\"searchParams\":{}},\"Component\":\"$7\"}],null],null],null]},[[[[\"$\",\"link\",\"0\",{\"rel\":\"stylesheet\",\"href\":\"/ui/_next/static/css/86f6cc749f6b8493.css\",\"precedence\":\"next\",\"crossOrigin\":\"$undefined\"}],[\"$\",\"link\",\"1\",{\"rel\":\"stylesheet\",\"href\":\"/ui/_next/static/css/005c96178151b9fd.css\",\"precedence\":\"next\",\"crossOrigin\":\"$undefined\"}]],[\"$\",\"html\",null,{\"lang\":\"en\",\"children\":[\"$\",\"body\",null,{\"className\":\"__className_cf7686\",\"children\":[\"$\",\"$L8\",null,{\"parallelRouterKey\":\"children\",\"segmentPath\":[\"children\"],\"error\":\"$undefined\",\"errorStyles\":\"$undefined\",\"errorScripts\":\"$undefined\",\"template\":[\"$\",\"$L9\",null,{}],\"templateStyles\":\"$undefined\",\"templateScripts\":\"$undefined\",\"notFound\":[[\"$\",\"title\",null,{\"children\":\"404: This page could not be found.\"}],[\"$\",\"div\",null,{\"style\":{\"fontFamily\":\"system-ui,\\\"Segoe UI\\\",Roboto,Helvetica,Arial,sans-serif,\\\"Apple Color Emoji\\\",\\\"Segoe UI Emoji\\\"\",\"height\":\"100vh\",\"textAlign\":\"center\",\"display\":\"flex\",\"flexDirection\":\"column\",\"alignItems\":\"center\",\"justifyContent\":\"center\"},\"children\":[\"$\",\"div\",null,{\"children\":[[\"$\",\"style\",null,{\"dangerouslySetInnerHTML\":{\"__html\":\"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}\"}}],[\"$\",\"h1\",null,{\"className\":\"next-error-h1\",\"style\":{\"display\":\"inline-block\",\"margin\":\"0 20px 0 0\",\"padding\":\"0 23px 0 
0\",\"fontSize\":24,\"fontWeight\":500,\"verticalAlign\":\"top\",\"lineHeight\":\"49px\"},\"children\":\"404\"}],[\"$\",\"div\",null,{\"style\":{\"display\":\"inline-block\"},\"children\":[\"$\",\"h2\",null,{\"style\":{\"fontSize\":14,\"fontWeight\":400,\"lineHeight\":\"49px\",\"margin\":0},\"children\":\"This page could not be found.\"}]}]]}]}]],\"notFoundStyles\":[]}]}]}]],null],null],\"couldBeIntercepted\":false,\"initialHead\":[null,\"$La\"],\"globalErrorComponent\":\"$b\",\"missingSlots\":\"$Wc\"}]\n"])</script><script>self.__next_f.push([1,"a:[[\"$\",\"meta\",\"0\",{\"name\":\"viewport\",\"content\":\"width=device-width, initial-scale=1\"}],[\"$\",\"meta\",\"1\",{\"charSet\":\"utf-8\"}],[\"$\",\"title\",\"2\",{\"children\":\"LiteLLM Dashboard\"}],[\"$\",\"meta\",\"3\",{\"name\":\"description\",\"content\":\"LiteLLM Proxy Admin UI\"}],[\"$\",\"link\",\"4\",{\"rel\":\"icon\",\"href\":\"/ui/favicon.ico\",\"type\":\"image/x-icon\",\"sizes\":\"16x16\"}],[\"$\",\"meta\",\"5\",{\"name\":\"next-size-adjust\"}]]\n5:null\n"])</script></body></html>

View file

@ -1,7 +0,0 @@
2:I[19107,[],"ClientPageRoot"]
3:I[76737,["665","static/chunks/3014691f-b7b79b78e27792f3.js","990","static/chunks/13b76428-ebdf3012af0e4489.js","42","static/chunks/42-69f5b4e6a9942a9f.js","261","static/chunks/261-ee7f0f1f1c8c22a0.js","899","static/chunks/899-57685cedd1dcbc78.js","466","static/chunks/466-65538e7f331af98e.js","250","static/chunks/250-7d480872c0e251dc.js","699","static/chunks/699-2176ba2273e4676d.js","931","static/chunks/app/page-36914b80c40b5032.js"],"default",1]
4:I[4707,[],""]
5:I[36423,[],""]
0:["fzhvjOFL6KeNsWYrLD4ya",[[["",{"children":["__PAGE__",{}]},"$undefined","$undefined",true],["",{"children":["__PAGE__",{},[["$L1",["$","$L2",null,{"props":{"params":{},"searchParams":{}},"Component":"$3"}],null],null],null]},[[[["$","link","0",{"rel":"stylesheet","href":"/ui/_next/static/css/86f6cc749f6b8493.css","precedence":"next","crossOrigin":"$undefined"}],["$","link","1",{"rel":"stylesheet","href":"/ui/_next/static/css/005c96178151b9fd.css","precedence":"next","crossOrigin":"$undefined"}]],["$","html",null,{"lang":"en","children":["$","body",null,{"className":"__className_cf7686","children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be found."}]}]]}]}]],"notFoundStyles":[]}]}]}]],null],null],["$L6",null]]]]
6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"LiteLLM Dashboard"}],["$","meta","3",{"name":"description","content":"LiteLLM Proxy Admin UI"}],["$","link","4",{"rel":"icon","href":"/ui/favicon.ico","type":"image/x-icon","sizes":"16x16"}],["$","meta","5",{"name":"next-size-adjust"}]]
1:null

View file

@ -1,7 +0,0 @@
2:I[19107,[],"ClientPageRoot"]
3:I[52829,["42","static/chunks/42-69f5b4e6a9942a9f.js","261","static/chunks/261-ee7f0f1f1c8c22a0.js","250","static/chunks/250-7d480872c0e251dc.js","699","static/chunks/699-2176ba2273e4676d.js","418","static/chunks/app/model_hub/page-a965e43ba9638156.js"],"default",1]
4:I[4707,[],""]
5:I[36423,[],""]
0:["fzhvjOFL6KeNsWYrLD4ya",[[["",{"children":["model_hub",{"children":["__PAGE__",{}]}]},"$undefined","$undefined",true],["",{"children":["model_hub",{"children":["__PAGE__",{},[["$L1",["$","$L2",null,{"props":{"params":{},"searchParams":{}},"Component":"$3"}],null],null],null]},[null,["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children","model_hub","children"],"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":"$undefined","notFoundStyles":"$undefined"}]],null]},[[[["$","link","0",{"rel":"stylesheet","href":"/ui/_next/static/css/86f6cc749f6b8493.css","precedence":"next","crossOrigin":"$undefined"}],["$","link","1",{"rel":"stylesheet","href":"/ui/_next/static/css/005c96178151b9fd.css","precedence":"next","crossOrigin":"$undefined"}]],["$","html",null,{"lang":"en","children":["$","body",null,{"className":"__className_cf7686","children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 
20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be found."}]}]]}]}]],"notFoundStyles":[]}]}]}]],null],null],["$L6",null]]]]
6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"LiteLLM Dashboard"}],["$","meta","3",{"name":"description","content":"LiteLLM Proxy Admin UI"}],["$","link","4",{"rel":"icon","href":"/ui/favicon.ico","type":"image/x-icon","sizes":"16x16"}],["$","meta","5",{"name":"next-size-adjust"}]]
1:null

View file

@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 394 80"><path fill="#000" d="M262 0h68.5v12.7h-27.2v66.6h-13.6V12.7H262V0ZM149 0v12.7H94v20.4h44.3v12.6H94v21h55v12.6H80.5V0h68.7zm34.3 0h-17.8l63.8 79.4h17.9l-32-39.7 32-39.6h-17.9l-23 28.6-23-28.6zm18.3 56.7-9-11-27.1 33.7h17.8l18.3-22.7z"/><path fill="#000" d="M81 79.3 17 0H0v79.3h13.6V17l50.2 62.3H81Zm252.6-.4c-1 0-1.8-.4-2.5-1s-1.1-1.6-1.1-2.6.3-1.8 1-2.5 1.6-1 2.6-1 1.8.3 2.5 1a3.4 3.4 0 0 1 .6 4.3 3.7 3.7 0 0 1-3 1.8zm23.2-33.5h6v23.3c0 2.1-.4 4-1.3 5.5a9.1 9.1 0 0 1-3.8 3.5c-1.6.8-3.5 1.3-5.7 1.3-2 0-3.7-.4-5.3-1s-2.8-1.8-3.7-3.2c-.9-1.3-1.4-3-1.4-5h6c.1.8.3 1.6.7 2.2s1 1.2 1.6 1.5c.7.4 1.5.5 2.4.5 1 0 1.8-.2 2.4-.6a4 4 0 0 0 1.6-1.8c.3-.8.5-1.8.5-3V45.5zm30.9 9.1a4.4 4.4 0 0 0-2-3.3 7.5 7.5 0 0 0-4.3-1.1c-1.3 0-2.4.2-3.3.5-.9.4-1.6 1-2 1.6a3.5 3.5 0 0 0-.3 4c.3.5.7.9 1.3 1.2l1.8 1 2 .5 3.2.8c1.3.3 2.5.7 3.7 1.2a13 13 0 0 1 3.2 1.8 8.1 8.1 0 0 1 3 6.5c0 2-.5 3.7-1.5 5.1a10 10 0 0 1-4.4 3.5c-1.8.8-4.1 1.2-6.8 1.2-2.6 0-4.9-.4-6.8-1.2-2-.8-3.4-2-4.5-3.5a10 10 0 0 1-1.7-5.6h6a5 5 0 0 0 3.5 4.6c1 .4 2.2.6 3.4.6 1.3 0 2.5-.2 3.5-.6 1-.4 1.8-1 2.4-1.7a4 4 0 0 0 .8-2.4c0-.9-.2-1.6-.7-2.2a11 11 0 0 0-2.1-1.4l-3.2-1-3.8-1c-2.8-.7-5-1.7-6.6-3.2a7.2 7.2 0 0 1-2.4-5.7 8 8 0 0 1 1.7-5 10 10 0 0 1 4.3-3.5c2-.8 4-1.2 6.4-1.2 2.3 0 4.4.4 6.2 1.2 1.8.8 3.2 2 4.3 3.4 1 1.4 1.5 3 1.5 5h-5.8z"/></svg>

Before

Width:  |  Height:  |  Size: 1.3 KiB

File diff suppressed because one or more lines are too long

View file

@ -1,7 +0,0 @@
2:I[19107,[],"ClientPageRoot"]
3:I[12011,["665","static/chunks/3014691f-b7b79b78e27792f3.js","42","static/chunks/42-69f5b4e6a9942a9f.js","899","static/chunks/899-57685cedd1dcbc78.js","250","static/chunks/250-7d480872c0e251dc.js","461","static/chunks/app/onboarding/page-9598003bc1e91371.js"],"default",1]
4:I[4707,[],""]
5:I[36423,[],""]
0:["fzhvjOFL6KeNsWYrLD4ya",[[["",{"children":["onboarding",{"children":["__PAGE__",{}]}]},"$undefined","$undefined",true],["",{"children":["onboarding",{"children":["__PAGE__",{},[["$L1",["$","$L2",null,{"props":{"params":{},"searchParams":{}},"Component":"$3"}],null],null],null]},[null,["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children","onboarding","children"],"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":"$undefined","notFoundStyles":"$undefined"}]],null]},[[[["$","link","0",{"rel":"stylesheet","href":"/ui/_next/static/css/86f6cc749f6b8493.css","precedence":"next","crossOrigin":"$undefined"}],["$","link","1",{"rel":"stylesheet","href":"/ui/_next/static/css/005c96178151b9fd.css","precedence":"next","crossOrigin":"$undefined"}]],["$","html",null,{"lang":"en","children":["$","body",null,{"className":"__className_cf7686","children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid 
rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be found."}]}]]}]}]],"notFoundStyles":[]}]}]}]],null],null],["$L6",null]]]]
6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"LiteLLM Dashboard"}],["$","meta","3",{"name":"description","content":"LiteLLM Proxy Admin UI"}],["$","link","4",{"rel":"icon","href":"/ui/favicon.ico","type":"image/x-icon","sizes":"16x16"}],["$","meta","5",{"name":"next-size-adjust"}]]
1:null

View file

@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 283 64"><path fill="black" d="M141 16c-11 0-19 7-19 18s9 18 20 18c7 0 13-3 16-7l-7-5c-2 3-6 4-9 4-5 0-9-3-10-7h28v-3c0-11-8-18-19-18zm-9 15c1-4 4-7 9-7s8 3 9 7h-18zm117-15c-11 0-19 7-19 18s9 18 20 18c6 0 12-3 16-7l-8-5c-2 3-5 4-8 4-5 0-9-3-11-7h28l1-3c0-11-8-18-19-18zm-10 15c2-4 5-7 10-7s8 3 9 7h-19zm-39 3c0 6 4 10 10 10 4 0 7-2 9-5l8 5c-3 5-9 8-17 8-11 0-19-7-19-18s8-18 19-18c8 0 14 3 17 8l-8 5c-2-3-5-5-9-5-6 0-10 4-10 10zm83-29v46h-9V5h9zM37 0l37 64H0L37 0zm92 5-27 48L74 5h10l18 30 17-30h10zm59 12v10l-3-1c-6 0-10 4-10 10v15h-9V17h9v9c0-5 6-9 13-9z"/></svg>

Before

Width:  |  Height:  |  Size: 629 B

View file

@ -1,4 +0,0 @@
def my_custom_rule(input):  # receives the model response
    """
    Example post-call rule hook.

    Returning False marks the model response as failing the rule (which
    triggers litellm's fallback behavior); returning True accepts it.

    As written, every response is rejected. The commented-out length check
    shows the intended real usage: reject responses that are too short.
    """
    # if len(input) < 5:  # trigger fallback if the model response is too short
    return False

View file

@ -1,40 +0,0 @@
### DEPRECATED ###
## unused file. initially written for json logging on proxy.
import json
import logging
import os
from logging import Formatter

from litellm import json_logs

# Set default log level to INFO
log_level = os.getenv("LITELLM_LOG", "INFO")
# getattr(logging, "INFO") etc. returns the numeric level constant, which is
# an int — the previous `str` annotation was wrong.
numeric_level: int = getattr(logging, log_level.upper())
class JsonFormatter(Formatter):
    """Render each log record as a single-line JSON object.

    The serialized record carries three fields: the fully formatted message,
    the level name, and a timestamp produced by ``Formatter.formatTime``.
    """

    def __init__(self):
        super().__init__()

    def format(self, record):
        """Return *record* serialized as a JSON string."""
        payload = dict(
            message=record.getMessage(),
            level=record.levelname,
            timestamp=self.formatTime(record, self.datefmt),
        )
        return json.dumps(payload)
# Root-logger wiring: install exactly one stream handler whose formatter is
# chosen by the litellm `json_logs` flag (JSON lines vs. colorized text).
logger = logging.root
handler = logging.StreamHandler()
if json_logs:
    handler.setFormatter(JsonFormatter())
else:
    # ANSI-colorized human-readable format; timestamps are HH:MM:SS only.
    formatter = logging.Formatter(
        "\033[92m%(asctime)s - %(name)s:%(levelname)s\033[0m: %(filename)s:%(lineno)s - %(message)s",
        datefmt="%H:%M:%S",
    )

    handler.setFormatter(formatter)

# Replace (not append to) any existing root handlers so log lines are not duplicated.
logger.handlers = [handler]
logger.setLevel(numeric_level)

View file

@ -1,14 +0,0 @@
# Proxy config: a single Bedrock Claude deployment with Datadog logging enabled.
model_list:
  - model_name: bedrock-claude
    litellm_params:
      model: bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0
      aws_region_name: us-east-1
      # "os.environ/<VAR>" values are resolved from environment variables at load time.
      aws_access_key_id: os.environ/AWS_ACCESS_KEY_ID
      aws_secret_access_key: os.environ/AWS_SECRET_ACCESS_KEY

litellm_settings:
  callbacks: ["datadog"] # logs llm success + failure logs on datadog
  service_callback: ["datadog"] # logs redis, postgres failures on datadog

general_settings:
  store_prompts_in_spend_logs: true

View file

@ -1,44 +0,0 @@
# Proxy config exercising several providers (Azure, OpenAI, Bedrock, OpenRouter,
# Databricks, xAI) plus Datadog LLM-observability logging.
# "os.environ/<VAR>" values are resolved from environment variables at load time.
model_list:
  - model_name: "gpt-4o-azure"
    litellm_params:
      model: azure/gpt-4o
      api_key: os.environ/AZURE_API_KEY
      api_base: os.environ/AZURE_API_BASE
  - model_name: "gpt-4o-mini-openai"
    litellm_params:
      model: gpt-4o-mini
      api_key: os.environ/OPENAI_API_KEY
  - model_name: "bedrock-nova"
    litellm_params:
      model: us.amazon.nova-pro-v1:0
  - model_name: openrouter_model
    litellm_params:
      model: openrouter/openrouter_model
      api_key: os.environ/OPENROUTER_API_KEY
      api_base: http://0.0.0.0:8090
  - model_name: "claude-3-7-sonnet"
    litellm_params:
      model: databricks/databricks-claude-3-7-sonnet
      api_key: os.environ/DATABRICKS_API_KEY
      api_base: os.environ/DATABRICKS_API_BASE
  - model_name: "gpt-4.1"
    litellm_params:
      model: azure/gpt-4.1
      api_key: os.environ/AZURE_API_KEY_REALTIME
      api_base: https://krris-m2f9a9i7-eastus2.openai.azure.com/
  # Wildcard route: any "xai/<model>" request is forwarded with the xAI key.
  - model_name: "xai/*"
    litellm_params:
      model: xai/*
      api_key: os.environ/XAI_API_KEY

litellm_settings:
  num_retries: 0
  callbacks: ["datadog_llm_observability"]
  check_provider_endpoint: true

files_settings:
  - custom_llm_provider: gemini
    api_key: os.environ/GEMINI_API_KEY

general_settings:
  store_prompts_in_spend_logs: true

View file

@ -1,110 +0,0 @@
model_list:
- model_name: claude-3-5-sonnet
litellm_params:
model: claude-3-haiku-20240307
# - model_name: gemini-1.5-flash-gemini
# litellm_params:
# model: vertex_ai_beta/gemini-1.5-flash
# api_base: https://gateway.ai.cloudflare.com/v1/fa4cdcab1f32b95ca3b53fd36043d691/test/google-vertex-ai/v1/projects/adroit-crow-413218/locations/us-central1/publishers/google/models/gemini-1.5-flash
- litellm_params:
api_base: http://0.0.0.0:8080
api_key: ''
model: gpt-4o
rpm: 800
input_cost_per_token: 300
model_name: gpt-4o
- model_name: llama3-70b-8192
litellm_params:
model: groq/llama3-70b-8192
- model_name: fake-openai-endpoint
litellm_params:
model: predibase/llama-3-8b-instruct
api_key: os.environ/PREDIBASE_API_KEY
tenant_id: os.environ/PREDIBASE_TENANT_ID
max_new_tokens: 256
# - litellm_params:
# api_base: https://my-endpoint-europe-berri-992.openai.azure.com/
# api_key: os.environ/AZURE_EUROPE_API_KEY
# model: azure/gpt-35-turbo
# rpm: 10
# model_name: gpt-3.5-turbo-fake-model
- litellm_params:
api_base: https://openai-gpt-4-test-v-1.openai.azure.com
api_key: os.environ/AZURE_API_KEY
api_version: 2024-02-15-preview
model: azure/chatgpt-v-2
tpm: 100
model_name: gpt-3.5-turbo
- litellm_params:
model: anthropic.claude-3-sonnet-20240229-v1:0
model_name: bedrock-anthropic-claude-3
- litellm_params:
model: claude-3-haiku-20240307
model_name: anthropic-claude-3
- litellm_params:
api_base: https://openai-gpt-4-test-v-1.openai.azure.com/
api_key: os.environ/AZURE_API_KEY
api_version: 2024-02-15-preview
model: azure/chatgpt-v-2
drop_params: True
tpm: 100
model_name: gpt-3.5-turbo
- model_name: tts
litellm_params:
model: openai/tts-1
- model_name: gpt-4-turbo-preview
litellm_params:
api_base: https://openai-france-1234.openai.azure.com
api_key: os.environ/AZURE_FRANCE_API_KEY
api_version: 2024-02-15-preview
model: azure/gpt-turbo
- model_name: text-embedding
litellm_params:
model: textembedding-gecko-multilingual@001
vertex_project: my-project-9d5c
vertex_location: us-central1
- model_name: lbl/command-r-plus
litellm_params:
model: openai/lbl/command-r-plus
api_key: "os.environ/VLLM_API_KEY"
api_base: http://vllm-command:8000/v1
rpm: 1000
input_cost_per_token: 0
output_cost_per_token: 0
model_info:
max_input_tokens: 80920
# litellm_settings:
# callbacks: ["dynamic_rate_limiter"]
# # success_callback: ["langfuse"]
# # failure_callback: ["langfuse"]
# # default_team_settings:
# # - team_id: proj1
# # success_callback: ["langfuse"]
# # langfuse_public_key: pk-lf-a65841e9-5192-4397-a679-cfff029fd5b0
# # langfuse_secret: sk-lf-d58c2891-3717-4f98-89dd-df44826215fd
# # langfuse_host: https://us.cloud.langfuse.com
# # - team_id: proj2
# # success_callback: ["langfuse"]
# # langfuse_public_key: pk-lf-3d789fd1-f49f-4e73-a7d9-1b4e11acbf9a
# # langfuse_secret: sk-lf-11b13aca-b0d4-4cde-9d54-721479dace6d
# # langfuse_host: https://us.cloud.langfuse.com
assistant_settings:
custom_llm_provider: openai
litellm_params:
api_key: os.environ/OPENAI_API_KEY
router_settings:
enable_pre_call_checks: true
litellm_settings:
callbacks: ["s3"]
# general_settings:
# # alerting: ["slack"]
# enable_jwt_auth: True
# litellm_jwtauth:
# team_id_jwt_field: "client_id"

File diff suppressed because it is too large Load diff

View file

@ -1,106 +0,0 @@
#### Analytics Endpoints #####
from datetime import datetime
from typing import List, Optional
import fastapi
from fastapi import APIRouter, Depends, HTTPException, status
from litellm.proxy._types import *
from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
router = APIRouter()


@router.get(
    "/global/activity/cache_hits",
    tags=["Budget & Spend Tracking"],
    dependencies=[Depends(user_api_key_auth)],
    responses={
        200: {"model": List[LiteLLM_SpendLogs]},
    },
    include_in_schema=False,
)
async def get_global_activity(
    start_date: Optional[str] = fastapi.Query(
        default=None,
        description="Time from which to start viewing spend",
    ),
    end_date: Optional[str] = fastapi.Query(
        default=None,
        description="Time till which to view spend",
    ),
):
    """
    Get cache-hit vs. cache-miss activity between start_date and end_date
    (both inclusive; dates are YYYY-MM-DD), grouped by API key alias,
    call type, and model.

    Each returned row contains:
        - api_key: the key alias, or 'Unnamed Key' when no alias is set
        - call_type, model
        - total_rows: number of spend-log entries in the group
        - cache_hit_true_rows: entries served from cache
        - cached_completion_tokens / generated_completion_tokens

    Raises:
        HTTPException(400): missing or malformed dates, no DB connected,
        or any query error.
    """
    if start_date is None or end_date is None:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={"error": "Please provide start_date and end_date"},
        )

    # Parse defensively: previously a malformed date raised an unhandled
    # ValueError (a 500); surface it as a 400 with a clear message instead.
    try:
        start_date_obj = datetime.strptime(start_date, "%Y-%m-%d")
        end_date_obj = datetime.strptime(end_date, "%Y-%m-%d")
    except ValueError:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={"error": "start_date and end_date must be in YYYY-MM-DD format"},
        )

    from litellm.proxy.proxy_server import prisma_client

    try:
        if prisma_client is None:
            raise ValueError(
                "Database not connected. Connect a database to your proxy - https://docs.litellm.ai/docs/simple_proxy#managing-auth---virtual-keys"
            )

        # Aggregate spend logs per key alias / call type / model. The end date
        # is extended by one day so the whole final day is included.
        sql_query = """
        SELECT
          CASE
            WHEN vt."key_alias" IS NOT NULL THEN vt."key_alias"
            ELSE 'Unnamed Key'
          END AS api_key,
          sl."call_type",
          sl."model",
          COUNT(*) AS total_rows,
          SUM(CASE WHEN sl."cache_hit" = 'True' THEN 1 ELSE 0 END) AS cache_hit_true_rows,
          SUM(CASE WHEN sl."cache_hit" = 'True' THEN sl."completion_tokens" ELSE 0 END) AS cached_completion_tokens,
          SUM(CASE WHEN sl."cache_hit" != 'True' THEN sl."completion_tokens" ELSE 0 END) AS generated_completion_tokens
        FROM "LiteLLM_SpendLogs" sl
        LEFT JOIN "LiteLLM_VerificationToken" vt ON sl."api_key" = vt."token"
        WHERE
          sl."startTime" BETWEEN $1::date AND $2::date + interval '1 day'
        GROUP BY
          vt."key_alias",
          sl."call_type",
          sl."model"
        """
        db_response = await prisma_client.db.query_raw(
            sql_query, start_date_obj, end_date_obj
        )

        if db_response is None:
            return []

        return db_response

    except Exception as e:
        # NOTE: preserves the existing behavior of mapping every failure
        # (including "no DB connected") to a 400 response.
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={"error": str(e)},
        )

View file

@ -1,252 +0,0 @@
"""
Unified /v1/messages endpoint - (Anthropic Spec)
"""
import asyncio
import json
import time
import traceback
from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
from fastapi.responses import StreamingResponse
import litellm
from litellm._logging import verbose_proxy_logger
from litellm.proxy._types import *
from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
from litellm.proxy.common_request_processing import ProxyBaseLLMRequestProcessing
from litellm.proxy.common_utils.http_parsing_utils import _read_request_body
from litellm.proxy.litellm_pre_call_utils import add_litellm_data_to_request
from litellm.proxy.utils import ProxyLogging
router = APIRouter()


async def async_data_generator_anthropic(
    response,
    user_api_key_dict: UserAPIKeyAuth,
    request_data: dict,
    proxy_logging_obj: ProxyLogging,
):
    """
    Relay streaming chunks from an Anthropic-format response to the client.

    Every chunk is run through ``async_post_call_streaming_hook`` (guardrails /
    output modification) before being yielded. If the stream raises, the proxy
    failure hook is notified; an ``HTTPException`` is re-raised as-is, anything
    else is converted into an SSE ``data:`` error payload and yielded.
    """
    verbose_proxy_logger.debug("inside generator")
    try:
        # NOTE(review): return value is discarded — looks like leftover timing code.
        time.time()
        async for chunk in response:
            verbose_proxy_logger.debug(
                "async_data_generator: received streaming chunk - {}".format(chunk)
            )
            ### CALL HOOKS ### - modify outgoing data
            chunk = await proxy_logging_obj.async_post_call_streaming_hook(
                user_api_key_dict=user_api_key_dict, response=chunk
            )

            yield chunk
    except Exception as e:
        verbose_proxy_logger.exception(
            "litellm.proxy.proxy_server.async_data_generator(): Exception occured - {}".format(
                str(e)
            )
        )
        # Record the failure (alerting / logging callbacks) before deciding how
        # to surface the error to the client.
        await proxy_logging_obj.post_call_failure_hook(
            user_api_key_dict=user_api_key_dict,
            original_exception=e,
            request_data=request_data,
        )
        verbose_proxy_logger.debug(
            f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`"
        )

        if isinstance(e, HTTPException):
            raise e
        else:
            # Wrap unexpected errors in the standard proxy error envelope and
            # emit them as a final SSE event instead of aborting the stream.
            error_traceback = traceback.format_exc()
            error_msg = f"{str(e)}\n\n{error_traceback}"

            proxy_exception = ProxyException(
                message=getattr(e, "message", error_msg),
                type=getattr(e, "type", "None"),
                param=getattr(e, "param", "None"),
                code=getattr(e, "status_code", 500),
            )
            error_returned = json.dumps({"error": proxy_exception.to_dict()})
            yield f"data: {error_returned}\n\n"
@router.post(
    "/v1/messages",
    tags=["[beta] Anthropic `/v1/messages`"],
    dependencies=[Depends(user_api_key_auth)],
    include_in_schema=False,
)
async def anthropic_response(  # noqa: PLR0915
    fastapi_response: Response,
    request: Request,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Use `{PROXY_BASE_URL}/anthropic/v1/messages` instead - [Docs](https://docs.litellm.ai/docs/anthropic_completion).

    This was a BETA endpoint that calls 100+ LLMs in the anthropic format.
    """
    from litellm.proxy.proxy_server import (
        general_settings,
        llm_router,
        proxy_config,
        proxy_logging_obj,
        user_api_base,
        user_max_tokens,
        user_model,
        user_request_timeout,
        user_temperature,
        version,
    )

    request_data = await _read_request_body(request=request)
    data: dict = {**request_data}
    try:
        # Model resolution precedence: server default -> CLI `--model` -> request body;
        # a CLI-provided model then unconditionally wins.
        data["model"] = (
            general_settings.get("completion_model", None)  # server default
            or user_model  # model name passed via cli args
            or data.get("model", None)  # default passed in http request
        )
        if user_model:
            data["model"] = user_model

        # Attach litellm proxy metadata (user/team info, etc.) to the request payload.
        data = await add_litellm_data_to_request(
            data=data,  # type: ignore
            request=request,
            general_settings=general_settings,
            user_api_key_dict=user_api_key_dict,
            version=version,
            proxy_config=proxy_config,
        )

        # override with user settings, these are params passed via cli
        if user_temperature:
            data["temperature"] = user_temperature
        if user_request_timeout:
            data["request_timeout"] = user_request_timeout
        if user_max_tokens:
            data["max_tokens"] = user_max_tokens
        if user_api_base:
            data["api_base"] = user_api_base

        ### MODEL ALIAS MAPPING ###
        # check if model name in model alias map
        # get the actual model name
        if data["model"] in litellm.model_alias_map:
            data["model"] = litellm.model_alias_map[data["model"]]

        ### CALL HOOKS ### - modify incoming data before calling the model
        data = await proxy_logging_obj.pre_call_hook(  # type: ignore
            user_api_key_dict=user_api_key_dict, data=data, call_type="text_completion"
        )

        ### ROUTE THE REQUESTs ###
        # Dispatch priority: router model name -> model group alias -> named
        # deployment -> deployment id -> wildcard/default route -> CLI model;
        # otherwise reject with a 400.
        router_model_names = llm_router.model_names if llm_router is not None else []

        # skip router if user passed their key
        if (
            llm_router is not None and data["model"] in router_model_names
        ):  # model in router model list
            llm_response = asyncio.create_task(llm_router.aanthropic_messages(**data))
        elif (
            llm_router is not None
            and llm_router.model_group_alias is not None
            and data["model"] in llm_router.model_group_alias
        ):  # model set in model_group_alias
            llm_response = asyncio.create_task(llm_router.aanthropic_messages(**data))
        elif (
            llm_router is not None and data["model"] in llm_router.deployment_names
        ):  # model in router deployments, calling a specific deployment on the router
            llm_response = asyncio.create_task(
                llm_router.aanthropic_messages(**data, specific_deployment=True)
            )
        elif (
            llm_router is not None and data["model"] in llm_router.get_model_ids()
        ):  # model in router model list
            llm_response = asyncio.create_task(llm_router.aanthropic_messages(**data))
        elif (
            llm_router is not None
            and data["model"] not in router_model_names
            and (
                llm_router.default_deployment is not None
                or len(llm_router.pattern_router.patterns) > 0
            )
        ):  # model in router deployments, calling a specific deployment on the router
            llm_response = asyncio.create_task(llm_router.aanthropic_messages(**data))
        elif user_model is not None:  # `litellm --model <your-model-name>`
            llm_response = asyncio.create_task(litellm.anthropic_messages(**data))
        else:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail={
                    "error": "completion: Invalid model name passed in model="
                    + data.get("model", "")
                },
            )

        # Await the llm_response task
        response = await llm_response

        # Hidden params carry routing/cost metadata that is echoed back in
        # custom response headers below.
        hidden_params = getattr(response, "_hidden_params", {}) or {}
        model_id = hidden_params.get("model_id", None) or ""
        cache_key = hidden_params.get("cache_key", None) or ""
        api_base = hidden_params.get("api_base", None) or ""
        response_cost = hidden_params.get("response_cost", None) or ""

        ### ALERTING ###
        # Fire-and-forget: mark the request successful so hanging-request
        # alerting does not trigger for it.
        asyncio.create_task(
            proxy_logging_obj.update_request_status(
                litellm_call_id=data.get("litellm_call_id", ""), status="success"
            )
        )

        verbose_proxy_logger.debug("final response: %s", response)

        fastapi_response.headers.update(
            ProxyBaseLLMRequestProcessing.get_custom_headers(
                user_api_key_dict=user_api_key_dict,
                model_id=model_id,
                cache_key=cache_key,
                api_base=api_base,
                version=version,
                response_cost=response_cost,
                request_data=data,
                hidden_params=hidden_params,
            )
        )

        if (
            "stream" in data and data["stream"] is True
        ):  # use generate_responses to stream responses
            selected_data_generator = async_data_generator_anthropic(
                response=response,
                user_api_key_dict=user_api_key_dict,
                request_data=data,
                proxy_logging_obj=proxy_logging_obj,
            )

            return StreamingResponse(
                selected_data_generator,  # type: ignore
                media_type="text/event-stream",
            )

        verbose_proxy_logger.info("\nResponse from Litellm:\n{}".format(response))
        return response
    except Exception as e:
        # Notify failure hooks, then normalize every error into a ProxyException.
        await proxy_logging_obj.post_call_failure_hook(
            user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data
        )
        verbose_proxy_logger.exception(
            "litellm.proxy.proxy_server.anthropic_response(): Exception occured - {}".format(
                str(e)
            )
        )
        error_msg = f"{str(e)}"
        raise ProxyException(
            message=getattr(e, "message", error_msg),
            type=getattr(e, "type", "None"),
            param=getattr(e, "param", "None"),
            code=getattr(e, "status_code", 500),
        )

File diff suppressed because it is too large Load diff

View file

@ -1,163 +0,0 @@
"""
Auth Checks for Organizations
"""
from typing import Dict, List, Optional, Tuple
from fastapi import status
from litellm.proxy._types import *
def organization_role_based_access_check(
    request_body: dict,
    user_object: Optional[LiteLLM_UserTable],
    route: str,
):
    """
    Role based access control checks only run if a user is part of an Organization

    Organization Checks:
    ONLY RUN IF user_object.organization_memberships is not None

    1. Only Proxy Admins can access /organization/new
    2. IF route is a LiteLLMRoutes.org_admin_only_routes, then check if user is an Org Admin for that organization

    Raises:
        ProxyException (401): when the user lacks the required role for the route.
    Returns None (no exception) when access is allowed or no user is present.
    """

    if user_object is None:
        return

    passed_organization_id: Optional[str] = request_body.get("organization_id", None)

    if route == "/organization/new":
        if user_object.user_role != LitellmUserRoles.PROXY_ADMIN.value:
            raise ProxyException(
                message=f"Only proxy admins can create new organizations. You are {user_object.user_role}",
                type=ProxyErrorTypes.auth_error.value,
                param="user_role",
                code=status.HTTP_401_UNAUTHORIZED,
            )

    # Proxy admins bypass all organization-level checks below.
    if user_object.user_role == LitellmUserRoles.PROXY_ADMIN.value:
        return

    # Checks if route is an Org Admin Only Route
    if route in LiteLLMRoutes.org_admin_only_routes.value:
        (
            _user_organizations,
            _user_organization_role_mapping,
        ) = get_user_organization_info(user_object)

        if user_object.organization_memberships is None:
            raise ProxyException(
                message=f"Tried to access route={route} but you are not a member of any organization. Please contact the proxy admin to request access.",
                type=ProxyErrorTypes.auth_error.value,
                param="organization_id",
                code=status.HTTP_401_UNAUTHORIZED,
            )

        if passed_organization_id is None:
            raise ProxyException(
                message="Passed organization_id is None, please pass an organization_id in your request",
                type=ProxyErrorTypes.auth_error.value,
                param="organization_id",
                code=status.HTTP_401_UNAUTHORIZED,
            )

        # NOTE(review): mapping values come from membership.user_role (type:
        # ignore'd in get_user_organization_info) and are compared against
        # LitellmUserRoles.*.value strings below — confirm memberships store
        # plain strings, otherwise these comparisons can never match.
        user_role: Optional[LitellmUserRoles] = _user_organization_role_mapping.get(
            passed_organization_id
        )
        if user_role is None:
            raise ProxyException(
                message=f"You do not have a role within the selected organization. Passed organization_id: {passed_organization_id}. Please contact the organization admin to request access.",
                type=ProxyErrorTypes.auth_error.value,
                param="organization_id",
                code=status.HTTP_401_UNAUTHORIZED,
            )

        if user_role != LitellmUserRoles.ORG_ADMIN.value:
            raise ProxyException(
                message=f"You do not have the required role to perform {route} in Organization {passed_organization_id}. Your role is {user_role} in Organization {passed_organization_id}",
                type=ProxyErrorTypes.auth_error.value,
                param="user_role",
                code=status.HTTP_401_UNAUTHORIZED,
            )
    elif route == "/team/new":
        # if user is part of multiple teams, then they need to specify the organization_id
        (
            _user_organizations,
            _user_organization_role_mapping,
        ) = get_user_organization_info(user_object)

        # Only enforced when the user belongs to at least one organization.
        if (
            user_object.organization_memberships is not None
            and len(user_object.organization_memberships) > 0
        ):
            if passed_organization_id is None:
                raise ProxyException(
                    message=f"Passed organization_id is None, please specify the organization_id in your request. You are part of multiple organizations: {_user_organizations}",
                    type=ProxyErrorTypes.auth_error.value,
                    param="organization_id",
                    code=status.HTTP_401_UNAUTHORIZED,
                )

            _user_role_in_passed_org = _user_organization_role_mapping.get(
                passed_organization_id
            )
            if _user_role_in_passed_org != LitellmUserRoles.ORG_ADMIN.value:
                raise ProxyException(
                    message=f"You do not have the required role to call {route}. Your role is {_user_role_in_passed_org} in Organization {passed_organization_id}",
                    type=ProxyErrorTypes.auth_error.value,
                    param="user_role",
                    code=status.HTTP_401_UNAUTHORIZED,
                )
def get_user_organization_info(
    user_object: LiteLLM_UserTable,
) -> Tuple[List[str], Dict[str, Optional[LitellmUserRoles]]]:
    """
    Collect the organizations a user belongs to, along with their role in each.

    Args:
        user_object (LiteLLM_UserTable): The user whose organization
            memberships are inspected.

    Returns:
        Tuple[List[str], Dict[str, Optional[LitellmUserRoles]]]:
            - the organization IDs the user is a member of
            - a mapping from organization ID to the user's role there
    """
    org_ids: List[str] = []
    role_by_org: Dict[str, Optional[LitellmUserRoles]] = {}

    memberships = user_object.organization_memberships
    if memberships is not None:
        for membership in memberships:
            org_id = membership.organization_id
            if org_id is None:
                continue
            org_ids.append(org_id)
            role_by_org[org_id] = membership.user_role  # type: ignore

    return org_ids, role_by_org
def _user_is_org_admin(
    request_data: dict,
    user_object: Optional[LiteLLM_UserTable] = None,
) -> bool:
    """
    Return True iff the user holds the ORG_ADMIN role in the organization
    named by ``request_data["organization_id"]``.
    """
    target_org = request_data.get("organization_id", None)
    if target_org is None or user_object is None:
        return False

    memberships = user_object.organization_memberships
    if memberships is None:
        return False

    return any(
        membership.organization_id == target_org
        and membership.user_role == LitellmUserRoles.ORG_ADMIN.value
        for membership in memberships
    )

View file

@ -1,124 +0,0 @@
"""
Handles Authentication Errors
"""
import asyncio
from typing import TYPE_CHECKING, Any, Optional, Union
from fastapi import HTTPException, Request, status
import litellm
from litellm._logging import verbose_proxy_logger
from litellm.proxy._types import ProxyErrorTypes, ProxyException, UserAPIKeyAuth
from litellm.proxy.auth.auth_utils import _get_request_ip_address
from litellm.proxy.db.exception_handler import PrismaDBExceptionHandler
from litellm.types.services import ServiceTypes
# Make `Span` usable in annotations without importing opentelemetry at runtime:
# the real type is only resolved for static type checkers; at runtime it is Any.
if TYPE_CHECKING:
    from opentelemetry.trace import Span as _Span

    Span = Union[_Span, Any]
else:
    Span = Any
class UserAPIKeyAuthExceptionHandler:
    # Centralizes what happens when virtual-key authentication raises:
    # either fail open (DB outage + opt-in flag) or normalize into ProxyException.

    @staticmethod
    async def _handle_authentication_error(
        e: Exception,
        request: Request,
        request_data: dict,
        route: str,
        parent_otel_span: Optional[Span],
        api_key: str,
    ) -> UserAPIKeyAuth:
        """
        Handles Connection Errors when reading a Virtual Key from LiteLLM DB
        Use this if you don't want failed DB queries to block LLM API requests

        Reliability scenarios this covers:
        - DB is down and having an outage
        - Unable to read / recover a key from the DB

        Returns:
        - UserAPIKeyAuth: If general_settings.allow_requests_on_db_unavailable is True

        Raises:
        - Original Exception in all other cases
        """
        from litellm.proxy.proxy_server import (
            general_settings,
            litellm_proxy_admin_name,
            proxy_logging_obj,
        )

        # Fail-open path: only when the operator opted in AND the error is a
        # DB-connection failure (not e.g. an invalid key).
        if (
            PrismaDBExceptionHandler.should_allow_request_on_db_unavailable()
            and PrismaDBExceptionHandler.is_database_connection_error(e)
        ):
            # log this as a DB failure on prometheus
            # NOTE(review): if service_failure_hook is a coroutine function,
            # this call is not awaited here — confirm it is sync (or intended
            # fire-and-forget).
            proxy_logging_obj.service_logging_obj.service_failure_hook(
                service=ServiceTypes.DB,
                call_type="get_key_object",
                error=e,
                duration=0.0,
            )
            # Surrogate auth object so the request proceeds under the proxy
            # admin identity while the DB is unreachable.
            return UserAPIKeyAuth(
                key_name="failed-to-connect-to-db",
                token="failed-to-connect-to-db",
                user_id=litellm_proxy_admin_name,
                request_route=route,
            )
        else:
            # raise the exception to the caller
            requester_ip = _get_request_ip_address(
                request=request,
                use_x_forwarded_for=general_settings.get("use_x_forwarded_for", False),
            )
            verbose_proxy_logger.exception(
                "litellm.proxy.proxy_server.user_api_key_auth(): Exception occured - {}\nRequester IP Address:{}".format(
                    str(e),
                    requester_ip,
                ),
                extra={"requester_ip": requester_ip},
            )

            # Log this exception to OTEL, Datadog etc
            user_api_key_dict = UserAPIKeyAuth(
                parent_otel_span=parent_otel_span,
                api_key=api_key,
                request_route=route,
            )
            # Fire-and-forget so failure callbacks don't delay the 401 response.
            asyncio.create_task(
                proxy_logging_obj.post_call_failure_hook(
                    request_data=request_data,
                    original_exception=e,
                    user_api_key_dict=user_api_key_dict,
                    error_type=ProxyErrorTypes.auth_error,
                    route=route,
                )
            )

            # Normalize into ProxyException, most specific case first.
            if isinstance(e, litellm.BudgetExceededError):
                raise ProxyException(
                    message=e.message,
                    type=ProxyErrorTypes.budget_exceeded,
                    param=None,
                    code=400,
                )
            if isinstance(e, HTTPException):
                raise ProxyException(
                    message=getattr(e, "detail", f"Authentication Error({str(e)})"),
                    type=ProxyErrorTypes.auth_error,
                    param=getattr(e, "param", "None"),
                    code=getattr(e, "status_code", status.HTTP_401_UNAUTHORIZED),
                )
            elif isinstance(e, ProxyException):
                raise e
            raise ProxyException(
                message="Authentication Error, " + str(e),
                type=ProxyErrorTypes.auth_error,
                param=getattr(e, "param", "None"),
                code=status.HTTP_401_UNAUTHORIZED,
            )

View file

@ -1,513 +0,0 @@
import os
import re
import sys
from typing import Any, List, Optional, Tuple
from fastapi import HTTPException, Request, status
from litellm import Router, provider_list
from litellm._logging import verbose_proxy_logger
from litellm.proxy._types import *
from litellm.types.router import CONFIGURABLE_CLIENTSIDE_AUTH_PARAMS
def _get_request_ip_address(
request: Request, use_x_forwarded_for: Optional[bool] = False
) -> Optional[str]:
client_ip = None
if use_x_forwarded_for is True and "x-forwarded-for" in request.headers:
client_ip = request.headers["x-forwarded-for"]
elif request.client is not None:
client_ip = request.client.host
else:
client_ip = ""
return client_ip
def _check_valid_ip(
    allowed_ips: Optional[List[str]],
    request: Request,
    use_x_forwarded_for: Optional[bool] = False,
) -> Tuple[bool, Optional[str]]:
    """Return (is_allowed, client_ip) for the request against an IP allow-list.

    When no allow-list is configured, every caller is allowed and the IP is
    not resolved at all (returns (True, None)).
    """
    if allowed_ips is None:  # if not set, assume true
        return True, None

    # if general_settings.get("use_x_forwarded_for") is True then use x-forwarded-for
    caller_ip = _get_request_ip_address(
        request=request, use_x_forwarded_for=use_x_forwarded_for
    )

    # Check if IP address is allowed
    return (caller_ip in allowed_ips), caller_ip
def check_complete_credentials(request_body: dict) -> bool:
    """Return True when the body carries a full, self-contained credential set.

    Used to decide whether a client-supplied `api_base` is acceptable: only
    simple api-key providers qualify; complex-credential providers
    (sagemaker / bedrock / vertex_ai) are always rejected.
    """
    model = request_body.get("model")
    if model is None:
        return False

    # complex credentials - easier to make a malicious request
    complex_providers = ("sagemaker", "bedrock", "vertex_ai", "vertex_ai_beta")
    if any(provider in model for provider in complex_providers):
        return False

    return "api_key" in request_body
def check_regex_or_str_match(request_body_value: Any, regex_str: str) -> bool:
    """Return True when *request_body_value* matches *regex_str* as a regex,
    or equals it as a plain string."""
    matched = re.match(regex_str, request_body_value)
    return bool(matched) or regex_str == request_body_value
def _is_param_allowed(
param: str,
request_body_value: Any,
configurable_clientside_auth_params: CONFIGURABLE_CLIENTSIDE_AUTH_PARAMS,
) -> bool:
"""
Check if param is a str or dict and if request_body_value is in the list of allowed values
"""
if configurable_clientside_auth_params is None:
return False
for item in configurable_clientside_auth_params:
if isinstance(item, str) and param == item:
return True
elif isinstance(item, Dict):
if param == "api_base" and check_regex_or_str_match(
request_body_value=request_body_value,
regex_str=item["api_base"],
): # assume param is a regex
return True
return False
def _allow_model_level_clientside_configurable_parameters(
model: str, param: str, request_body_value: Any, llm_router: Optional[Router]
) -> bool:
"""
Check if model is allowed to use configurable client-side params
- get matching model
- check if 'clientside_configurable_parameters' is set for model
-
"""
if llm_router is None:
return False
# check if model is set
model_info = llm_router.get_model_group_info(model_group=model)
if model_info is None:
# check if wildcard model is set
if model.split("/", 1)[0] in provider_list:
model_info = llm_router.get_model_group_info(
model_group=model.split("/", 1)[0]
)
if model_info is None:
return False
if model_info is None or model_info.configurable_clientside_auth_params is None:
return False
return _is_param_allowed(
param=param,
request_body_value=request_body_value,
configurable_clientside_auth_params=model_info.configurable_clientside_auth_params,
)
def is_request_body_safe(
    request_body: dict, general_settings: dict, llm_router: Optional[Router], model: str
) -> bool:
    """
    Check if the request body is safe.

    A malicious user can set the api_base to their own domain and invoke
    POST /chat/completions to intercept and steal the OpenAI API key.
    Relevant issue: https://huntr.com/bounties/4001e1a2-7b7a-4776-a3ae-e6692ec3d997

    Raises ValueError when a banned param is present without permission.
    """
    banned_params = ["api_base", "base_url"]

    for param in banned_params:
        if param not in request_body:
            continue
        # complete client credentials may be forwarded to the proxy as-is
        if check_complete_credentials(request_body=request_body):
            continue
        # global opt-in
        if general_settings.get("allow_client_side_credentials") is True:
            return True
        # per-model opt-in
        if (
            _allow_model_level_clientside_configurable_parameters(
                model=model,
                param=param,
                request_body_value=request_body[param],
                llm_router=llm_router,
            )
            is True
        ):
            return True
        raise ValueError(
            f"Rejected Request: {param} is not allowed in request body. "
            "Enable with `general_settings::allow_client_side_credentials` on proxy config.yaml. "
            "Relevant Issue: https://huntr.com/bounties/4001e1a2-7b7a-4776-a3ae-e6692ec3d997",
        )
    return True
async def pre_db_read_auth_checks(
    request: Request,
    request_data: dict,
    route: str,
):
    """
    Run cheap request-level checks before any DB lookups:

    1. Checks if request size is under max_request_size_mb (if set)
    2. Check if request body is safe (example user has not set api_base in request body)
    3. Check if IP address is allowed (if set)
    4. Check if request route is an allowed route on the proxy (if set)

    Returns:
    - None on success (falls through without returning a value)
    Raises:
    - HTTPException (or ProxyException from the size check, ValueError from
      the body check) if request fails initial auth checks
    """
    # imported lazily: these module-level globals are populated at proxy startup
    from litellm.proxy.proxy_server import general_settings, llm_router, premium_user

    # Check 1. request size
    await check_if_request_size_is_safe(request=request)

    # Check 2. Request body is safe (e.g. no client-supplied api_base)
    is_request_body_safe(
        request_body=request_data,
        general_settings=general_settings,
        llm_router=llm_router,
        model=request_data.get(
            "model", ""
        ),  # [TODO] use model passed in url as well (azure openai routes)
    )

    # Check 3. Check if IP address is allowed
    is_valid_ip, passed_in_ip = _check_valid_ip(
        allowed_ips=general_settings.get("allowed_ips", None),
        use_x_forwarded_for=general_settings.get("use_x_forwarded_for", False),
        request=request,
    )

    if not is_valid_ip:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail=f"Access forbidden: IP address {passed_in_ip} not allowed.",
        )

    # Check 4. Check if request route is an allowed route on the proxy
    if "allowed_routes" in general_settings:
        _allowed_routes = general_settings["allowed_routes"]
        if premium_user is not True:
            # NOTE(review): non-premium only logs here — the route check below
            # still runs; confirm this fail-closed behavior is intended
            verbose_proxy_logger.error(
                f"Trying to set allowed_routes. This is an Enterprise feature. {CommonProxyErrors.not_premium_user.value}"
            )
        if route not in _allowed_routes:
            verbose_proxy_logger.error(
                f"Route {route} not in allowed_routes={_allowed_routes}"
            )
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=f"Access forbidden: Route {route} not allowed",
            )
def route_in_additonal_public_routes(current_route: str):
    """
    Check whether the admin marked *current_route* as public via
    `general_settings.public_routes` on config.yaml (premium-only feature).

    NOTE: the function name keeps the historical 'additonal' spelling —
    callers depend on it.

    Returns:
    - True if the route is defined in public_routes
    - False otherwise (including on any lookup error)

    Example config:
    ```yaml
    general_settings:
        master_key: sk-1234
        public_routes: ["LiteLLMRoutes.public_routes", "/spend/calculate"]
    ```
    """
    # check if user is premium_user - if not do nothing
    from litellm.proxy.proxy_server import general_settings, premium_user

    try:
        if premium_user is not True:
            return False
        # check if this is defined on the config
        if general_settings is None:
            return False
        return current_route in general_settings.get("public_routes", [])
    except Exception as e:
        verbose_proxy_logger.error(f"route_in_additonal_public_routes: {str(e)}")
        return False
def get_request_route(request: Request) -> str:
    """
    Return the route for *request*, with any server base path stripped,
    e.g. `/genai/chat/completions` -> `/chat/completions`.
    """
    try:
        full_path = request.url.path
        if hasattr(request, "base_url") and full_path.startswith(
            request.base_url.path
        ):
            # drop the base path prefix but keep the leading slash
            return full_path[len(request.base_url.path) - 1 :]
        return full_path
    except Exception as e:
        verbose_proxy_logger.debug(
            f"error on get_request_route: {str(e)}, defaulting to request.url.path={request.url.path}"
        )
        return request.url.path
async def check_if_request_size_is_safe(request: Request) -> bool:
    """
    Enterprise Only:
        - Checks if the request size is within the limit

    Args:
        request (Request): The incoming request.

    Returns:
        bool: True if the request size is within the limit (or the check is
        disabled / not premium)

    Raises:
        ProxyException: If the request size is too large
    """
    # limit and premium status live on module-level proxy globals
    from litellm.proxy.proxy_server import general_settings, premium_user

    max_request_size_mb = general_settings.get("max_request_size_mb", None)
    if max_request_size_mb is not None:
        # Check if premium user — enterprise-only feature, skipped otherwise
        if premium_user is not True:
            verbose_proxy_logger.warning(
                f"using max_request_size_mb - not checking - this is an enterprise only feature. {CommonProxyErrors.not_premium_user.value}"
            )
            return True

        # Get the request body
        content_length = request.headers.get("content-length")

        if content_length:
            # fast path: trust the Content-Length header instead of reading the body
            header_size = int(content_length)
            header_size_mb = bytes_to_mb(bytes_value=header_size)
            verbose_proxy_logger.debug(
                f"content_length request size in MB={header_size_mb}"
            )

            if header_size_mb > max_request_size_mb:
                raise ProxyException(
                    message=f"Request size is too large. Request size is {header_size_mb} MB. Max size is {max_request_size_mb} MB",
                    type=ProxyErrorTypes.bad_request_error.value,
                    code=400,
                    param="content-length",
                )
        else:
            # If Content-Length is not available, read the body
            body = await request.body()
            body_size = len(body)
            request_size_mb = bytes_to_mb(bytes_value=body_size)

            verbose_proxy_logger.debug(
                f"request body request size in MB={request_size_mb}"
            )
            if request_size_mb > max_request_size_mb:
                raise ProxyException(
                    message=f"Request size is too large. Request size is {request_size_mb} MB. Max size is {max_request_size_mb} MB",
                    type=ProxyErrorTypes.bad_request_error.value,
                    code=400,
                    param="content-length",
                )

    return True
async def check_response_size_is_safe(response: Any) -> bool:
    """
    Enterprise Only:
        - Checks if the response size is within the limit

    Args:
        response (Any): The response to check.

    Returns:
        bool: True if the response size is within the limit (or the check is
        disabled / not premium)

    Raises:
        ProxyException: If the response size is too large
    """
    # limit and premium status live on module-level proxy globals
    from litellm.proxy.proxy_server import general_settings, premium_user

    max_response_size_mb = general_settings.get("max_response_size_mb", None)
    if max_response_size_mb is not None:
        # Check if premium user — enterprise-only feature, skipped otherwise
        if premium_user is not True:
            verbose_proxy_logger.warning(
                f"using max_response_size_mb - not checking - this is an enterprise only feature. {CommonProxyErrors.not_premium_user.value}"
            )
            return True

        # NOTE(review): sys.getsizeof is shallow — it measures the container
        # object, not the full serialized payload; confirm this is intended
        response_size_mb = bytes_to_mb(bytes_value=sys.getsizeof(response))
        verbose_proxy_logger.debug(f"response size in MB={response_size_mb}")
        if response_size_mb > max_response_size_mb:
            raise ProxyException(
                message=f"Response size is too large. Response size is {response_size_mb} MB. Max size is {max_response_size_mb} MB",
                type=ProxyErrorTypes.bad_request_error.value,
                code=400,
                param="content-length",
            )

    return True
def bytes_to_mb(bytes_value: int):
    """Convert a byte count to megabytes (MiB, i.e. 1024 * 1024 bytes)."""
    one_mb = 1024 * 1024
    return bytes_value / one_mb
# helpers used by parallel request limiter to handle model rpm/tpm limits for a given api key
def get_key_model_rpm_limit(
    user_api_key_dict: UserAPIKeyAuth,
) -> Optional[Dict[str, int]]:
    """Return the per-model RPM limits configured for this key, if any.

    Key metadata takes precedence; otherwise the map is derived from
    `model_max_budget` entries that carry a non-None `rpm_limit`.
    """
    metadata = user_api_key_dict.metadata
    if metadata:
        if "model_rpm_limit" in metadata:
            return metadata["model_rpm_limit"]
    elif user_api_key_dict.model_max_budget:
        return {
            model: budget["rpm_limit"]
            for model, budget in user_api_key_dict.model_max_budget.items()
            if "rpm_limit" in budget and budget["rpm_limit"] is not None
        }
    return None
def get_key_model_tpm_limit(
    user_api_key_dict: UserAPIKeyAuth,
) -> Optional[Dict[str, int]]:
    """Return the per-model TPM limits configured for this key, if any.

    Mirrors `get_key_model_rpm_limit`: key metadata takes precedence;
    otherwise the map is derived from `model_max_budget` entries that carry
    a non-None `tpm_limit`.

    Fix: this previously checked for a top-level "tpm_limit" key on
    `model_max_budget`, but that dict maps model-name -> budget (exactly how
    the RPM counterpart iterates it), so per-model tpm limits were silently
    dropped.
    """
    if user_api_key_dict.metadata:
        if "model_tpm_limit" in user_api_key_dict.metadata:
            return user_api_key_dict.metadata["model_tpm_limit"]
    elif user_api_key_dict.model_max_budget:
        model_tpm_limit: Dict[str, Any] = {}
        for model, budget in user_api_key_dict.model_max_budget.items():
            if "tpm_limit" in budget and budget["tpm_limit"] is not None:
                model_tpm_limit[model] = budget["tpm_limit"]
        return model_tpm_limit
    return None
def is_pass_through_provider_route(route: str) -> bool:
    """True when *route* targets a provider-specific pass-through endpoint
    (e.g. /vertex-ai/{endpoint})."""
    provider_pass_through_prefixes = [
        "vertex-ai",
    ]
    # check if any of the prefixes are in the route
    return any(prefix in route for prefix in provider_pass_through_prefixes)
def should_run_auth_on_pass_through_provider_route(route: str) -> bool:
    """
    Decide whether LiteLLM virtual-key auth should run on provider
    pass-through routes (e.g. /vertex-ai/{endpoint}).

    Auth runs (returns True) only when:
    - the deployment is premium (LiteLLM Enterprise), and
    - the user has NOT opted into forwarding client credentials via
      `general_settings.use_client_credentials_pass_through_routes`
    """
    from litellm.proxy.proxy_server import general_settings, premium_user

    if premium_user is not True:
        return False

    # premium user has opted into forwarding the client's own credentials
    use_client_creds = general_settings.get(
        "use_client_credentials_pass_through_routes", False
    )
    if use_client_creds is True:
        return False

    # only enabled for LiteLLM Enterprise
    return True
def _has_user_setup_sso():
"""
Check if the user has set up single sign-on (SSO) by verifying the presence of Microsoft client ID, Google client ID or generic client ID and UI username environment variables.
Returns a boolean indicating whether SSO has been set up.
"""
microsoft_client_id = os.getenv("MICROSOFT_CLIENT_ID", None)
google_client_id = os.getenv("GOOGLE_CLIENT_ID", None)
generic_client_id = os.getenv("GENERIC_CLIENT_ID", None)
sso_setup = (
(microsoft_client_id is not None)
or (google_client_id is not None)
or (generic_client_id is not None)
)
return sso_setup
def get_end_user_id_from_request_body(request_body: dict) -> Optional[str]:
    """Extract the end-user id from a request body, checking the OpenAI,
    Anthropic, and generic metadata conventions in that order."""
    # openai - check 'user'
    end_user = request_body.get("user")
    if end_user is not None:
        return str(end_user)

    # anthropic - check 'litellm_metadata'
    end_user = request_body.get("litellm_metadata", {}).get("user", None)
    if end_user:
        return str(end_user)

    # generic - check 'metadata.user_id'
    metadata = request_body.get("metadata")
    if metadata and metadata.get("user_id") is not None:
        return str(metadata["user_id"])
    return None

View file

@ -1,998 +0,0 @@
"""
Supports using JWT's for authenticating into the proxy.
Currently only supports admin.
JWT token must have 'litellm_proxy_admin' in scope.
"""
import json
import os
from typing import Any, List, Literal, Optional, Set, Tuple, cast
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from fastapi import HTTPException
from litellm._logging import verbose_proxy_logger
from litellm.caching.caching import DualCache
from litellm.litellm_core_utils.dot_notation_indexing import get_nested_value
from litellm.llms.custom_httpx.httpx_handler import HTTPHandler
from litellm.proxy._types import (
RBAC_ROLES,
JWKKeyValue,
JWTAuthBuilderResult,
JWTKeyItem,
LiteLLM_EndUserTable,
LiteLLM_JWTAuth,
LiteLLM_OrganizationTable,
LiteLLM_TeamTable,
LiteLLM_UserTable,
LitellmUserRoles,
ScopeMapping,
Span,
)
from litellm.proxy.auth.auth_checks import can_team_access_model
from litellm.proxy.utils import PrismaClient, ProxyLogging
from .auth_checks import (
_allowed_routes_check,
allowed_routes_check,
get_actual_routes,
get_end_user_object,
get_org_object,
get_role_based_models,
get_role_based_routes,
get_team_object,
get_user_object,
)
class JWTHandler:
    """
    - treat the sub id passed in as the user id
    - return an error if id making request doesn't exist in proxy user table
    - track spend against the user id
    - if role="litellm_proxy_user" -> allow making calls + info. Can not edit budgets
    """

    # DB client used for user/team lookups; None when the proxy runs without a DB
    prisma_client: Optional[PrismaClient]
    # cache shared with the rest of the proxy auth stack (set via update_environment)
    user_api_key_cache: DualCache
def __init__(
self,
) -> None:
self.http_handler = HTTPHandler()
self.leeway = 0
def update_environment(
self,
prisma_client: Optional[PrismaClient],
user_api_key_cache: DualCache,
litellm_jwtauth: LiteLLM_JWTAuth,
leeway: int = 0,
) -> None:
self.prisma_client = prisma_client
self.user_api_key_cache = user_api_key_cache
self.litellm_jwtauth = litellm_jwtauth
self.leeway = leeway
def is_jwt(self, token: str):
parts = token.split(".")
return len(parts) == 3
def _rbac_role_from_role_mapping(self, token: dict) -> Optional[RBAC_ROLES]:
"""
Returns the RBAC role the token 'belongs' to based on role mappings.
Args:
token (dict): The JWT token containing role information
Returns:
Optional[RBAC_ROLES]: The mapped internal RBAC role if a mapping exists,
None otherwise
Note:
The function handles both single string roles and lists of roles from the JWT.
If multiple mappings match the JWT roles, the first matching mapping is returned.
"""
if self.litellm_jwtauth.role_mappings is None:
return None
jwt_role = self.get_jwt_role(token=token, default_value=None)
if not jwt_role:
return None
jwt_role_set = set(jwt_role)
for role_mapping in self.litellm_jwtauth.role_mappings:
# Check if the mapping role matches any of the JWT roles
if role_mapping.role in jwt_role_set:
return role_mapping.internal_role
return None
def get_rbac_role(self, token: dict) -> Optional[RBAC_ROLES]:
"""
Returns the RBAC role the token 'belongs' to.
RBAC roles allowed to make requests:
- PROXY_ADMIN: can make requests to all routes
- TEAM: can make requests to routes associated with a team
- INTERNAL_USER: can make requests to routes associated with a user
Resolves: https://github.com/BerriAI/litellm/issues/6793
Returns:
- PROXY_ADMIN: if token is admin
- TEAM: if token is associated with a team
- INTERNAL_USER: if token is associated with a user
- None: if token is not associated with a team or user
"""
scopes = self.get_scopes(token=token)
is_admin = self.is_admin(scopes=scopes)
user_roles = self.get_user_roles(token=token, default_value=None)
if is_admin:
return LitellmUserRoles.PROXY_ADMIN
elif self.get_team_id(token=token, default_value=None) is not None:
return LitellmUserRoles.TEAM
elif self.get_user_id(token=token, default_value=None) is not None:
return LitellmUserRoles.INTERNAL_USER
elif user_roles is not None and self.is_allowed_user_role(
user_roles=user_roles
):
return LitellmUserRoles.INTERNAL_USER
elif rbac_role := self._rbac_role_from_role_mapping(token=token):
return rbac_role
return None
def is_admin(self, scopes: list) -> bool:
if self.litellm_jwtauth.admin_jwt_scope in scopes:
return True
return False
def get_team_ids_from_jwt(self, token: dict) -> List[str]:
if (
self.litellm_jwtauth.team_ids_jwt_field is not None
and token.get(self.litellm_jwtauth.team_ids_jwt_field) is not None
):
return token[self.litellm_jwtauth.team_ids_jwt_field]
return []
def get_end_user_id(
self, token: dict, default_value: Optional[str]
) -> Optional[str]:
try:
if self.litellm_jwtauth.end_user_id_jwt_field is not None:
user_id = token[self.litellm_jwtauth.end_user_id_jwt_field]
else:
user_id = None
except KeyError:
user_id = default_value
return user_id
def is_required_team_id(self) -> bool:
"""
Returns:
- True: if 'team_id_jwt_field' is set
- False: if not
"""
if self.litellm_jwtauth.team_id_jwt_field is None:
return False
return True
def is_enforced_email_domain(self) -> bool:
"""
Returns:
- True: if 'user_allowed_email_domain' is set
- False: if 'user_allowed_email_domain' is None
"""
if self.litellm_jwtauth.user_allowed_email_domain is not None and isinstance(
self.litellm_jwtauth.user_allowed_email_domain, str
):
return True
return False
def get_team_id(self, token: dict, default_value: Optional[str]) -> Optional[str]:
try:
if self.litellm_jwtauth.team_id_jwt_field is not None:
team_id = token[self.litellm_jwtauth.team_id_jwt_field]
elif self.litellm_jwtauth.team_id_default is not None:
team_id = self.litellm_jwtauth.team_id_default
else:
team_id = None
except KeyError:
team_id = default_value
return team_id
def is_upsert_user_id(self, valid_user_email: Optional[bool] = None) -> bool:
"""
Returns:
- True: if 'user_id_upsert' is set AND valid_user_email is not False
- False: if not
"""
if valid_user_email is False:
return False
return self.litellm_jwtauth.user_id_upsert
def get_user_id(self, token: dict, default_value: Optional[str]) -> Optional[str]:
try:
if self.litellm_jwtauth.user_id_jwt_field is not None:
user_id = token[self.litellm_jwtauth.user_id_jwt_field]
else:
user_id = default_value
except KeyError:
user_id = default_value
return user_id
def get_user_roles(
self, token: dict, default_value: Optional[List[str]]
) -> Optional[List[str]]:
"""
Returns the user role from the token.
Set via 'user_roles_jwt_field' in the config.
"""
try:
if self.litellm_jwtauth.user_roles_jwt_field is not None:
user_roles = get_nested_value(
data=token,
key_path=self.litellm_jwtauth.user_roles_jwt_field,
default=default_value,
)
else:
user_roles = default_value
except KeyError:
user_roles = default_value
return user_roles
def get_jwt_role(
self, token: dict, default_value: Optional[List[str]]
) -> Optional[List[str]]:
"""
Generic implementation of `get_user_roles` that can be used for both user and team roles.
Returns the jwt role from the token.
Set via 'roles_jwt_field' in the config.
"""
try:
if self.litellm_jwtauth.roles_jwt_field is not None:
user_roles = get_nested_value(
data=token,
key_path=self.litellm_jwtauth.roles_jwt_field,
default=default_value,
)
else:
user_roles = default_value
except KeyError:
user_roles = default_value
return user_roles
def is_allowed_user_role(self, user_roles: Optional[List[str]]) -> bool:
"""
Returns the user role from the token.
Set via 'user_allowed_roles' in the config.
"""
if (
user_roles is not None
and self.litellm_jwtauth.user_allowed_roles is not None
and any(
role in self.litellm_jwtauth.user_allowed_roles for role in user_roles
)
):
return True
return False
def get_user_email(
self, token: dict, default_value: Optional[str]
) -> Optional[str]:
try:
if self.litellm_jwtauth.user_email_jwt_field is not None:
user_email = token[self.litellm_jwtauth.user_email_jwt_field]
else:
user_email = None
except KeyError:
user_email = default_value
return user_email
def get_object_id(self, token: dict, default_value: Optional[str]) -> Optional[str]:
try:
if self.litellm_jwtauth.object_id_jwt_field is not None:
object_id = token[self.litellm_jwtauth.object_id_jwt_field]
else:
object_id = default_value
except KeyError:
object_id = default_value
return object_id
def get_org_id(self, token: dict, default_value: Optional[str]) -> Optional[str]:
try:
if self.litellm_jwtauth.org_id_jwt_field is not None:
org_id = token[self.litellm_jwtauth.org_id_jwt_field]
else:
org_id = None
except KeyError:
org_id = default_value
return org_id
def get_scopes(self, token: dict) -> List[str]:
try:
if isinstance(token["scope"], str):
# Assuming the scopes are stored in 'scope' claim and are space-separated
scopes = token["scope"].split()
elif isinstance(token["scope"], list):
scopes = token["scope"]
else:
raise Exception(
f"Unmapped scope type - {type(token['scope'])}. Supported types - list, str."
)
except KeyError:
scopes = []
return scopes
    async def get_public_key(self, kid: Optional[str]) -> dict:
        """
        Fetch the JWKS public key matching *kid* from JWT_PUBLIC_KEY_URL.

        Supports a comma-separated list of JWKS URLs; each URL's key set is
        cached in user_api_key_cache for `public_key_ttl`. Raises when the
        env var is unset or no URL yields a matching key.
        """
        keys_url = os.getenv("JWT_PUBLIC_KEY_URL")

        if keys_url is None:
            raise Exception("Missing JWT Public Key URL from environment.")

        keys_url_list = [url.strip() for url in keys_url.split(",")]

        for key_url in keys_url_list:
            cache_key = f"litellm_jwt_auth_keys_{key_url}"

            cached_keys = await self.user_api_key_cache.async_get_cache(cache_key)

            if cached_keys is None:
                response = await self.http_handler.get(key_url)

                response_json = response.json()
                if "keys" in response_json:
                    # standard JWKS document; NOTE(review): response.json() is
                    # parsed a second time here — could reuse response_json
                    keys: JWKKeyValue = response.json()["keys"]
                else:
                    # bare key / key list without the JWKS wrapper
                    keys = response_json

                await self.user_api_key_cache.async_set_cache(
                    key=cache_key,
                    value=keys,
                    ttl=self.litellm_jwtauth.public_key_ttl,  # cache for 10 mins
                )
            else:
                keys = cached_keys

            public_key = self.parse_keys(keys=keys, kid=kid)
            if public_key is not None:
                return cast(dict, public_key)

        raise Exception(
            f"No matching public key found. keys={keys_url_list}, kid={kid}"
        )
def parse_keys(self, keys: JWKKeyValue, kid: Optional[str]) -> Optional[JWTKeyItem]:
public_key: Optional[JWTKeyItem] = None
if len(keys) == 1:
if isinstance(keys, dict) and (keys.get("kid", None) == kid or kid is None):
public_key = keys
elif isinstance(keys, list) and (
keys[0].get("kid", None) == kid or kid is None
):
public_key = keys[0]
elif len(keys) > 1:
for key in keys:
if isinstance(key, dict):
key_kid = key.get("kid", None)
else:
key_kid = None
if (
kid is not None
and isinstance(key, dict)
and key_kid is not None
and key_kid == kid
):
public_key = key
return public_key
def is_allowed_domain(self, user_email: str) -> bool:
if self.litellm_jwtauth.user_allowed_email_domain is None:
return True
email_domain = user_email.split("@")[-1] # Extract domain from email
if email_domain == self.litellm_jwtauth.user_allowed_email_domain:
return True
else:
return False
    async def auth_jwt(self, token: str) -> dict:
        """
        Verify *token*'s signature against the JWKS / PEM public key and
        return the decoded payload.

        Only asymmetric algorithms (RS*/PS*) are accepted, to avoid
        symmetric/asymmetric key-confusion attacks. Audience verification is
        skipped unless JWT_AUDIENCE is set in the environment.

        Raises:
            Exception: "Token Expired", "Validation fails: ...", or
            "Invalid JWT Submitted" when no usable key was found.
        """
        # Supported algos: https://pyjwt.readthedocs.io/en/stable/algorithms.html
        # "Warning: Make sure not to mix symmetric and asymmetric algorithms that interpret
        # the key in different ways (e.g. HS* and RS*)."
        algorithms = ["RS256", "RS384", "RS512", "PS256", "PS384", "PS512"]

        audience = os.getenv("JWT_AUDIENCE")

        decode_options = None
        if audience is None:
            decode_options = {"verify_aud": False}

        import jwt
        from jwt.algorithms import RSAAlgorithm

        header = jwt.get_unverified_header(token)

        verbose_proxy_logger.debug("header: %s", header)

        kid = header.get("kid", None)

        public_key = await self.get_public_key(kid=kid)

        if public_key is not None and isinstance(public_key, dict):
            # JWK dict -> rebuild an RSA public key from its components
            jwk = {}
            if "kty" in public_key:
                jwk["kty"] = public_key["kty"]
            if "kid" in public_key:
                jwk["kid"] = public_key["kid"]
            if "n" in public_key:
                jwk["n"] = public_key["n"]
            if "e" in public_key:
                jwk["e"] = public_key["e"]

            public_key_rsa = RSAAlgorithm.from_jwk(json.dumps(jwk))

            try:
                # decode the token using the public key
                payload = jwt.decode(
                    token,
                    public_key_rsa,  # type: ignore
                    algorithms=algorithms,
                    options=decode_options,
                    audience=audience,
                    leeway=self.leeway,  # allow testing of expired tokens
                )
                return payload

            except jwt.ExpiredSignatureError:
                # the token is expired, do something to refresh it
                raise Exception("Token Expired")
            except Exception as e:
                raise Exception(f"Validation fails: {str(e)}")
        elif public_key is not None and isinstance(public_key, str):
            # PEM certificate string -> extract the embedded public key
            try:
                cert = x509.load_pem_x509_certificate(
                    public_key.encode(), default_backend()
                )

                # Extract public key
                key = cert.public_key().public_bytes(
                    serialization.Encoding.PEM,
                    serialization.PublicFormat.SubjectPublicKeyInfo,
                )

                # decode the token using the public key
                # NOTE(review): unlike the JWK branch, leeway is not applied
                # here — confirm whether that asymmetry is intended
                payload = jwt.decode(
                    token,
                    key,
                    algorithms=algorithms,
                    audience=audience,
                    options=decode_options,
                )
                return payload

            except jwt.ExpiredSignatureError:
                # the token is expired, do something to refresh it
                raise Exception("Token Expired")
            except Exception as e:
                raise Exception(f"Validation fails: {str(e)}")

        raise Exception("Invalid JWT Submitted")
    async def close(self):
        """Release the underlying HTTP client's connections."""
        await self.http_handler.close()
class JWTAuthManager:
"""Manages JWT authentication and authorization operations"""
@staticmethod
def can_rbac_role_call_route(
rbac_role: RBAC_ROLES,
general_settings: dict,
route: str,
) -> Literal[True]:
"""
Checks if user is allowed to access the route, based on their role.
"""
role_based_routes = get_role_based_routes(
rbac_role=rbac_role, general_settings=general_settings
)
if role_based_routes is None or route is None:
return True
is_allowed = _allowed_routes_check(
user_route=route,
allowed_routes=role_based_routes,
)
if not is_allowed:
raise HTTPException(
status_code=403,
detail=f"Role={rbac_role} not allowed to call route={route}. Allowed routes={role_based_routes}",
)
return True
@staticmethod
def can_rbac_role_call_model(
rbac_role: RBAC_ROLES,
general_settings: dict,
model: Optional[str],
) -> Literal[True]:
"""
Checks if user is allowed to access the model, based on their role.
"""
role_based_models = get_role_based_models(
rbac_role=rbac_role, general_settings=general_settings
)
if role_based_models is None or model is None:
return True
if model not in role_based_models:
raise HTTPException(
status_code=403,
detail=f"Role={rbac_role} not allowed to call model={model}. Allowed models={role_based_models}",
)
return True
@staticmethod
def check_scope_based_access(
scope_mappings: List[ScopeMapping],
scopes: List[str],
request_data: dict,
general_settings: dict,
) -> None:
"""
Check if scope allows access to the requested model
"""
if not scope_mappings:
return None
allowed_models = []
for sm in scope_mappings:
if sm.scope in scopes and sm.models:
allowed_models.extend(sm.models)
requested_model = request_data.get("model")
if not requested_model:
return None
if requested_model not in allowed_models:
raise HTTPException(
status_code=403,
detail={
"error": "model={} not allowed. Allowed_models={}".format(
requested_model, allowed_models
)
},
)
return None
    @staticmethod
    async def check_rbac_role(
        jwt_handler: JWTHandler,
        jwt_valid_token: dict,
        general_settings: dict,
        request_data: dict,
        route: str,
        rbac_role: Optional[RBAC_ROLES],
    ) -> None:
        """Validate RBAC role and model access permissions.

        Only enforced when `enforce_rbac` is enabled on the JWT-auth config;
        raises 403 (HTTPException) when the token maps to no role, or when
        the role may not use the requested model or route.
        """
        if jwt_handler.litellm_jwtauth.enforce_rbac is True:
            if rbac_role is None:
                raise HTTPException(
                    status_code=403,
                    detail="Unmatched token passed in. enforce_rbac is set to True. Token must belong to a proxy admin, team, or user.",
                )
            # both helpers raise HTTPException(403) on denial
            JWTAuthManager.can_rbac_role_call_model(
                rbac_role=rbac_role,
                general_settings=general_settings,
                model=request_data.get("model"),
            )
            JWTAuthManager.can_rbac_role_call_route(
                rbac_role=rbac_role,
                general_settings=general_settings,
                route=route,
            )
    @staticmethod
    async def check_admin_access(
        jwt_handler: JWTHandler,
        scopes: list,
        route: str,
        user_id: Optional[str],
        org_id: Optional[str],
        api_key: str,
    ) -> Optional[JWTAuthBuilderResult]:
        """Check admin status and route access permissions.

        Returns a fully-populated admin auth result when the token carries
        the admin scope; None when it is not an admin token. Raises when an
        admin token hits a route outside `admin_allowed_routes`.
        """
        if not jwt_handler.is_admin(scopes=scopes):
            return None

        is_allowed = allowed_routes_check(
            user_role=LitellmUserRoles.PROXY_ADMIN,
            user_route=route,
            litellm_proxy_roles=jwt_handler.litellm_jwtauth,
        )
        if not is_allowed:
            allowed_routes: List[Any] = jwt_handler.litellm_jwtauth.admin_allowed_routes
            actual_routes = get_actual_routes(allowed_routes=allowed_routes)
            raise Exception(
                f"Admin not allowed to access this route. Route={route}, Allowed Routes={actual_routes}"
            )

        # admins are not scoped to a team / org object
        return JWTAuthBuilderResult(
            is_proxy_admin=True,
            team_object=None,
            user_object=None,
            end_user_object=None,
            org_object=None,
            token=api_key,
            team_id=None,
            user_id=user_id,
            end_user_id=None,
            org_id=org_id,
        )
    @staticmethod
    async def find_and_validate_specific_team_id(
        jwt_handler: JWTHandler,
        jwt_valid_token: dict,
        prisma_client: Optional[PrismaClient],
        user_api_key_cache: DualCache,
        parent_otel_span: Optional[Span],
        proxy_logging_obj: ProxyLogging,
    ) -> Tuple[Optional[str], Optional[LiteLLM_TeamTable]]:
        """Find and validate specific team ID.

        Reads the team-id claim from the token and, when present, loads (and
        optionally upserts, per `team_id_upsert`) the team record. Raises
        when the config requires a team id but the token carries none.
        """
        individual_team_id = jwt_handler.get_team_id(
            token=jwt_valid_token, default_value=None
        )

        if not individual_team_id and jwt_handler.is_required_team_id() is True:
            raise Exception(
                f"No team id found in token. Checked team_id field '{jwt_handler.litellm_jwtauth.team_id_jwt_field}'"
            )

        ## VALIDATE TEAM OBJECT ###
        team_object: Optional[LiteLLM_TeamTable] = None
        if individual_team_id:
            team_object = await get_team_object(
                team_id=individual_team_id,
                prisma_client=prisma_client,
                user_api_key_cache=user_api_key_cache,
                parent_otel_span=parent_otel_span,
                proxy_logging_obj=proxy_logging_obj,
                team_id_upsert=jwt_handler.litellm_jwtauth.team_id_upsert,
            )

        return individual_team_id, team_object
@staticmethod
def get_all_team_ids(jwt_handler: JWTHandler, jwt_valid_token: dict) -> Set[str]:
"""Get combined team IDs from groups and individual team_id"""
team_ids_from_groups = jwt_handler.get_team_ids_from_jwt(token=jwt_valid_token)
all_team_ids = set(team_ids_from_groups)
return all_team_ids
    @staticmethod
    async def find_team_with_model_access(
        team_ids: Set[str],
        requested_model: Optional[str],
        route: str,
        jwt_handler: JWTHandler,
        prisma_client: Optional[PrismaClient],
        user_api_key_cache: DualCache,
        parent_otel_span: Optional[Span],
        proxy_logging_obj: ProxyLogging,
    ) -> Tuple[Optional[str], Optional[LiteLLM_TeamTable]]:
        """Find first team with access to the requested model.

        Iterates *team_ids* (an unordered set, so "first" is arbitrary) and
        returns the first team whose model list covers *requested_model* and
        whose TEAM role may call *route*. Raises 403 when a model was
        requested but no team grants it, or when teams are required
        (`enforce_team_based_model_access`) but none are on the token.
        """
        if not team_ids:
            if jwt_handler.litellm_jwtauth.enforce_team_based_model_access:
                raise HTTPException(
                    status_code=403,
                    detail="No teams found in token. `enforce_team_based_model_access` is set to True. Token must belong to a team.",
                )
            return None, None

        for team_id in team_ids:
            try:
                team_object = await get_team_object(
                    team_id=team_id,
                    prisma_client=prisma_client,
                    user_api_key_cache=user_api_key_cache,
                    parent_otel_span=parent_otel_span,
                    proxy_logging_obj=proxy_logging_obj,
                )
                if team_object and team_object.models is not None:
                    team_models = team_object.models
                    if isinstance(team_models, list) and (
                        not requested_model
                        or can_team_access_model(
                            model=requested_model,
                            team_object=team_object,
                            llm_router=None,
                            team_model_aliases=None,
                        )
                    ):
                        is_allowed = allowed_routes_check(
                            user_role=LitellmUserRoles.TEAM,
                            user_route=route,
                            litellm_proxy_roles=jwt_handler.litellm_jwtauth,
                        )
                        if is_allowed:
                            return team_id, team_object
            except Exception:
                # NOTE(review): lookup errors for one team are silently
                # skipped so the remaining teams can still match
                continue

        if requested_model:
            raise HTTPException(
                status_code=403,
                detail=f"No team has access to the requested model: {requested_model}. Checked teams={team_ids}. Check `/models` to see all available models.",
            )

        return None, None
@staticmethod
async def get_user_info(
jwt_handler: JWTHandler,
jwt_valid_token: dict,
) -> Tuple[Optional[str], Optional[str], Optional[bool]]:
"""Get user email and validation status"""
user_email = jwt_handler.get_user_email(
token=jwt_valid_token, default_value=None
)
valid_user_email = None
if jwt_handler.is_enforced_email_domain():
valid_user_email = (
False
if user_email is None
else jwt_handler.is_allowed_domain(user_email=user_email)
)
user_id = jwt_handler.get_user_id(
token=jwt_valid_token, default_value=user_email
)
return user_id, user_email, valid_user_email
@staticmethod
async def get_objects(
user_id: Optional[str],
user_email: Optional[str],
org_id: Optional[str],
end_user_id: Optional[str],
valid_user_email: Optional[bool],
jwt_handler: JWTHandler,
prisma_client: Optional[PrismaClient],
user_api_key_cache: DualCache,
parent_otel_span: Optional[Span],
proxy_logging_obj: ProxyLogging,
) -> Tuple[
Optional[LiteLLM_UserTable],
Optional[LiteLLM_OrganizationTable],
Optional[LiteLLM_EndUserTable],
]:
"""Get user, org, and end user objects"""
org_object: Optional[LiteLLM_OrganizationTable] = None
if org_id:
org_object = (
await get_org_object(
org_id=org_id,
prisma_client=prisma_client,
user_api_key_cache=user_api_key_cache,
parent_otel_span=parent_otel_span,
proxy_logging_obj=proxy_logging_obj,
)
if org_id
else None
)
user_object: Optional[LiteLLM_UserTable] = None
if user_id:
user_object = (
await get_user_object(
user_id=user_id,
prisma_client=prisma_client,
user_api_key_cache=user_api_key_cache,
user_id_upsert=jwt_handler.is_upsert_user_id(
valid_user_email=valid_user_email
),
parent_otel_span=parent_otel_span,
proxy_logging_obj=proxy_logging_obj,
user_email=user_email,
sso_user_id=user_id,
)
if user_id
else None
)
end_user_object: Optional[LiteLLM_EndUserTable] = None
if end_user_id:
end_user_object = (
await get_end_user_object(
end_user_id=end_user_id,
prisma_client=prisma_client,
user_api_key_cache=user_api_key_cache,
parent_otel_span=parent_otel_span,
proxy_logging_obj=proxy_logging_obj,
)
if end_user_id
else None
)
return user_object, org_object, end_user_object
@staticmethod
def validate_object_id(
user_id: Optional[str],
team_id: Optional[str],
enforce_rbac: bool,
is_proxy_admin: bool,
) -> Literal[True]:
"""If enforce_rbac is true, validate that a valid rbac id is returned for spend tracking"""
if enforce_rbac and not is_proxy_admin and not user_id and not team_id:
raise HTTPException(
status_code=403,
detail="No user or team id found in token. enforce_rbac is set to True. Token must belong to a proxy admin, team, or user.",
)
return True
@staticmethod
async def auth_builder(
api_key: str,
jwt_handler: JWTHandler,
request_data: dict,
general_settings: dict,
route: str,
prisma_client: Optional[PrismaClient],
user_api_key_cache: DualCache,
parent_otel_span: Optional[Span],
proxy_logging_obj: ProxyLogging,
) -> JWTAuthBuilderResult:
"""Main authentication and authorization builder"""
jwt_valid_token: dict = await jwt_handler.auth_jwt(token=api_key)
# Check custom validate
if jwt_handler.litellm_jwtauth.custom_validate:
if not jwt_handler.litellm_jwtauth.custom_validate(jwt_valid_token):
raise HTTPException(
status_code=403,
detail="Invalid JWT token",
)
# Check RBAC
rbac_role = jwt_handler.get_rbac_role(token=jwt_valid_token)
await JWTAuthManager.check_rbac_role(
jwt_handler,
jwt_valid_token,
general_settings,
request_data,
route,
rbac_role,
)
# Check Scope Based Access
scopes = jwt_handler.get_scopes(token=jwt_valid_token)
if (
jwt_handler.litellm_jwtauth.enforce_scope_based_access
and jwt_handler.litellm_jwtauth.scope_mappings
):
JWTAuthManager.check_scope_based_access(
scope_mappings=jwt_handler.litellm_jwtauth.scope_mappings,
scopes=scopes,
request_data=request_data,
general_settings=general_settings,
)
object_id = jwt_handler.get_object_id(token=jwt_valid_token, default_value=None)
# Get basic user info
scopes = jwt_handler.get_scopes(token=jwt_valid_token)
user_id, user_email, valid_user_email = await JWTAuthManager.get_user_info(
jwt_handler, jwt_valid_token
)
# Get IDs
org_id = jwt_handler.get_org_id(token=jwt_valid_token, default_value=None)
end_user_id = jwt_handler.get_end_user_id(
token=jwt_valid_token, default_value=None
)
team_id: Optional[str] = None
team_object: Optional[LiteLLM_TeamTable] = None
object_id = jwt_handler.get_object_id(token=jwt_valid_token, default_value=None)
if rbac_role and object_id:
if rbac_role == LitellmUserRoles.TEAM:
team_id = object_id
elif rbac_role == LitellmUserRoles.INTERNAL_USER:
user_id = object_id
# Check admin access
admin_result = await JWTAuthManager.check_admin_access(
jwt_handler, scopes, route, user_id, org_id, api_key
)
if admin_result:
return admin_result
# Get team with model access
## SPECIFIC TEAM ID
if not team_id:
(
team_id,
team_object,
) = await JWTAuthManager.find_and_validate_specific_team_id(
jwt_handler,
jwt_valid_token,
prisma_client,
user_api_key_cache,
parent_otel_span,
proxy_logging_obj,
)
if not team_object and not team_id:
## CHECK USER GROUP ACCESS
all_team_ids = JWTAuthManager.get_all_team_ids(jwt_handler, jwt_valid_token)
team_id, team_object = await JWTAuthManager.find_team_with_model_access(
team_ids=all_team_ids,
requested_model=request_data.get("model"),
route=route,
jwt_handler=jwt_handler,
prisma_client=prisma_client,
user_api_key_cache=user_api_key_cache,
parent_otel_span=parent_otel_span,
proxy_logging_obj=proxy_logging_obj,
)
# Get other objects
user_object, org_object, end_user_object = await JWTAuthManager.get_objects(
user_id=user_id,
user_email=user_email,
org_id=org_id,
end_user_id=end_user_id,
valid_user_email=valid_user_email,
jwt_handler=jwt_handler,
prisma_client=prisma_client,
user_api_key_cache=user_api_key_cache,
parent_otel_span=parent_otel_span,
proxy_logging_obj=proxy_logging_obj,
)
# Validate that a valid rbac id is returned for spend tracking
JWTAuthManager.validate_object_id(
user_id=user_id,
team_id=team_id,
enforce_rbac=general_settings.get("enforce_rbac", False),
is_proxy_admin=False,
)
return JWTAuthBuilderResult(
is_proxy_admin=False,
team_id=team_id,
team_object=team_object,
user_id=user_id,
user_object=user_object,
org_id=org_id,
org_object=org_object,
end_user_id=end_user_id,
end_user_object=end_user_object,
token=api_key,
)

# NOTE: removed stray diff-viewer residue ("Some files were not shown because too many files have changed in this diff") — not part of the source.