diff --git a/litellm/proxy/auth/auth_utils.py b/litellm/proxy/auth/auth_utils.py
index f9be71c35..d1e1b1709 100644
--- a/litellm/proxy/auth/auth_utils.py
+++ b/litellm/proxy/auth/auth_utils.py
@@ -80,6 +80,25 @@ def is_llm_api_route(route: str) -> bool:
     return False
 
 
+def get_request_route(request: Request) -> str:
+    """
+    Helper to get the route from the request
+
+    remove base url from path if set e.g. `/genai/chat/completions` -> `/chat/completions`
+    """
+    try:
+        if request.url.path.startswith(request.base_url.path):
+            # remove base_url from path
+            return request.url.path[len(request.base_url.path) - 1 :]
+        else:
+            return request.url.path
+    except Exception as e:
+        verbose_proxy_logger.warning(
+            f"error on get_request_route: {str(e)}, defaulting to request.url.path"
+        )
+        return request.url.path
+
+
 async def check_if_request_size_is_safe(request: Request) -> bool:
     """
     Enterprise Only:
diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py
index b8be22605..119249042 100644
--- a/litellm/proxy/auth/user_api_key_auth.py
+++ b/litellm/proxy/auth/user_api_key_auth.py
@@ -58,6 +58,7 @@ from litellm.proxy.auth.auth_checks import (
 )
 from litellm.proxy.auth.auth_utils import (
     check_if_request_size_is_safe,
+    get_request_route,
     is_llm_api_route,
     route_in_additonal_public_routes,
 )
@@ -115,7 +116,7 @@ async def user_api_key_auth(
     )
 
     try:
-        route: str = request.url.path
+        route: str = get_request_route(request=request)
 
         ### LiteLLM Enterprise Security Checks
         # Check 1. Check if request size is under max_request_size_mb
diff --git a/litellm/tests/test_proxy_routes.py b/litellm/tests/test_proxy_routes.py
index 03f112a5e..90fda07a1 100644
--- a/litellm/tests/test_proxy_routes.py
+++ b/litellm/tests/test_proxy_routes.py
@@ -16,10 +16,12 @@ import asyncio
 import logging
 
 import pytest
+from fastapi import Request
+from starlette.datastructures import URL, Headers, QueryParams
 
 import litellm
 from litellm.proxy._types import LiteLLMRoutes
-from litellm.proxy.auth.auth_utils import is_llm_api_route
+from litellm.proxy.auth.auth_utils import get_request_route, is_llm_api_route
 from litellm.proxy.proxy_server import app
 
 # Configure logging
@@ -98,3 +100,52 @@ def test_is_llm_api_route_similar_but_false(route: str):
 def test_anthropic_api_routes():
     # allow non proxy admins to call anthropic api routes
     assert is_llm_api_route(route="/v1/messages") is True
+
+
+def create_request(path: str, base_url: str = "http://testserver") -> Request:
+    return Request(
+        {
+            "type": "http",
+            "method": "GET",
+            "scheme": "http",
+            "server": ("testserver", 80),
+            "path": path,
+            "query_string": b"",
+            "headers": Headers().raw,
+            "client": ("testclient", 50000),
+            "root_path": URL(base_url).path,
+        }
+    )
+
+
+def test_get_request_route_with_base_url():
+    request = create_request(
+        path="/genai/chat/completions", base_url="http://testserver/genai"
+    )
+    result = get_request_route(request)
+    assert result == "/chat/completions"
+
+
+def test_get_request_route_without_base_url():
+    request = create_request("/chat/completions")
+    result = get_request_route(request)
+    assert result == "/chat/completions"
+
+
+def test_get_request_route_with_nested_path():
+    request = create_request(path="/embeddings", base_url="http://testserver/ishaan")
+    result = get_request_route(request)
+    assert result == "/embeddings"
+
+
+def test_get_request_route_with_query_params():
+    request = create_request(path="/genai/test", base_url="http://testserver/genai")
+    request.scope["query_string"] = b"param=value"
+    result = get_request_route(request)
+    assert result == "/test"
+
+
+def test_get_request_route_with_base_url_not_at_start():
+    request = create_request("/api/genai/test")
+    result = get_request_route(request)
+    assert result == "/api/genai/test"
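Note (not part of the patch): a minimal sketch of how the new get_request_route helper is expected to behave when the proxy is served behind a path prefix such as /genai. It builds a Request the same way the create_request helper in the tests above does; the exact base_url handling depends on the installed Starlette/FastAPI version, so treat the expected output as an assumption rather than a guarantee.

    from fastapi import Request
    from starlette.datastructures import URL, Headers

    from litellm.proxy.auth.auth_utils import get_request_route

    # Bare ASGI scope, mirroring create_request() from the tests above.
    scope = {
        "type": "http",
        "method": "POST",
        "scheme": "http",
        "server": ("testserver", 80),
        "path": "/genai/chat/completions",  # full path, prefix included
        "query_string": b"",
        "headers": Headers().raw,
        "client": ("testclient", 50000),
        "root_path": URL("http://testserver/genai").path,  # proxy mounted under /genai
    }

    request = Request(scope)
    # The /genai prefix is stripped before auth/route checks.
    print(get_request_route(request=request))  # expected: /chat/completions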