Merge pull request #5050 from BerriAI/litellm_use_base_path

[Proxy-Fix] Requests that are incorrectly flagged as admin-only paths
This commit is contained in:
Ishaan Jaff 2024-08-05 11:04:39 -07:00 committed by GitHub
commit bf28b893d8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 73 additions and 2 deletions

View file

@@ -80,6 +80,25 @@ def is_llm_api_route(route: str) -> bool:
return False
def get_request_route(request: Request) -> str:
    """
    Return the route portion of the request path, with any server
    base path stripped off.

    Example: with a base url of `/genai`, the path
    `/genai/chat/completions` resolves to `/chat/completions`.

    Falls back to the raw `request.url.path` on any error.
    """
    try:
        full_path = request.url.path
        prefix = request.base_url.path
        if not full_path.startswith(prefix):
            return full_path
        # base_url.path ends with "/" (starlette convention); the -1 keeps
        # that slash as the leading "/" of the returned route
        return full_path[len(prefix) - 1 :]
    except Exception as e:
        verbose_proxy_logger.warning(
            f"error on get_request_route: {str(e)}, defaulting to request.url.path"
        )
        return request.url.path
async def check_if_request_size_is_safe(request: Request) -> bool: async def check_if_request_size_is_safe(request: Request) -> bool:
""" """
Enterprise Only: Enterprise Only:

View file

@@ -58,6 +58,7 @@ from litellm.proxy.auth.auth_checks import (
) )
from litellm.proxy.auth.auth_utils import ( from litellm.proxy.auth.auth_utils import (
check_if_request_size_is_safe, check_if_request_size_is_safe,
get_request_route,
is_llm_api_route, is_llm_api_route,
route_in_additonal_public_routes, route_in_additonal_public_routes,
) )
@@ -115,7 +116,7 @@ async def user_api_key_auth(
) )
try: try:
route: str = request.url.path route: str = get_request_route(request=request)
### LiteLLM Enterprise Security Checks ### LiteLLM Enterprise Security Checks
# Check 1. Check if request size is under max_request_size_mb # Check 1. Check if request size is under max_request_size_mb

View file

@@ -16,10 +16,12 @@ import asyncio
import logging import logging
import pytest import pytest
from fastapi import Request
from starlette.datastructures import URL, Headers, QueryParams
import litellm import litellm
from litellm.proxy._types import LiteLLMRoutes from litellm.proxy._types import LiteLLMRoutes
from litellm.proxy.auth.auth_utils import is_llm_api_route from litellm.proxy.auth.auth_utils import get_request_route, is_llm_api_route
from litellm.proxy.proxy_server import app from litellm.proxy.proxy_server import app
# Configure logging # Configure logging
@@ -98,3 +100,52 @@ def test_is_llm_api_route_similar_but_false(route: str):
def test_anthropic_api_routes(): def test_anthropic_api_routes():
# allow non proxy admins to call anthropic api routes # allow non proxy admins to call anthropic api routes
assert is_llm_api_route(route="/v1/messages") is True assert is_llm_api_route(route="/v1/messages") is True
def create_request(path: str, base_url: str = "http://testserver") -> Request:
    """Build a minimal ASGI-scope `Request` for the given path, with the
    base url's path installed as the ASGI `root_path`."""
    scope = {
        "type": "http",
        "method": "GET",
        "scheme": "http",
        "server": ("testserver", 80),
        "path": path,
        "query_string": b"",
        "headers": Headers().raw,
        "client": ("testclient", 50000),
        "root_path": URL(base_url).path,
    }
    return Request(scope)
def test_get_request_route_with_base_url():
    """Server mounted under /genai: the prefix is stripped from the route."""
    req = create_request(
        path="/genai/chat/completions", base_url="http://testserver/genai"
    )
    assert get_request_route(req) == "/chat/completions"
def test_get_request_route_without_base_url():
    """No base path configured: the route is returned unchanged."""
    req = create_request("/chat/completions")
    assert get_request_route(req) == "/chat/completions"
def test_get_request_route_with_nested_path():
    """Path that does not start with the base path is left untouched."""
    req = create_request(path="/embeddings", base_url="http://testserver/ishaan")
    assert get_request_route(req) == "/embeddings"
def test_get_request_route_with_query_params():
    """A query string must not affect base-path stripping."""
    req = create_request(path="/genai/test", base_url="http://testserver/genai")
    req.scope["query_string"] = b"param=value"
    assert get_request_route(req) == "/test"
def test_get_request_route_with_base_url_not_at_start():
    """A base-path-like segment in the middle of the path is not stripped."""
    req = create_request("/api/genai/test")
    assert get_request_route(req) == "/api/genai/test"