diff --git a/litellm/proxy/auth/route_checks.py b/litellm/proxy/auth/route_checks.py index c75c1e66c..9496776a8 100644 --- a/litellm/proxy/auth/route_checks.py +++ b/litellm/proxy/auth/route_checks.py @@ -192,6 +192,10 @@ class RouteChecks: return True if "/langfuse/" in route: return True + if "/anthropic/" in route: + return True + if "/azure/" in route: + return True return False @staticmethod diff --git a/litellm/proxy/vertex_ai_endpoints/google_ai_studio_endpoints.py b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py similarity index 98% rename from litellm/proxy/vertex_ai_endpoints/google_ai_studio_endpoints.py rename to litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py index c4a64fa21..0834102b3 100644 --- a/litellm/proxy/vertex_ai_endpoints/google_ai_studio_endpoints.py +++ b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py @@ -2,10 +2,8 @@ What is this? Provider-specific Pass-Through Endpoints -"""
"""
-1. Create pass-through endpoints for any LITELLM_BASE_URL/gemini/ map to https://generativelanguage.googleapis.com/ +Use litellm with Anthropic SDK, Vertex AI SDK, Cohere SDK, etc.
""" import ast diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 1551330d1..9d7c120a7 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -203,6 +203,9 @@ from litellm.proxy.openai_files_endpoints.files_endpoints import ( router as openai_files_router, ) from litellm.proxy.openai_files_endpoints.files_endpoints import set_files_config +from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( + router as llm_passthrough_router, +) from litellm.proxy.pass_through_endpoints.pass_through_endpoints import ( initialize_pass_through_endpoints, ) @@ -233,9 +236,6 @@ from litellm.proxy.utils import ( reset_budget, update_spend, ) -from litellm.proxy.vertex_ai_endpoints.google_ai_studio_endpoints import ( - router as gemini_router, -) from litellm.proxy.vertex_ai_endpoints.langfuse_endpoints import ( router as langfuse_router, ) @@ -9128,7 +9128,7 @@ app.include_router(router) app.include_router(rerank_router) app.include_router(fine_tuning_router) app.include_router(vertex_router) -app.include_router(gemini_router) +app.include_router(llm_passthrough_router) app.include_router(langfuse_router) app.include_router(pass_through_router) app.include_router(health_router) diff --git a/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py b/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py index 001cc0640..a8bba211f 100644 --- a/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py +++ b/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py @@ -27,6 +27,9 @@ from fastapi import HTTPException, Request import pytest from litellm.proxy.auth.route_checks import RouteChecks from litellm.proxy._types import LiteLLM_UserTable, LitellmUserRoles, UserAPIKeyAuth +from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( + router as llm_passthrough_router, +) # Replace the actual hash_token function with our mock import litellm.proxy.auth.route_checks @@ -56,12 +59,21 @@ 
def test_is_llm_api_route(): assert RouteChecks.is_llm_api_route("/vertex-ai/text") is True assert RouteChecks.is_llm_api_route("/gemini/generate") is True assert RouteChecks.is_llm_api_route("/cohere/generate") is True + assert RouteChecks.is_llm_api_route("/anthropic/messages") is True + assert RouteChecks.is_llm_api_route("/anthropic/v1/messages") is True + assert RouteChecks.is_llm_api_route("/azure/endpoint") is True # check non-matching routes assert RouteChecks.is_llm_api_route("/some/random/route") is False assert RouteChecks.is_llm_api_route("/key/regenerate/82akk800000000jjsk") is False assert RouteChecks.is_llm_api_route("/key/82akk800000000jjsk/delete") is False + # check all routes in llm_passthrough_router, ensure they are considered llm api routes + for route in llm_passthrough_router.routes: + route_path = str(route.path) + print("route_path", route_path) + assert RouteChecks.is_llm_api_route(route_path) is True + # Test _route_matches_pattern def test_route_matches_pattern(): diff --git a/tests/proxy_unit_tests/test_proxy_server.py b/tests/proxy_unit_tests/test_proxy_server.py index b1c00ce75..d70962858 100644 --- a/tests/proxy_unit_tests/test_proxy_server.py +++ b/tests/proxy_unit_tests/test_proxy_server.py @@ -1794,7 +1794,7 @@ async def test_add_callback_via_key_litellm_pre_call_utils_langsmith( async def test_gemini_pass_through_endpoint(): from starlette.datastructures import URL - from litellm.proxy.vertex_ai_endpoints.google_ai_studio_endpoints import ( + from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( Request, Response, gemini_proxy_route,