fix re-add virtual key auth checks on vertex ai pass thru endpoints (#5827)

Ishaan Jaff 2024-09-21 17:34:10 -07:00 committed by GitHub
parent e4f309d0e7
commit 16b0d38c11
3 changed files with 49 additions and 3 deletions


@@ -792,7 +792,8 @@ general_settings:
     "alerting": [
       "string"
     ],
-    "alerting_threshold": 0
+    "alerting_threshold": 0,
+    "use_client_credentials_pass_through_routes": "boolean", # use client credentials for all pass through routes like "/vertex-ai", "/bedrock". When this is True, Virtual Key auth will not be applied on these endpoints. https://docs.litellm.ai/docs/pass_through/vertex_ai
   }
 }
 ```
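For orientation (not part of the diff): a minimal proxy config sketch showing where this flag would be set, assuming the standard `config.yaml` `general_settings` block described in the linked docs; the `master_key` value is a placeholder.

```yaml
# Sketch only: enabling client-credential pass-through in a LiteLLM proxy config.yaml
general_settings:
  master_key: sk-1234  # placeholder
  # When true, Virtual Key auth is skipped on pass-through routes like /vertex-ai and /bedrock
  use_client_credentials_pass_through_routes: true
```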


@@ -354,9 +354,26 @@ def is_pass_through_provider_route(route: str) -> bool:
 def should_run_auth_on_pass_through_provider_route(route: str) -> bool:
     """
-    Use this to decide if the rest of the LiteLLM Virtual Key auth checks should run on /vertex-ai/{endpoint} routes
+    Use this to decide if the rest of the LiteLLM Virtual Key auth checks should run on provider pass through routes
+    ex /vertex-ai/{endpoint} routes
+
+    Run virtual key auth if the following is true:
+    - User is a premium_user
+    - User has not enabled general_settings.use_client_credentials_pass_through_routes
     """
-    # by default we do not run virtual key auth checks on /vertex-ai/{endpoint} routes
-    return False
+    from litellm.proxy.proxy_server import general_settings, premium_user
+
+    if premium_user is not True:
+        return False
+
+    # premium user has opted into using client credentials
+    if (
+        general_settings.get("use_client_credentials_pass_through_routes", False)
+        is True
+    ):
+        return False
+
+    # only enabled for LiteLLM Enterprise
+    return True
 
 
 def _has_user_setup_sso():
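Not part of the commit, but for context: a rough sketch of how a caller such as `user_api_key_auth` might combine `is_pass_through_provider_route` with the helper above. The wrapper name `_should_skip_virtual_key_auth` and the import path are assumptions for illustration, not the actual proxy code.

```python
# Illustrative sketch only -- the wrapper name and import path are assumed, not taken from the commit.
from litellm.proxy.auth.auth_utils import (  # path assumed
    is_pass_through_provider_route,
    should_run_auth_on_pass_through_provider_route,
)


def _should_skip_virtual_key_auth(route: str) -> bool:
    """Hypothetical wrapper: skip Virtual Key checks only on pass-through provider routes
    where auth is not required (non-premium deployment, or client credentials opted in)."""
    if is_pass_through_provider_route(route):
        return not should_run_auth_on_pass_through_provider_route(route)
    return False
```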


@@ -3221,3 +3221,31 @@ async def test_key_list_unsupported_params(prisma_client):
         error_str = str(e.message)
         assert "Unsupported parameter" in error_str
         pass
+
+
+@pytest.mark.asyncio
+async def test_auth_vertex_ai_route(prisma_client):
+    """
+    If the user is a premium user and a vertex-ai route is used, assert that Virtual Key checks are run
+    """
+    litellm.set_verbose = True
+    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
+    setattr(litellm.proxy.proxy_server, "premium_user", True)
+    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
+    await litellm.proxy.proxy_server.prisma_client.connect()
+
+    route = "/vertex-ai/publishers/google/models/gemini-1.5-flash-001:generateContent"
+    request = Request(scope={"type": "http"})
+    request._url = URL(url=route)
+    request._headers = {"Authorization": "Bearer sk-12345"}
+
+    try:
+        await user_api_key_auth(request=request, api_key="Bearer " + "sk-12345")
+        pytest.fail("Expected this call to fail. An invalid key was passed.")
+    except Exception as e:
+        print(vars(e))
+        print("error str=", str(e.message))
+        error_str = str(e.message)
+        assert e.code == "401"
+        assert "Invalid proxy server token passed" in error_str
+        pass
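A possible companion test (not in this commit) that exercises the opt-out branch of `should_run_auth_on_pass_through_provider_route` directly. The import path, and patching `premium_user` / `general_settings` on the `proxy_server` module, are assumptions made for this sketch.

```python
# Sketch of a follow-up unit test (not part of this commit). Import path is assumed.
import litellm.proxy.proxy_server
from litellm.proxy.auth.auth_utils import should_run_auth_on_pass_through_provider_route


def test_should_run_auth_on_pass_through_provider_route_toggle():
    # premium user, flag unset -> Virtual Key auth should run on pass-through routes
    setattr(litellm.proxy.proxy_server, "premium_user", True)
    litellm.proxy.proxy_server.general_settings = {}
    assert should_run_auth_on_pass_through_provider_route("/vertex-ai/foo") is True

    # premium user opted into client credentials -> Virtual Key auth is skipped
    litellm.proxy.proxy_server.general_settings = {
        "use_client_credentials_pass_through_routes": True
    }
    assert should_run_auth_on_pass_through_provider_route("/vertex-ai/foo") is False
```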