mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 03:34:10 +00:00
[Feat-Proxy] add service accounts backend (#5852)
* service_account_settings on config
* add service account checks
* call service_account_checks
* add testing for service accounts
This commit is contained in:
parent
41ee0efb1a
commit
d33a33d283
4 changed files with 119 additions and 2 deletions
|
@ -3249,3 +3249,61 @@ async def test_auth_vertex_ai_route(prisma_client):
|
|||
assert "Invalid proxy server token passed" in error_str
|
||||
|
||||
pass
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_service_accounts(prisma_client):
    """
    Do not delete
    this is the Admin UI flow

    Generates a key whose metadata carries a ``service_account_id``, then
    verifies that ``service_account_settings.enforced_params`` is applied by
    ``user_api_key_auth``:
      * a /chat/completions body missing the enforced ``user`` param is rejected
      * the same body with ``user`` present is accepted
    """
    # Make a call to a key with model = `all-proxy-models` this is an Alias from LiteLLM Admin UI
    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
    setattr(
        litellm.proxy.proxy_server,
        "general_settings",
        {"service_account_settings": {"enforced_params": ["user"]}},
    )

    await litellm.proxy.proxy_server.prisma_client.connect()

    try:
        request = GenerateKeyRequest(
            # service_account_id marks this key as a service account, which is
            # what triggers the enforced_params check in user_api_key_auth
            metadata={"service_account_id": f"prod-service-{uuid.uuid4()}"},
        )
        response = await generate_key_fn(
            data=request,
        )

        print("key generated=", response)
        generated_key = response.key
        bearer_token = "Bearer " + generated_key
        # make a bad /chat/completions call expect it to fail

        request = Request(scope={"type": "http"})
        request._url = URL(url="/chat/completions")

        async def return_body():
            # body is missing the enforced "user" param -> auth must reject it
            return b'{"model": "gemini-pro-vision"}'

        request.body = return_body

        # use generated key to auth in
        print("Bearer token being sent to user_api_key_auth() - {}".format(bearer_token))
        try:
            result = await user_api_key_auth(request=request, api_key=bearer_token)
            pytest.fail("Expected this call to fail. Bad request using service account")
        except Exception as e:
            # NOTE(review): relies on the raised ProxyException exposing
            # `.message` — confirm this holds for the auth error type
            print("error str=", str(e.message))
            assert "This is a required param for service account" in str(e.message)

        # make a good /chat/completions call it should pass
        async def good_return_body():
            return b'{"model": "gemini-pro-vision", "user": "foo"}'

        request.body = good_return_body

        result = await user_api_key_auth(request=request, api_key=bearer_token)
        print("response from user_api_key_auth", result)
    finally:
        # Always restore the module-level general_settings. Previously this ran
        # only on the success path, so any failure above leaked the
        # service-account enforcement into unrelated tests.
        setattr(litellm.proxy.proxy_server, "general_settings", {})
|
Loading…
Add table
Add a link
Reference in a new issue