mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
(fix) support all-models
alias on backend
This commit is contained in:
parent
d46f77fd58
commit
73ef4780f7
2 changed files with 45 additions and 0 deletions
|
@ -637,6 +637,12 @@ async def user_api_key_auth(
|
||||||
len(valid_token.models) == 0
|
len(valid_token.models) == 0
|
||||||
): # assume an empty model list means all models are allowed to be called
|
): # assume an empty model list means all models are allowed to be called
|
||||||
pass
|
pass
|
||||||
|
elif (
|
||||||
|
isinstance(valid_token.models, list)
|
||||||
|
and "all-models" in valid_token.models
|
||||||
|
):
|
||||||
|
# Admin UI - Special alias to allow `all_models`
|
||||||
|
pass
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
data = await request.json()
|
data = await request.json()
|
||||||
|
|
|
@ -49,6 +49,7 @@ from litellm.proxy.proxy_server import (
|
||||||
spend_key_fn,
|
spend_key_fn,
|
||||||
view_spend_logs,
|
view_spend_logs,
|
||||||
user_info,
|
user_info,
|
||||||
|
info_key_fn,
|
||||||
)
|
)
|
||||||
from litellm.proxy.utils import PrismaClient, ProxyLogging, hash_token
|
from litellm.proxy.utils import PrismaClient, ProxyLogging, hash_token
|
||||||
from litellm._logging import verbose_proxy_logger
|
from litellm._logging import verbose_proxy_logger
|
||||||
|
@ -245,6 +246,44 @@ def test_call_with_valid_model(prisma_client):
|
||||||
pytest.fail(f"An exception occurred - {str(e)}")
|
pytest.fail(f"An exception occurred - {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
def test_call_with_valid_model_using_all_models(prisma_client):
    """Auth with a key whose model list is the Admin-UI alias `all-models`.

    Generates a key restricted to ["all-models"], then verifies that
    (a) a /chat/completions request for an arbitrary model passes
    user_api_key_auth, and (b) /key/info still reports the literal
    ["all-models"] list. Any exception fails the test.
    """
    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
    try:

        async def _run_scenario():
            await litellm.proxy.proxy_server.prisma_client.connect()

            # Create a key carrying the special alias.
            key_request = GenerateKeyRequest(models=["all-models"])
            key = await generate_key_fn(data=key_request)
            print(key)

            generated_key = key.key
            bearer_token = f"Bearer {generated_key}"

            # Build a minimal chat-completions request for some model.
            request = Request(scope={"type": "http"})
            request._url = URL(url="/chat/completions")

            async def return_body():
                return b'{"model": "mistral"}'

            request.body = return_body

            # use generated key to auth in
            result = await user_api_key_auth(request=request, api_key=bearer_token)
            print("result from user auth with new key", result)

            # call /key/info for key - models == "all-models"
            key_info = await info_key_fn(key=generated_key)
            print("key_info", key_info)
            assert key_info["info"]["models"] == ["all-models"]

        asyncio.run(_run_scenario())
    except Exception as e:
        pytest.fail(f"An exception occurred - {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
def test_call_with_user_over_budget(prisma_client):
|
def test_call_with_user_over_budget(prisma_client):
|
||||||
# 5. Make a call with a key over budget, expect to fail
|
# 5. Make a call with a key over budget, expect to fail
|
||||||
setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
|
setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue