diff --git a/litellm/proxy/_types.py b/litellm/proxy/_types.py index 1b5faf3f9c..68663ec01c 100644 --- a/litellm/proxy/_types.py +++ b/litellm/proxy/_types.py @@ -1714,6 +1714,7 @@ class WebhookEvent(CallInfo): class SpecialModelNames(enum.Enum): all_team_models = "all-team-models" all_proxy_models = "all-proxy-models" + no_default_models = "no-default-models" class InvitationNew(LiteLLMPydanticObjectBase): diff --git a/litellm/proxy/auth/auth_checks.py b/litellm/proxy/auth/auth_checks.py index 0590bcb50a..183b5609d0 100644 --- a/litellm/proxy/auth/auth_checks.py +++ b/litellm/proxy/auth/auth_checks.py @@ -38,6 +38,7 @@ from litellm.proxy._types import ( ProxyErrorTypes, ProxyException, RoleBasedPermissions, + SpecialModelNames, UserAPIKeyAuth, ) from litellm.proxy.auth.route_checks import RouteChecks @@ -1083,6 +1084,14 @@ async def can_user_call_model( if user_object is None: return True + if SpecialModelNames.no_default_models.value in user_object.models: + raise ProxyException( + message=f"User not allowed to access model. No default model access, only team models allowed. Tried to access {model}", + type=ProxyErrorTypes.key_model_access_denied, + param="model", + code=status.HTTP_401_UNAUTHORIZED, + ) + return await _can_object_call_model( model=model, llm_router=llm_router, diff --git a/litellm/proxy/management_endpoints/internal_user_endpoints.py b/litellm/proxy/management_endpoints/internal_user_endpoints.py index a414f48847..36c12ada3f 100644 --- a/litellm/proxy/management_endpoints/internal_user_endpoints.py +++ b/litellm/proxy/management_endpoints/internal_user_endpoints.py @@ -127,7 +127,7 @@ async def new_user( - user_role: Optional[str] - Specify a user role - "proxy_admin", "proxy_admin_viewer", "internal_user", "internal_user_viewer", "team", "customer". Info about each role here: `https://github.com/BerriAI/litellm/litellm/proxy/_types.py#L20` - max_budget: Optional[float] - Specify max budget for a given user. 
- budget_duration: Optional[str] - Budget is reset at the end of specified duration. If not set, budget is never reset. You can set duration as seconds ("30s"), minutes ("30m"), hours ("30h"), days ("30d"), months ("1mo"). - - models: Optional[list] - Model_name's a user is allowed to call. (if empty, key is allowed to call all models) + - models: Optional[list] - Model names a user is allowed to call (if empty, the user is allowed to call all models). Set to ['no-default-models'] to block all default model access, restricting the user to team-based model access only. - tpm_limit: Optional[int] - Specify tpm limit for a given user (Tokens per minute) - rpm_limit: Optional[int] - Specify rpm limit for a given user (Requests per minute) - auto_create_key: bool - Default=True. Flag used for returning a key as part of the /user/new response diff --git a/tests/proxy_unit_tests/test_auth_checks.py b/tests/proxy_unit_tests/test_auth_checks.py index 0a8ebbe018..8e9618297e 100644 --- a/tests/proxy_unit_tests/test_auth_checks.py +++ b/tests/proxy_unit_tests/test_auth_checks.py @@ -550,6 +550,30 @@ async def test_can_user_call_model(): await can_user_call_model(**args) +@pytest.mark.asyncio +async def test_can_user_call_model_with_no_default_models(): + from litellm.proxy.auth.auth_checks import can_user_call_model + from litellm.proxy._types import ProxyException, SpecialModelNames + from unittest.mock import MagicMock + + args = { + "model": "anthropic-claude", + "llm_router": MagicMock(), + "user_object": LiteLLM_UserTable( + user_id="testuser21@mycompany.com", + max_budget=None, + spend=0.0042295, + model_max_budget={}, + model_spend={}, + user_email="testuser@mycompany.com", + models=[SpecialModelNames.no_default_models.value], + ), + } + + with pytest.raises(ProxyException) as e: + await can_user_call_model(**args) + + @pytest.mark.asyncio async def test_get_fuzzy_user_object(): from litellm.proxy.auth.auth_checks import _get_fuzzy_user_object diff --git 
a/ui/litellm-dashboard/src/components/chat_ui.tsx b/ui/litellm-dashboard/src/components/chat_ui.tsx index db4499f9ff..c505a954b8 100644 --- a/ui/litellm-dashboard/src/components/chat_ui.tsx +++ b/ui/litellm-dashboard/src/components/chat_ui.tsx @@ -93,20 +93,27 @@ const ChatUI: React.FC = ({ const [selectedModel, setSelectedModel] = useState( undefined ); + const [showCustomModelInput, setShowCustomModelInput] = useState(false); const [modelInfo, setModelInfo] = useState([]); + const customModelTimeout = useRef(null); const chatEndRef = useRef(null); useEffect(() => { - if (!accessToken || !token || !userRole || !userID) { + let useApiKey = apiKeySource === 'session' ? accessToken : apiKey; + console.log("useApiKey:", useApiKey); + if (!useApiKey || !token || !userRole || !userID) { + console.log("useApiKey or token or userRole or userID is missing = ", useApiKey, token, userRole, userID); return; } + + // Fetch model info and set the default selected model const fetchModelInfo = async () => { try { const fetchedAvailableModels = await modelAvailableCall( - accessToken, + useApiKey ?? '', // Use empty string if useApiKey is null, userID, userRole ); @@ -139,7 +146,7 @@ const ChatUI: React.FC = ({ }; fetchModelInfo(); - }, [accessToken, userID, userRole]); + }, [accessToken, userID, userRole, apiKeySource, apiKey]); useEffect(() => { @@ -234,6 +241,7 @@ const ChatUI: React.FC = ({ const onChange = (value: string) => { console.log(`selected ${value}`); setSelectedModel(value); + setShowCustomModelInput(value === 'custom'); }; return ( @@ -276,10 +284,29 @@ const ChatUI: React.FC = ({ {teams ? ( teams.map((team: any) => ( @@ -307,6 +314,39 @@ const Createuser: React.FC = ({ + + + Personal Key Creation + + + + Models{' '} + + + + + } name="models" help="Models user has access to, outside of team scope."> + + + All Proxy Models + + {userModels.map((model) => ( + + {getModelDisplayName(model)} + + ))} + + + +