diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index 9022558c43..e6842a416d 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -2064,7 +2064,9 @@
         "input_cost_per_token": 0.00000059,
         "output_cost_per_token": 0.00000079,
         "litellm_provider": "groq",
-        "mode": "chat"
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama-3.3-70b-specdec": {
         "max_tokens": 8192,
diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml
index b3969844a7..d493b3c7ba 100644
--- a/litellm/proxy/_new_secret_config.yaml
+++ b/litellm/proxy/_new_secret_config.yaml
@@ -12,4 +12,6 @@ model_list:
       num_retries: 0
 
 litellm_settings:
-  callbacks: ["langsmith"]
\ No newline at end of file
+  callbacks: ["langsmith"]
+  default_internal_user_params:
+    available_teams: ["litellm_dashboard_54a81fa9-9c69-45e8-b256-0c36bf104e5f", "a29a2dc6-1347-4ebc-a428-e6b56bbba611", "test-group-12"]
\ No newline at end of file
diff --git a/litellm/proxy/_types.py b/litellm/proxy/_types.py
index e68d92cee6..8ac0bc019a 100644
--- a/litellm/proxy/_types.py
+++ b/litellm/proxy/_types.py
@@ -253,6 +253,7 @@ class LiteLLMRoutes(enum.Enum):
         "/key/health",
         "/team/info",
         "/team/list",
+        "/team/available",
         "/user/info",
         "/model/info",
         "/v2/model/info",
@@ -284,6 +285,7 @@ class LiteLLMRoutes(enum.Enum):
         "/team/info",
         "/team/block",
         "/team/unblock",
+        "/team/available",
         # model
         "/model/new",
         "/model/update",
@@ -1563,6 +1565,7 @@ class LiteLLM_UserTable(LiteLLMPydanticObjectBase):
     rpm_limit: Optional[int] = None
     user_role: Optional[str] = None
     organization_memberships: Optional[List[LiteLLM_OrganizationMembershipTable]] = None
+    teams: List[str] = []
 
     @model_validator(mode="before")
     @classmethod
@@ -1571,6 +1574,8 @@ class LiteLLM_UserTable(LiteLLMPydanticObjectBase):
             values.update({"spend": 0.0})
         if values.get("models") is None:
             values.update({"models": []})
+        if values.get("teams") is None:
+            values.update({"teams": []})
         return values
 
     model_config = ConfigDict(protected_namespaces=())
diff --git a/litellm/proxy/management_endpoints/internal_user_endpoints.py b/litellm/proxy/management_endpoints/internal_user_endpoints.py
index e4a1740ba5..e20f71e64c 100644
--- a/litellm/proxy/management_endpoints/internal_user_endpoints.py
+++ b/litellm/proxy/management_endpoints/internal_user_endpoints.py
@@ -49,7 +49,9 @@ def _update_internal_new_user_params(data_json: dict, data: NewUserRequest) -> dict:
         is_internal_user = True
     if litellm.default_internal_user_params:
         for key, value in litellm.default_internal_user_params.items():
-            if key not in data_json or data_json[key] is None:
+            if key == "available_teams":
+                continue
+            elif key not in data_json or data_json[key] is None:
                 data_json[key] = value
             elif (
                 key == "models"
diff --git a/litellm/proxy/management_endpoints/team_endpoints.py b/litellm/proxy/management_endpoints/team_endpoints.py
index 7ead5ca0d4..c20bbcb959 100644
--- a/litellm/proxy/management_endpoints/team_endpoints.py
+++ b/litellm/proxy/management_endpoints/team_endpoints.py
@@ -73,6 +73,14 @@ def _is_user_team_admin(
     return False
 
 
+def _is_available_team(team_id: str, user_api_key_dict: UserAPIKeyAuth) -> bool:
+    if litellm.default_internal_user_params is None:
+        return False
+    if "available_teams" in litellm.default_internal_user_params:
+        return team_id in litellm.default_internal_user_params["available_teams"]
+    return False
+
+
 async def get_all_team_memberships(
     prisma_client: PrismaClient, team_id: List[str], user_id: Optional[str] = None
 ) -> List[LiteLLM_TeamMembership]:
@@ -656,6 +664,10 @@ async def team_member_add(
         and not _is_user_team_admin(
             user_api_key_dict=user_api_key_dict, team_obj=complete_team_data
         )
+        and not _is_available_team(
+            team_id=complete_team_data.team_id,
+            user_api_key_dict=user_api_key_dict,
+        )
     ):
         raise HTTPException(
             status_code=403,
@@ -1363,6 +1375,62 @@ async def unblock_team(
     return record
 
 
+@router.get("/team/available")
+async def list_available_teams(
+    http_request: Request,
+    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
+    response_model=List[LiteLLM_TeamTable],
+):
+    from litellm.proxy.proxy_server import prisma_client
+
+    if prisma_client is None:
+        raise HTTPException(
+            status_code=400,
+            detail={"error": CommonProxyErrors.db_not_connected_error.value},
+        )
+
+    available_teams = cast(
+        Optional[List[str]],
+        (
+            litellm.default_internal_user_params.get("available_teams")
+            if litellm.default_internal_user_params is not None
+            else None
+        ),
+    )
+    if available_teams is None:
+        raise HTTPException(
+            status_code=400,
+            detail={
+                "error": "No available teams for user to join. See how to set available teams here: https://docs.litellm.ai/docs/proxy/self_serve#all-settings-for-self-serve--sso-flow"
+            },
+        )
+
+    # filter out teams that the user is already a member of
+    user_info = await prisma_client.db.litellm_usertable.find_unique(
+        where={"user_id": user_api_key_dict.user_id}
+    )
+    if user_info is None:
+        raise HTTPException(
+            status_code=404,
+            detail={"error": "User not found"},
+        )
+    user_info_correct_type = LiteLLM_UserTable(**user_info.model_dump())
+
+    available_teams = [
+        team for team in available_teams if team not in user_info_correct_type.teams
+    ]
+
+    available_teams_db = await prisma_client.db.litellm_teamtable.find_many(
+        where={"team_id": {"in": available_teams}}
+    )
+
+    available_teams_correct_type = [
+        LiteLLM_TeamTable(**team.model_dump()) for team in available_teams_db
+    ]
+
+    return available_teams_correct_type
+
+
 @router.get(
     "/team/list", tags=["team management"], dependencies=[Depends(user_api_key_auth)]
 )
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index 9022558c43..e6842a416d 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -2064,7 +2064,9 @@
         "input_cost_per_token": 0.00000059,
         "output_cost_per_token": 0.00000079,
         "litellm_provider": "groq",
-        "mode": "chat"
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_response_schema": true
     },
     "groq/llama-3.3-70b-specdec": {
         "max_tokens": 8192,
diff --git a/ui/litellm-dashboard/src/components/navbar.tsx b/ui/litellm-dashboard/src/components/navbar.tsx
index c4dd20fbd0..e0c68ee702 100644
--- a/ui/litellm-dashboard/src/components/navbar.tsx
+++ b/ui/litellm-dashboard/src/components/navbar.tsx
@@ -112,7 +112,7 @@ const Navbar: React.FC = ({
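For reviewers, a minimal sketch of the self-serve flow this diff enables, written against a locally running proxy. The base URL, the `sk-...` key, and `my-user-id` are illustrative placeholders, and the `/team/member_add` payload shape is assumed from the existing endpoint rather than defined by this diff:

```python
import requests

BASE_URL = "http://localhost:4000"  # assumed local proxy address
USER_KEY = "sk-..."  # placeholder: a user-scoped virtual key

# GET /team/available returns the teams listed under
# default_internal_user_params["available_teams"], minus any team
# already present in the caller's `teams` column.
resp = requests.get(
    f"{BASE_URL}/team/available",
    headers={"Authorization": f"Bearer {USER_KEY}"},
)
resp.raise_for_status()
joinable = [team["team_id"] for team in resp.json()]
print("teams open to self-serve join:", joinable)

# Joining goes through the existing POST /team/member_add route; its
# permission check now also passes when _is_available_team() matches,
# so a non-admin can add themselves to a listed team.
if joinable:
    join = requests.post(
        f"{BASE_URL}/team/member_add",
        headers={"Authorization": f"Bearer {USER_KEY}"},
        json={
            "team_id": joinable[0],
            "member": {"role": "user", "user_id": "my-user-id"},  # illustrative
        },
    )
    print(join.status_code, join.json())
```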