diff --git a/litellm/proxy/_types.py b/litellm/proxy/_types.py
index b1af153e81..204405cbfd 100644
--- a/litellm/proxy/_types.py
+++ b/litellm/proxy/_types.py
@@ -77,6 +77,16 @@ class LiteLLM_UpperboundKeyGenerateParams(LiteLLMBase):
 
 
 class LiteLLMRoutes(enum.Enum):
+    openai_route_names: List = [
+        "chat_completion",
+        "completion",
+        "embeddings",
+        "image_generation",
+        "audio_transcriptions",
+        "moderations",
+        "model_info_v1",
+        "model_info_v2",
+    ]
     openai_routes: List = [
         # chat completions
         "/openai/deployments/{model}/chat/completions",
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index cc5327b758..edfe69fa6e 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -1073,6 +1073,8 @@ async def user_api_key_auth(
         if not _is_user_proxy_admin(user_id_information):  # if non-admin
             if route in LiteLLMRoutes.openai_routes.value:
                 pass
+            elif request['route'].name in LiteLLMRoutes.openai_route_names.value:
+                pass
             elif (
                 route in LiteLLMRoutes.info_routes.value
             ):  # check if user allowed to call an info route
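For context on the second hunk: FastAPI's `APIRoute.matches()` stores the matched route object in the ASGI scope under `"route"`, and `Request.__getitem__` reads from that scope, so `request['route'].name` resolves to the route's name (by default, the endpoint function's name, e.g. `chat_completion`). That lets the auth check recognize templated paths such as `/openai/deployments/{model}/chat/completions`, which a literal `route in LiteLLMRoutes.openai_routes.value` string comparison can never match. Below is a minimal standalone sketch of that mechanism, not LiteLLM's actual code: the app, the endpoint, and the `check_non_admin_access` helper are illustrative; only the enum member names mirror the diff.

```python
import enum
from typing import List

from fastapi import FastAPI, HTTPException, Request


class Routes(enum.Enum):
    # Literal paths allowed for non-admin keys; matched by string equality,
    # so the templated entry below can never equal a real request path.
    openai_routes: List = [
        "/v1/chat/completions",
        "/openai/deployments/{model}/chat/completions",
    ]
    # Route *names* allowed for non-admin keys; a FastAPI route's name
    # defaults to its endpoint function's name.
    openai_route_names: List = [
        "chat_completion",
    ]


app = FastAPI()


def check_non_admin_access(request: Request) -> None:
    route = request.url.path
    if route in Routes.openai_routes.value:
        return  # exact path match (static paths only)
    # FastAPI's APIRoute.matches() puts the matched route into the ASGI
    # scope under "route"; Request.__getitem__ reads from the scope.
    if request["route"].name in Routes.openai_route_names.value:
        return  # name match covers templated paths like the one above
    raise HTTPException(status_code=403, detail="route not allowed for this key")


@app.post("/openai/deployments/{model}/chat/completions")
async def chat_completion(model: str, request: Request):
    check_non_admin_access(request)
    return {"model": model}
```

Running this under `uvicorn` and POSTing to `/openai/deployments/gpt-4/chat/completions` exercises the name-based branch: the raw path does not appear in `openai_routes`, but the matched route's name `chat_completion` does appear in `openai_route_names`, which is exactly the gap the new `elif` closes.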