diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index 42bddca955..28f8acd21c 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -4453,6 +4453,42 @@
         "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models",
         "supports_tool_choice": true
     },
+    "gemini-2.5-pro-exp-03-25": {
+        "max_tokens": 65536,
+        "max_input_tokens": 1048576,
+        "max_output_tokens": 65536,
+        "max_images_per_prompt": 3000,
+        "max_videos_per_prompt": 10,
+        "max_video_length": 1,
+        "max_audio_length_hours": 8.4,
+        "max_audio_per_prompt": 1,
+        "max_pdf_size_mb": 30,
+        "input_cost_per_image": 0,
+        "input_cost_per_video_per_second": 0,
+        "input_cost_per_audio_per_second": 0,
+        "input_cost_per_token": 0,
+        "input_cost_per_character": 0,
+        "input_cost_per_token_above_128k_tokens": 0,
+        "input_cost_per_character_above_128k_tokens": 0,
+        "input_cost_per_image_above_128k_tokens": 0,
+        "input_cost_per_video_per_second_above_128k_tokens": 0,
+        "input_cost_per_audio_per_second_above_128k_tokens": 0,
+        "output_cost_per_token": 0,
+        "output_cost_per_character": 0,
+        "output_cost_per_token_above_128k_tokens": 0,
+        "output_cost_per_character_above_128k_tokens": 0,
+        "litellm_provider": "vertex_ai-language-models",
+        "mode": "chat",
+        "supports_system_messages": true,
+        "supports_function_calling": true,
+        "supports_vision": true,
+        "supports_audio_input": true,
+        "supports_video_input": true,
+        "supports_pdf_input": true,
+        "supports_response_schema": true,
+        "supports_tool_choice": true,
+        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
+    },
     "gemini-2.0-pro-exp-02-05": {
         "max_tokens": 8192,
         "max_input_tokens": 2097152,
diff --git a/litellm/proxy/auth/service_account_checks.py b/litellm/proxy/auth/service_account_checks.py
deleted file mode 100644
index 87d7d66854..0000000000
--- a/litellm/proxy/auth/service_account_checks.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Checks for LiteLLM service account keys
-
-"""
-
-from litellm.proxy._types import ProxyErrorTypes, ProxyException, UserAPIKeyAuth
-
-
-def check_if_token_is_service_account(valid_token: UserAPIKeyAuth) -> bool:
-    """
-    Checks if the token is a service account
-
-    Returns:
-        bool: True if token is a service account
-
-    """
-    if valid_token.metadata:
-        if "service_account_id" in valid_token.metadata:
-            return True
-    return False
-
-
-async def service_account_checks(
-    valid_token: UserAPIKeyAuth, request_data: dict
-) -> bool:
-    """
-    If a virtual key is a service account, checks it's a valid service account
-
-    A token is a service account if it has a service_account_id in its metadata
-
-    Service Account Specific Checks:
-    - Check if required_params is set
-    """
-
-    if check_if_token_is_service_account(valid_token) is not True:
-        return True
-
-    from litellm.proxy.proxy_server import general_settings
-
-    if "service_account_settings" in general_settings:
-        service_account_settings = general_settings["service_account_settings"]
-        if "enforced_params" in service_account_settings:
-            _enforced_params = service_account_settings["enforced_params"]
-            for param in _enforced_params:
-                if param not in request_data:
-                    raise ProxyException(
-                        type=ProxyErrorTypes.bad_request_error.value,
-                        code=400,
-                        param=param,
-                        message=f"BadRequest please pass param={param} in request body. This is a required param for service account",
-                    )
-
-    return True
diff --git a/litellm/proxy/litellm_pre_call_utils.py b/litellm/proxy/litellm_pre_call_utils.py
index 6427be5a6e..dade6c933e 100644
--- a/litellm/proxy/litellm_pre_call_utils.py
+++ b/litellm/proxy/litellm_pre_call_utils.py
@@ -747,7 +747,10 @@ def _get_enforced_params(
     enforced_params: Optional[list] = None
     if general_settings is not None:
         enforced_params = general_settings.get("enforced_params")
-        if "service_account_settings" in general_settings:
+        if (
+            "service_account_settings" in general_settings
+            and check_if_token_is_service_account(user_api_key_dict) is True
+        ):
             service_account_settings = general_settings["service_account_settings"]
             if "enforced_params" in service_account_settings:
                 if enforced_params is None:
@@ -760,6 +763,20 @@ def _get_enforced_params(
     return enforced_params
 
 
+def check_if_token_is_service_account(valid_token: UserAPIKeyAuth) -> bool:
+    """
+    Checks if the token is a service account
+
+    Returns:
+        bool: True if token is a service account
+
+    """
+    if valid_token.metadata:
+        if "service_account_id" in valid_token.metadata:
+            return True
+    return False
+
+
 def _enforced_params_check(
     request_body: dict,
     general_settings: Optional[dict],
diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index c0107d7ea6..9cdd1e7236 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -4,12 +4,6 @@ model_list:
       model: openai/gpt-4o
       api_key: sk-xxxxxxx
 
-mcp_servers:
-  {
-    "zapier_mcp": {
-      "url": "https://actions.zapier.com/mcp/sk-akxxxxx/sse"
-    },
-    "fetch": {
-      "url": "http://localhost:8000/sse"
-    }
-  }
+general_settings:
+  service_account_settings:
+    enforced_params: ["user"] # this means the "user" param is enforced for all requests made through any service account keys
\ No newline at end of file
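
The behavioral change in `_get_enforced_params` is easiest to see in isolation: service-account `enforced_params` should only apply to keys whose metadata carries a `service_account_id`, while ordinary virtual keys are unaffected. Below is a minimal, self-contained sketch of that logic under stated assumptions — `FakeToken` and `get_enforced_params` are simplified stand-ins for `UserAPIKeyAuth` and the proxy's `_get_enforced_params`, not the actual implementation:

```python
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class FakeToken:
    """Hypothetical stand-in for UserAPIKeyAuth; only metadata matters here."""

    metadata: dict = field(default_factory=dict)


def check_if_token_is_service_account(valid_token: FakeToken) -> bool:
    # Same rule as the relocated helper: a key is a service account
    # iff its metadata carries a "service_account_id".
    if valid_token.metadata and "service_account_id" in valid_token.metadata:
        return True
    return False


def get_enforced_params(general_settings: Optional[dict], token: FakeToken) -> list:
    # Simplified mirror of _get_enforced_params after this patch:
    # service-account enforced params are only collected when the
    # calling key actually is a service account.
    enforced: list = []
    if general_settings is None:
        return enforced
    enforced.extend(general_settings.get("enforced_params") or [])
    if (
        "service_account_settings" in general_settings
        and check_if_token_is_service_account(token) is True
    ):
        enforced.extend(
            general_settings["service_account_settings"].get("enforced_params") or []
        )
    return enforced


settings = {"service_account_settings": {"enforced_params": ["user"]}}

service_key = FakeToken(metadata={"service_account_id": "sa-123"})
regular_key = FakeToken(metadata={})

print(get_enforced_params(settings, service_key))  # ['user'] -> "user" is required
print(get_enforced_params(settings, regular_key))  # []       -> unaffected by the setting
```

With the `proxy_config.yaml` above, a request made with a service-account key but no `user` field should be rejected with a 400, while the same request through an ordinary virtual key passes through.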