(feat) UI - Disable Usage Tab once SpendLogs is 1M+ Rows (#7208)

* use utils to set proxy spend logs row count

* store proxy state variables

* fix check for _has_user_setup_sso

* fix proxyStateVariables

* fix dup code

* rename getProxyUISettings

* add fixes

* ui emit num spend logs rows

* test_proxy_server_prisma_setup

* move MAX_SPENDLOG_ROWS_TO_QUERY to constants

* test_get_ui_settings_spend_logs_threshold
Ishaan Jaff 2024-12-12 18:43:17 -08:00 committed by GitHub
parent ce69357e9d
commit b889d7c72f
14 changed files with 230 additions and 41 deletions
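
The commit message above describes capping UI-driven SpendLogs queries at MAX_SPENDLOG_ROWS_TO_QUERY and having the UI-settings endpoint report the row count so the frontend can disable the Usage tab. The endpoint itself is not part of the hunks below; the following Python sketch only illustrates the shape of such a check, and the response keys, the proxy_state accessor used, and the one-million default are assumptions rather than values taken from this diff.

# Hypothetical sketch -- key names, accessor, and the default threshold are assumptions.
MAX_SPENDLOG_ROWS_TO_QUERY = 1_000_000


def get_ui_settings(proxy_state) -> dict:
    """Tell the UI whether expensive SpendLogs queries (the Usage tab) are safe to run."""
    # proxy_state is the shared ProxyState instance created at import time.
    row_count = proxy_state.get_proxy_state_variable("spend_logs_row_count") or 0
    return {
        "NUM_SPEND_LOGS_ROWS": row_count,
        "DISABLE_EXPENSIVE_DB_QUERIES": row_count > MAX_SPENDLOG_ROWS_TO_QUERY,
    }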


@@ -164,6 +164,7 @@ from litellm.proxy.common_utils.load_config_utils import (
from litellm.proxy.common_utils.openai_endpoint_utils import (
remove_sensitive_info_from_deployment,
)
from litellm.proxy.common_utils.proxy_state import ProxyState
from litellm.proxy.common_utils.swagger_utils import ERROR_RESPONSES
from litellm.proxy.fine_tuning_endpoints.endpoints import router as fine_tuning_router
from litellm.proxy.fine_tuning_endpoints.endpoints import set_fine_tuning_config
@@ -327,6 +328,7 @@ premium_user: bool = _license_check.is_premium()
global_max_parallel_request_retries_env: Optional[str] = os.getenv(
"LITELLM_GLOBAL_MAX_PARALLEL_REQUEST_RETRIES"
)
proxy_state = ProxyState()
if global_max_parallel_request_retries_env is None:
global_max_parallel_request_retries: int = 3
else:
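
The two hunks above add the ProxyState import and create a module-level proxy_state instance in proxy_server.py. The class body itself is not part of this excerpt; as a rough sketch, it could be little more than an in-memory container for proxy-wide values such as the cached SpendLogs row count (the attribute and method names here are assumptions, not the real implementation):

from typing import Any, Dict


class ProxyState:
    """In-memory store for proxy-wide state variables (sketch, not the actual class)."""

    def __init__(self) -> None:
        # Plain dict keyed by variable name; the real class may structure this differently.
        self.proxy_state_variables: Dict[str, Any] = {}

    def get_proxy_state_variable(self, variable_name: str) -> Any:
        return self.proxy_state_variables.get(variable_name)

    def set_proxy_state_variable(self, variable_name: str, value: Any) -> None:
        self.proxy_state_variables[variable_name] = value
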
@@ -3047,6 +3049,10 @@ class ProxyStartupEvent:
prisma_client.check_view_exists()
) # check if all necessary views exist. Don't block execution
asyncio.create_task(
prisma_client._set_spend_logs_row_count_in_proxy_state()
) # set the spend logs row count in proxy state. Don't block execution
# run a health check to ensure the DB is ready
await prisma_client.health_check()
return prisma_client
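
The last hunk schedules prisma_client._set_spend_logs_row_count_in_proxy_state() as a fire-and-forget task during startup, mirroring the existing check_view_exists pattern, so the potentially expensive row count never blocks boot. The helper's body is not shown in this excerpt; roughly, it would count the SpendLogs rows once and cache the result in the shared proxy_state. The sketch below writes it as a free function for clarity (the real helper is a method on the Prisma client), and the Prisma model accessor name is an assumption:

async def _set_spend_logs_row_count_in_proxy_state(prisma_client, proxy_state) -> None:
    # Sketch: run the expensive COUNT once at startup and cache the result so
    # request handlers can read it from proxy state instead of hitting the DB.
    spend_logs_row_count = await prisma_client.db.litellm_spendlogs.count()
    proxy_state.set_proxy_state_variable(
        variable_name="spend_logs_row_count", value=spend_logs_row_count
    )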