mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
Allow editing model api key + provider on UI (#8406)
* fix(parallel_request_limiter.py): add back parallel request information to max parallel request limiter Resolves https://github.com/BerriAI/litellm/issues/8392 * test: mark flaky test to handle time based tracking issues * feat(model_management_endpoints.py): expose new patch `/model/{model_id}/update` endpoint Allows updating specific values of a model in db - makes it easy for admin to know this by calling it a PATCH * feat(edit_model_modal.tsx): allow user to update llm provider + api key on the ui * fix: fix linting error
This commit is contained in:
parent
0d2e723e95
commit
e4411e4815
7 changed files with 285 additions and 11 deletions
|
@ -93,7 +93,7 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
|
|||
else:
|
||||
raise HTTPException(
|
||||
status_code=429,
|
||||
detail=f"LiteLLM Rate Limit Handler for rate limit type = {rate_limit_type}. Crossed TPM, RPM Limit. current rpm: {current['current_rpm']}, rpm limit: {rpm_limit}, current tpm: {current['current_tpm']}, tpm limit: {tpm_limit}",
|
||||
detail=f"LiteLLM Rate Limit Handler for rate limit type = {rate_limit_type}. Crossed TPM / RPM / Max Parallel Request Limit. current rpm: {current['current_rpm']}, rpm limit: {rpm_limit}, current tpm: {current['current_tpm']}, tpm limit: {tpm_limit}, current max_parallel_requests: {current['current_requests']}, max_parallel_requests: {max_parallel_requests}",
|
||||
headers={"retry-after": str(self.time_to_next_minute())},
|
||||
)
|
||||
return new_val
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue