LiteLLM minor fixes + improvements (31/08/2024) (#5464)

* fix(vertex_endpoints.py): fix vertex ai pass through endpoints

* test(test_streaming.py): skip model due to end of life

* feat(custom_logger.py): add special callback for model hitting tpm/rpm limits

Closes https://github.com/BerriAI/litellm/issues/4096
Krish Dholakia, 2024-09-01 13:31:42 -07:00 (committed by GitHub)
parent 7778fa0146
commit e0d81434ed
8 changed files with 174 additions and 13 deletions

litellm/integrations/custom_logger.py

@@ -59,6 +59,11 @@ class CustomLogger:  # https://docs.litellm.ai/docs/observability/custom_callbacks
         pass
 
     #### Fallback Events - router/proxy only ####
+    async def log_model_group_rate_limit_error(
+        self, exception: Exception, original_model_group: Optional[str], kwargs: dict
+    ):
+        pass
+
     async def log_success_fallback_event(self, original_model_group: str, kwargs: dict):
         pass
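
For context, a minimal sketch of how a user might hook the new callback by subclassing CustomLogger and registering it via litellm.callbacks (a documented pattern); the RateLimitAlerter name and the print-based alerting logic are illustrative, not part of this commit:

    from typing import Optional

    import litellm
    from litellm.integrations.custom_logger import CustomLogger


    class RateLimitAlerter(CustomLogger):
        """Reacts when the router/proxy sees a model group hit its tpm/rpm limit."""

        async def log_model_group_rate_limit_error(
            self, exception: Exception, original_model_group: Optional[str], kwargs: dict
        ):
            # original_model_group is the model group whose tpm/rpm limit was hit;
            # replace this print with real alerting (Slack, PagerDuty, metrics, ...).
            print(f"Rate limit hit for model group={original_model_group}: {exception}")


    # Register the custom logger so router/proxy events reach it.
    litellm.callbacks = [RateLimitAlerter()]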