mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 11:14:04 +00:00
LiteLLM minor fixes + improvements (31/08/2024) (#5464)
* fix(vertex_endpoints.py): fix Vertex AI pass-through endpoints
* test(test_streaming.py): skip model due to end of life
* feat(custom_logger.py): add special callback for model hitting tpm/rpm limits
Closes https://github.com/BerriAI/litellm/issues/4096
This commit is contained in:
parent
1c9a82771a
commit
ca4e746545
8 changed files with 174 additions and 13 deletions
|
@ -59,6 +59,11 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac
|
|||
pass
|
||||
|
||||
#### Fallback Events - router/proxy only ####
|
||||
#### Fallback Events - router/proxy only ####

async def log_model_group_rate_limit_error(
    self, exception: Exception, original_model_group: Optional[str], kwargs: dict
):
    """Callback fired when a model group hits its tpm/rpm rate limit.

    Default is a no-op; subclasses of CustomLogger override this to react
    to rate-limit errors (e.g. alerting). Router/proxy only.

    Args:
        exception: The rate-limit exception that was raised.
        original_model_group: Model group that hit the limit, if known.
        kwargs: Original request kwargs passed through by the caller.
            # NOTE(review): exact key schema not visible here — confirm against router call site.
    """
    pass
|
||||
|
||||
async def log_success_fallback_event(self, original_model_group: str, kwargs: dict):
    """Callback fired after a fallback attempt succeeds.

    Default is a no-op; subclasses of CustomLogger override this to record
    successful fallbacks. Router/proxy only.

    Args:
        original_model_group: The model group the request originally targeted.
        kwargs: Original request kwargs passed through by the caller.
            # NOTE(review): exact key schema not visible here — confirm against router call site.
    """
    pass
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue