fix(utils.py): handle failed hf tokenizer request during calls (#8032)

* fix(utils.py): handle failed hf tokenizer request during calls

prevents proxy from failing due to bad hf tokenizer calls

* fix(utils.py): convert failure callback str to custom logger class

Fixes https://github.com/BerriAI/litellm/issues/8013

* test(test_utils.py): fix test - avoid adding mlflow dep on ci/cd

* fix: add missing env vars to test

* test: cleanup redundant test
This commit is contained in:
Krish Dholakia 2025-01-28 17:20:36 -08:00 committed by GitHub
parent 312d155930
commit 4929521bbd
3 changed files with 136 additions and 23 deletions

View file

@@ -1529,6 +1529,34 @@ def test_add_custom_logger_callback_to_specific_event_e2e(monkeypatch):
assert len(litellm.failure_callback) == curr_len_failure_callback
def test_add_custom_logger_callback_to_specific_event_e2e_failure(monkeypatch):
    """E2E check that a string failure callback ("openmeter") is converted into a
    CustomLogger instance, and that calling completion does not grow the
    success/failure callback lists."""
    from litellm.integrations.openmeter import OpenMeterLogger

    # Start from a clean callback state so earlier tests can't interfere.
    monkeypatch.setattr(litellm, "success_callback", [])
    monkeypatch.setattr(litellm, "failure_callback", [])
    monkeypatch.setattr(litellm, "callbacks", [])

    # Dummy credentials so the OpenMeter integration can initialize.
    monkeypatch.setenv("OPENMETER_API_KEY", "wedlwe")
    monkeypatch.setenv("OPENMETER_API_URL", "https://openmeter.dev")

    litellm.failure_callback = ["openmeter"]

    success_len_before = len(litellm.success_callback)
    failure_len_before = len(litellm.failure_callback)

    litellm.completion(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello, world!"}],
        mock_response="Testing langfuse",
    )

    # The call must not duplicate entries in either callback list.
    assert len(litellm.success_callback) == success_len_before
    assert len(litellm.failure_callback) == failure_len_before

    # The "openmeter" string should now be an OpenMeterLogger instance.
    assert any(
        isinstance(cb, OpenMeterLogger) for cb in litellm.failure_callback
    )
@pytest.mark.asyncio
async def test_wrapper_kwargs_passthrough():
from litellm.utils import client