LiteLLM Minor Fixes & Improvement (11/14/2024) (#6730)
* fix(ollama.py): fix get model info request. Fixes https://github.com/BerriAI/litellm/issues/6703
* feat(anthropic/chat/transformation.py): support passing a user id to anthropic via the openai 'user' param (sketched below)
* docs(anthropic.md): document all supported openai params for anthropic
* test: fix tests
* fix: fix tests
* feat(jina_ai/): add rerank support. Closes https://github.com/BerriAI/litellm/issues/6691
* test: handle service unavailable error
* fix(handler.py): refactor together ai rerank call
* test: update test to handle overloaded error
* test: fix test
* Litellm router trace (#6742)
* feat(router.py): add trace_id to parent functions - allows tracking retries/fallbacks
* feat(router.py): log trace id across retry/fallback logic - allows grouping llm logs for the same request
* test: fix tests
* fix: fix test
* fix(transformation.py): only set non-none stop_sequences
* Litellm router disable fallbacks (#6743)
* bump: version 1.52.6 → 1.52.7
* feat(router.py): enable dynamically disabling fallbacks - allows enabling/disabling fallbacks per key
* feat(litellm_pre_call_utils.py): support setting 'disable_fallbacks' on a litellm key
* test: fix test
* fix(exception_mapping_utils.py): map 'model is overloaded' to internal server error
* test: handle gemini error
* test: fix test
* fix: new run
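On the anthropic 'user' param change above: a minimal sketch of the caller's side, assuming a placeholder model name and end-user id (the exact Anthropic-side field the param maps to is not shown in this commit and is an assumption):

import litellm

# Hedged sketch: per the commit above, the OpenAI-style 'user' param is now
# forwarded to Anthropic. The model name and user id here are placeholders,
# and the Anthropic-side field it lands in is assumed, not confirmed here.
response = litellm.completion(
    model="anthropic/claude-3-5-sonnet-20240620",
    messages=[{"role": "user", "content": "Hello"}],
    user="end-user-123",  # OpenAI-style end-user id, passed through to Anthropic
)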
This commit is contained in:
parent f8e700064e
commit e9aa492af3

35 changed files with 853 additions and 246 deletions
@@ -1624,3 +1624,55 @@ async def test_standard_logging_payload_stream_usage(sync_mode):
         print(f"standard_logging_object usage: {built_response.usage}")
     except litellm.InternalServerError:
         pass
+
+
+def test_standard_logging_retries():
+    """
+    Test that the standard logging payload carries a trace_id, so we can
+    know if a request was retried.
+    """
+    from litellm.types.utils import StandardLoggingPayload
+    from litellm.router import Router
+
+    customHandler = CompletionCustomHandler()
+    litellm.callbacks = [customHandler]
+
+    router = Router(
+        model_list=[
+            {
+                "model_name": "gpt-3.5-turbo",
+                "litellm_params": {
+                    "model": "openai/gpt-3.5-turbo",
+                    "api_key": "test-api-key",
+                },
+            }
+        ]
+    )
+
+    with patch.object(
+        customHandler, "log_failure_event", new=MagicMock()
+    ) as mock_client:
+        try:
+            router.completion(
+                model="gpt-3.5-turbo",
+                messages=[{"role": "user", "content": "Hey, how's it going?"}],
+                num_retries=1,
+                mock_response="litellm.RateLimitError",
+            )
+        except litellm.RateLimitError:
+            pass
+
+        # one failure log per attempt: the original call plus one retry
+        assert mock_client.call_count == 2
+        assert (
+            mock_client.call_args_list[0].kwargs["kwargs"]["standard_logging_object"][
+                "trace_id"
+            ]
+            is not None
+        )
+        # both attempts share the same trace_id, so their logs can be grouped
+        assert (
+            mock_client.call_args_list[0].kwargs["kwargs"]["standard_logging_object"][
+                "trace_id"
+            ]
+            == mock_client.call_args_list[1].kwargs["kwargs"][
+                "standard_logging_object"
+            ]["trace_id"]
+        )
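The test above mocks log_failure_event, so as a usage note, here is a hedged sketch of a real callback that uses the shared trace_id to group every attempt of one request. The class name and grouping dict are illustrative; the CustomLogger hook and the standard_logging_object key mirror the assertions above:

from collections import defaultdict

from litellm.integrations.custom_logger import CustomLogger


class TraceGroupingLogger(CustomLogger):
    # Illustrative sketch: bucket failure logs by trace_id so the original
    # attempt and its retries/fallbacks land in the same group.
    def __init__(self):
        super().__init__()
        self.events_by_trace = defaultdict(list)

    def log_failure_event(self, kwargs, response_obj, start_time, end_time):
        # standard_logging_object carries the trace_id shared across retries,
        # as asserted by test_standard_logging_retries above
        trace_id = kwargs["standard_logging_object"]["trace_id"]
        self.events_by_trace[trace_id].append(kwargs)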