LiteLLM Minor Fixes & Improvement (11/14/2024) (#6730)
* fix(ollama.py): fix get model info request. Fixes https://github.com/BerriAI/litellm/issues/6703
* feat(anthropic/chat/transformation.py): support passing user id to anthropic via openai 'user' param
* docs(anthropic.md): document all supported openai params for anthropic
* test: fix tests
* fix: fix tests
* feat(jina_ai/): add rerank support. Closes https://github.com/BerriAI/litellm/issues/6691
* test: handle service unavailable error
* fix(handler.py): refactor together ai rerank call
* test: update test to handle overloaded error
* test: fix test
* Litellm router trace (#6742)
  * feat(router.py): add trace_id to parent functions; allows tracking retry/fallbacks
  * feat(router.py): log trace id across retry/fallback logic; allows grouping llm logs for the same request
  * test: fix tests
  * fix: fix test
* fix(transformation.py): only set non-none stop_sequences
* Litellm router disable fallbacks (#6743)
  * bump: version 1.52.6 → 1.52.7
  * feat(router.py): enable dynamically disabling fallbacks; allows enabling/disabling fallbacks per key
  * feat(litellm_pre_call_utils.py): support setting 'disable_fallbacks' on litellm key
  * test: fix test
* fix(exception_mapping_utils.py): map 'model is overloaded' to internal server error
* test: handle gemini error
* test: fix test
* fix: new run
parent f8e700064e
commit e9aa492af3
35 changed files with 853 additions and 246 deletions
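A hedged usage sketch for two of the user-facing changes above (the Anthropic 'user' param pass-through and Jina AI rerank support). Model names, document strings, and the exact response shape are illustrative assumptions, not taken from this diff; provider API keys are assumed to be set in the environment.

```python
import litellm

# The OpenAI-style 'user' param is now forwarded to Anthropic
# (per the anthropic/chat/transformation.py change above).
resp = litellm.completion(
    model="anthropic/claude-3-5-sonnet-20241022",  # placeholder model
    messages=[{"role": "user", "content": "hello"}],
    user="end-user-1234",
)
print(resp.choices[0].message.content)

# Jina AI rerank (per the jina_ai/ change above), via litellm's
# cohere-style rerank API.
rerank_resp = litellm.rerank(
    model="jina_ai/jina-reranker-v2-base-multilingual",  # placeholder model
    query="organic skincare for sensitive skin",
    documents=[
        "Organic skincare for sensitive skin with aloe vera",
        "New makeup trends focus on bold colors",
    ],
    top_n=1,
)
print(rerank_resp)
```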
@@ -1455,3 +1455,46 @@ async def test_router_fallbacks_default_and_model_specific_fallbacks(sync_mode):
    assert isinstance(
        exc_info.value, litellm.AuthenticationError
    ), f"Expected AuthenticationError, but got {type(exc_info.value).__name__}"


@pytest.mark.asyncio
async def test_router_disable_fallbacks_dynamically():
    from litellm.router import run_async_fallback

    router = Router(
        model_list=[
            {
                "model_name": "bad-model",
                "litellm_params": {
                    "model": "openai/my-bad-model",
                    "api_key": "my-bad-api-key",
                },
            },
            {
                "model_name": "good-model",
                "litellm_params": {
                    "model": "gpt-4o",
                    "api_key": os.getenv("OPENAI_API_KEY"),
                },
            },
        ],
        fallbacks=[{"bad-model": ["good-model"]}],
        default_fallbacks=["good-model"],
    )

    with patch.object(
        router,
        "log_retry",
        new=MagicMock(return_value=None),
    ) as mock_client:
        try:
            resp = await router.acompletion(
                model="bad-model",
                messages=[{"role": "user", "content": "Hey, how's it going?"}],
                disable_fallbacks=True,
            )
            print(resp)
        except Exception as e:
            print(e)

        mock_client.assert_not_called()
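For comparison outside the test harness, a minimal sketch of the same per-request switch that this test exercises; the deployment name and configuration are placeholders, and an OPENAI_API_KEY is assumed.

```python
import asyncio
import os

from litellm import Router

# Placeholder deployment; in practice you would also configure fallbacks /
# default_fallbacks, which the flag below suppresses for this one request.
router = Router(
    model_list=[
        {
            "model_name": "gpt-4o",
            "litellm_params": {
                "model": "gpt-4o",
                "api_key": os.getenv("OPENAI_API_KEY"),
            },
        }
    ],
)


async def main() -> None:
    # disable_fallbacks=True makes this request fail fast instead of
    # routing through any configured fallbacks.
    resp = await router.acompletion(
        model="gpt-4o",
        messages=[{"role": "user", "content": "ping"}],
        disable_fallbacks=True,
    )
    print(resp.choices[0].message.content)


asyncio.run(main())
```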