forked from phoenix/litellm-mirror
(test) router-fallbacks
parent d0442ae0f2
commit 0196ac6376
1 changed file with 85 additions and 73 deletions
@@ -701,6 +701,11 @@ async def test_async_fallbacks_max_retries_per_request():


def test_usage_based_routing_fallbacks():
    try:
        # [Prod Test]
        # It tests Usage Based Routing with fallbacks
        # The request should fail on azure/gpt-4-fast, then fall back -> "azure/gpt-4-basic" -> "openai-gpt-4"
        # It should work with "openai-gpt-4"
        import os
        import litellm
        from litellm import Router

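The deployment list and router construction live in the part of the file the diff elides between these two hunks. Below is a minimal sketch, not taken from the commit, of what a usage-based-routing setup with this fallback chain typically looks like with litellm's Router; the deployment names, TPM budgets, constant names, and environment-variable names are assumptions for illustration, not the test's real fixtures.

import os

from litellm import Router

# Assumed budgets: both Azure deployments get a TPM budget smaller than the
# prompt's token count, so usage-based routing has to fall through to OpenAI.
AZURE_FAST_TPM = 3
AZURE_BASIC_TPM = 4
OPENAI_TPM = 400

model_list = [
    {
        "model_name": "azure/gpt-4-fast",
        "litellm_params": {
            "model": "azure/chatgpt-v-2",  # assumed Azure deployment name
            "api_key": os.getenv("AZURE_API_KEY"),
            "api_version": os.getenv("AZURE_API_VERSION"),
            "api_base": os.getenv("AZURE_API_BASE"),
        },
        "tpm": AZURE_FAST_TPM,
    },
    {
        "model_name": "azure/gpt-4-basic",
        "litellm_params": {
            "model": "azure/chatgpt-v-2",  # assumed Azure deployment name
            "api_key": os.getenv("AZURE_API_KEY"),
            "api_version": os.getenv("AZURE_API_VERSION"),
            "api_base": os.getenv("AZURE_API_BASE"),
        },
        "tpm": AZURE_BASIC_TPM,
    },
    {
        "model_name": "openai-gpt-4",
        "litellm_params": {
            "model": "gpt-3.5-turbo",  # assumed OpenAI model
            "api_key": os.getenv("OPENAI_API_KEY"),
        },
        "tpm": OPENAI_TPM,
    },
]

# The fallback chain the test asserts on:
# azure/gpt-4-fast -> azure/gpt-4-basic -> openai-gpt-4
fallbacks_list = [
    {"azure/gpt-4-fast": ["azure/gpt-4-basic"]},
    {"azure/gpt-4-basic": ["openai-gpt-4"]},
]

router = Router(
    model_list=model_list,
    fallbacks=fallbacks_list,
    routing_strategy="usage-based-routing",
    set_verbose=True,
)
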
@@ -780,7 +785,14 @@ def test_usage_based_routing_fallbacks():
        ]

        response = router.completion(
-           model="azure/gpt-4-fast", messages=messages, n=10, timeout=5
+           model="azure/gpt-4-fast", messages=messages, timeout=5
        )

        print("response: ", response)
        print("response._hidden_params: ", response._hidden_params)

        # in this test, we expect azure/gpt-4-fast to fail, then azure/gpt-4-basic to fail, and then openai-gpt-4 to pass
        # the token count of this message is > AZURE_FAST_TPM and > AZURE_BASIC_TPM
        assert response._hidden_params["custom_llm_provider"] == "openai"

    except Exception as e:
        pytest.fail(f"An exception occurred {e}")
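Usage note (an assumption, not stated in the diff): since the test is tagged "[Prod Test]" and depends on live TPM accounting, it needs real provider credentials in the environment. Running only this test, for example with pytest -s -k test_usage_based_routing_fallbacks, keeps the two print statements visible so the fallback chain can be inspected alongside the final custom_llm_provider assertion.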