litellm-mirror/tests/local_testing/test_router_cooldowns.py

#### What this tests ####
#    This tests router deployment cooldowns - when deployments should / should not be cooled down

import asyncio
import os
import random
import sys
import time
import traceback

import pytest

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path

from unittest.mock import AsyncMock, MagicMock, patch

import httpx
import openai

import litellm
from litellm import Router
from litellm.integrations.custom_logger import CustomLogger
from litellm.router_utils.cooldown_handlers import _async_get_cooldown_deployments
from litellm.types.router import DeploymentTypedDict, LiteLLMParamsTypedDict
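

# Conventions shared by the tests below:
# - mock_response="litellm.RateLimitError" / "litellm.InternalServerError" makes litellm
#   raise that exception locally instead of calling a real provider.
# - router.cooldown_cache tracks deployments that are currently cooling down; some tests
#   patch its add_deployment_to_cooldown method to observe whether a cooldown was written.
# - _async_get_cooldown_deployments(litellm_router_instance=..., parent_otel_span=None)
#   returns the ids of the deployments currently in cooldown.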


@pytest.mark.asyncio
async def test_cooldown_badrequest_error():
    """
    Test 1. It SHOULD NOT cooldown a deployment on a BadRequestError
    """
    router = litellm.Router(
        model_list=[
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "azure/chatgpt-v-2",
                    "api_key": os.getenv("AZURE_API_KEY"),
                    "api_version": os.getenv("AZURE_API_VERSION"),
                    "api_base": os.getenv("AZURE_API_BASE"),
                },
            }
        ],
        debug_level="DEBUG",
        set_verbose=True,
        cooldown_time=300,
        num_retries=0,
        allowed_fails=0,
    )

    # Act & Assert
    # an unsupported request param ('bad_param') should surface as a BadRequestError -
    # a client-side error that must not put the deployment into cooldown
    try:
        response = await router.acompletion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "gm"}],
            bad_param=200,
        )
    except Exception:
        pass

    await asyncio.sleep(3)  # wait for deployment to get cooled-down

    response = await router.acompletion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "gm"}],
        mock_response="hello",
    )

    assert response is not None

    print(response)


@pytest.mark.asyncio
async def test_dynamic_cooldowns():
    """
    Assert kwargs for completion/embedding have 'cooldown_time' as a litellm_param
    """
    # litellm.set_verbose = True
    tmp_mock = MagicMock()

    litellm.failure_callback = [tmp_mock]

    router = Router(
        model_list=[
            {
                "model_name": "my-fake-model",
                "litellm_params": {
                    "model": "openai/gpt-1",
                    "api_key": "my-key",
                    "mock_response": Exception("this is an error"),
                },
            }
        ],
        cooldown_time=60,
    )

    try:
        _ = router.completion(
            model="my-fake-model",
            messages=[{"role": "user", "content": "Hey, how's it going?"}],
            cooldown_time=0,
            num_retries=0,
        )
    except Exception:
        pass

    tmp_mock.assert_called_once()
    print(tmp_mock.call_count)

    assert "cooldown_time" in tmp_mock.call_args[0][0]["litellm_params"]
    assert tmp_mock.call_args[0][0]["litellm_params"]["cooldown_time"] == 0
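

# The request-level cooldown_time (0) - not the router-level default (60) - is what ends
# up under "litellm_params" in the payload handed to the failure callback, which is what
# the assertions above check.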


@pytest.mark.parametrize("num_deployments", [1, 2])
def test_single_deployment_no_cooldowns(num_deployments):
    """
    Do not cooldown on single deployment.

    Cooldown on multiple deployments.
    """
    model_list = []
    for i in range(num_deployments):
        model = DeploymentTypedDict(
            model_name="gpt-3.5-turbo",
            litellm_params=LiteLLMParamsTypedDict(
                model="gpt-3.5-turbo",
            ),
        )
        model_list.append(model)

    router = Router(model_list=model_list, allowed_fails=0, num_retries=0)

    with patch.object(
        router.cooldown_cache, "add_deployment_to_cooldown", new=MagicMock()
    ) as mock_client:
        try:
            router.completion(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Hey, how's it going?"}],
                mock_response="litellm.RateLimitError",
            )
        except litellm.RateLimitError:
            pass

        if num_deployments == 1:
            mock_client.assert_not_called()
        else:
            mock_client.assert_called_once()
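

# Rationale for the single-deployment exemption: with only one deployment in the model
# group there is nothing left to route to, so failures on a lone deployment should not
# trigger a cooldown (see also the "prod" variants below).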


@pytest.mark.asyncio
async def test_single_deployment_no_cooldowns_test_prod():
    """
    Do not cooldown on single deployment.
    """
    router = Router(
        model_list=[
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                },
            },
            {
                "model_name": "gpt-5",
                "litellm_params": {
                    "model": "openai/gpt-5",
                },
            },
            {
                "model_name": "gpt-12",
                "litellm_params": {
                    "model": "openai/gpt-12",
                },
            },
        ],
        allowed_fails=0,
        num_retries=0,
    )

    with patch.object(
        router.cooldown_cache, "add_deployment_to_cooldown", new=MagicMock()
    ) as mock_client:
        try:
            await router.acompletion(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Hey, how's it going?"}],
                mock_response="litellm.RateLimitError",
            )
        except litellm.RateLimitError:
            pass

        await asyncio.sleep(2)

        mock_client.assert_not_called()


@pytest.mark.asyncio
async def test_single_deployment_no_cooldowns_test_prod_mock_completion_calls():
    """
    Do not cooldown on single deployment.
    """
    router = Router(
        model_list=[
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                },
            },
            {
                "model_name": "gpt-5",
                "litellm_params": {
                    "model": "openai/gpt-5",
                },
            },
            {
                "model_name": "gpt-12",
                "litellm_params": {
                    "model": "openai/gpt-12",
                },
            },
        ],
    )

    for _ in range(20):
        try:
            await router.acompletion(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Hey, how's it going?"}],
                mock_response="litellm.RateLimitError",
            )
        except litellm.RateLimitError:
            pass

    cooldown_list = await _async_get_cooldown_deployments(
        litellm_router_instance=router, parent_otel_span=None
    )
    assert len(cooldown_list) == 0

    healthy_deployments, _ = await router._async_get_healthy_deployments(
        model="gpt-3.5-turbo", parent_otel_span=None
    )

    print("healthy_deployments: ", healthy_deployments)
"""
E2E - Test router cooldowns
Test 1: 3 deployments, each deployment fails 25% requests. Assert that no deployments get put into cooldown
Test 2: 3 deployments, 1- deployment fails 6/10 requests, assert that bad deployment gets put into cooldown
Test 3: 3 deployments, 1 deployment has a period of 429 errors. Assert it is put into cooldown and other deployments work
"""


@pytest.mark.asyncio()
async def test_high_traffic_cooldowns_all_healthy_deployments():
    """
    PROD TEST - 3 deployments, each deployment fails 25% requests. Assert that no deployments get put into cooldown
    """
    router = Router(
        model_list=[
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_base": "https://api.openai.com",
                },
            },
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_base": "https://api.openai.com-2",
                },
            },
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_base": "https://api.openai.com-3",
                },
            },
        ],
        set_verbose=True,
        debug_level="DEBUG",
    )

    all_deployment_ids = router.get_model_ids()

    import random
    from collections import defaultdict

    # Create a defaultdict to track successes and failures for each model ID
    model_stats = defaultdict(lambda: {"successes": 0, "failures": 0})

    litellm.set_verbose = True
    for _ in range(100):
        try:
            model_id = random.choice(all_deployment_ids)

            num_successes = model_stats[model_id]["successes"]
            num_failures = model_stats[model_id]["failures"]
            total_requests = num_failures + num_successes
            if total_requests > 0:
                print(
                    "num failures= ",
                    num_failures,
                    "num successes= ",
                    num_successes,
                    "num_failures/total = ",
                    num_failures / total_requests,
                )

            if total_requests == 0:
                mock_response = "hi"
            elif num_failures / total_requests <= 0.25:
                # Randomly decide between fail and succeed
                if random.random() < 0.5:
                    mock_response = "hi"
                else:
                    mock_response = "litellm.InternalServerError"
            else:
                mock_response = "hi"

            await router.acompletion(
                model=model_id,
                messages=[{"role": "user", "content": "Hey, how's it going?"}],
                mock_response=mock_response,
            )
            model_stats[model_id]["successes"] += 1

            await asyncio.sleep(0.0001)
        except litellm.InternalServerError:
            model_stats[model_id]["failures"] += 1
            pass
        except Exception as e:
            print("Failed test model stats=", model_stats)
            raise e

    print("model_stats: ", model_stats)

    cooldown_list = await _async_get_cooldown_deployments(
        litellm_router_instance=router, parent_otel_span=None
    )
    assert len(cooldown_list) == 0


@pytest.mark.asyncio()
async def test_high_traffic_cooldowns_one_bad_deployment():
    """
    PROD TEST - 3 deployments, 1 deployment fails 6/10 requests, assert that bad deployment gets put into cooldown
    """
    router = Router(
        model_list=[
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_base": "https://api.openai.com",
                },
            },
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_base": "https://api.openai.com-2",
                },
            },
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_base": "https://api.openai.com-3",
                },
            },
        ],
        set_verbose=True,
        debug_level="DEBUG",
    )

    all_deployment_ids = router.get_model_ids()

    import random
    from collections import defaultdict

    # Create a defaultdict to track successes and failures for each model ID
    model_stats = defaultdict(lambda: {"successes": 0, "failures": 0})

    bad_deployment_id = random.choice(all_deployment_ids)
    litellm.set_verbose = True
    for _ in range(100):
        try:
            model_id = random.choice(all_deployment_ids)

            num_successes = model_stats[model_id]["successes"]
            num_failures = model_stats[model_id]["failures"]
            total_requests = num_failures + num_successes
            if total_requests > 0:
                print(
                    "num failures= ",
                    num_failures,
                    "num successes= ",
                    num_successes,
                    "num_failures/total = ",
                    num_failures / total_requests,
                )

            if total_requests == 0:
                mock_response = "hi"
            elif bad_deployment_id == model_id:
                if num_failures / total_requests <= 0.6:
                    mock_response = "litellm.InternalServerError"
                else:
                    # past the target failure rate, let the bad deployment succeed again
                    mock_response = "hi"
            elif num_failures / total_requests <= 0.25:
                # Randomly decide between fail and succeed
                if random.random() < 0.5:
                    mock_response = "hi"
                else:
                    mock_response = "litellm.InternalServerError"
            else:
                mock_response = "hi"

            await router.acompletion(
                model=model_id,
                messages=[{"role": "user", "content": "Hey, how's it going?"}],
                mock_response=mock_response,
            )
            model_stats[model_id]["successes"] += 1

            await asyncio.sleep(0.0001)
        except litellm.InternalServerError:
            model_stats[model_id]["failures"] += 1
            pass
        except Exception as e:
            print("Failed test model stats=", model_stats)
            raise e

    print("model_stats: ", model_stats)

    cooldown_list = await _async_get_cooldown_deployments(
        litellm_router_instance=router, parent_otel_span=None
    )
    assert len(cooldown_list) == 1


@pytest.mark.asyncio()
async def test_high_traffic_cooldowns_one_rate_limited_deployment():
    """
    PROD TEST - 3 deployments, 1 deployment has a period of 429 (rate limit) errors. Assert that the rate-limited deployment gets put into cooldown and the other deployments keep working
    """
    router = Router(
        model_list=[
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_base": "https://api.openai.com",
                },
            },
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_base": "https://api.openai.com-2",
                },
            },
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_base": "https://api.openai.com-3",
                },
            },
        ],
        set_verbose=True,
        debug_level="DEBUG",
    )

    all_deployment_ids = router.get_model_ids()

    import random
    from collections import defaultdict

    # Create a defaultdict to track successes and failures for each model ID
    model_stats = defaultdict(lambda: {"successes": 0, "failures": 0})

    bad_deployment_id = random.choice(all_deployment_ids)
    litellm.set_verbose = True
    for _ in range(100):
        try:
            model_id = random.choice(all_deployment_ids)

            num_successes = model_stats[model_id]["successes"]
            num_failures = model_stats[model_id]["failures"]
            total_requests = num_failures + num_successes
            if total_requests > 0:
                print(
                    "num failures= ",
                    num_failures,
                    "num successes= ",
                    num_successes,
                    "num_failures/total = ",
                    num_failures / total_requests,
                )

            if total_requests == 0:
                mock_response = "hi"
            elif bad_deployment_id == model_id:
                if num_failures / total_requests <= 0.6:
                    mock_response = "litellm.RateLimitError"
                else:
                    # past the target failure rate, let the rate-limited deployment succeed again
                    mock_response = "hi"
            elif num_failures / total_requests <= 0.25:
                # Randomly decide between fail and succeed
                if random.random() < 0.5:
                    mock_response = "hi"
                else:
                    mock_response = "litellm.InternalServerError"
            else:
                mock_response = "hi"

            await router.acompletion(
                model=model_id,
                messages=[{"role": "user", "content": "Hey, how's it going?"}],
                mock_response=mock_response,
            )
            model_stats[model_id]["successes"] += 1

            await asyncio.sleep(0.0001)
        except litellm.InternalServerError:
            model_stats[model_id]["failures"] += 1
            pass
        except litellm.RateLimitError:
            model_stats[bad_deployment_id]["failures"] += 1
            pass
        except Exception as e:
            print("Failed test model stats=", model_stats)
            raise e

    print("model_stats: ", model_stats)

    cooldown_list = await _async_get_cooldown_deployments(
        litellm_router_instance=router, parent_otel_span=None
    )
    assert len(cooldown_list) == 1
"""
Unit tests for router set_cooldowns
1. _set_cooldown_deployments() will cooldown a deployment after it fails 50% requests
"""


def test_router_fallbacks_with_cooldowns_and_model_id():
    router = Router(
        model_list=[
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {"model": "gpt-3.5-turbo", "rpm": 1},
                "model_info": {
                    "id": "123",
                },
            }
        ],
        routing_strategy="usage-based-routing-v2",
        fallbacks=[{"gpt-3.5-turbo": ["123"]}],
    )
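
    # The fallback list points at a specific deployment id ("123"). Id-based fallbacks
    # are meant to skip the cooldown check, so the second completion should still be
    # routed to that deployment after the first call triggers a mocked 429.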
    ## trigger ratelimit
    try:
        router.completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "hi"}],
            mock_response="litellm.RateLimitError",
        )
    except litellm.RateLimitError:
        pass

    router.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hi"}],
    )