Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 11:43:54 +00:00
test(test_parallel_request_limiter.py): unit testing for tpm/rpm rate limits
parent 13b013b28d
commit 3957a8303a
1 changed file with 317 additions and 0 deletions
@@ -57,6 +57,86 @@ async def test_pre_call_hook():
    )


@pytest.mark.asyncio
async def test_pre_call_hook_rpm_limits():
    """
    Test if error raised on hitting rpm limits
    """
    _api_key = "sk-12345"
    user_api_key_dict = UserAPIKeyAuth(
        api_key=_api_key, max_parallel_requests=1, tpm_limit=9, rpm_limit=1
    )
    local_cache = DualCache()
    parallel_request_handler = MaxParallelRequestsHandler()

    await parallel_request_handler.async_pre_call_hook(
        user_api_key_dict=user_api_key_dict, cache=local_cache, data={}, call_type=""
    )

    kwargs = {"litellm_params": {"metadata": {"user_api_key": _api_key}}}

    await parallel_request_handler.async_log_success_event(
        kwargs=kwargs,
        response_obj="",
        start_time="",
        end_time="",
    )

    ## Expected cache val: {"current_requests": 0, "current_tpm": 0, "current_rpm": 1}

    try:
        await parallel_request_handler.async_pre_call_hook(
            user_api_key_dict=user_api_key_dict,
            cache=local_cache,
            data={},
            call_type="",
        )

        pytest.fail(f"Expected call to fail")
    except Exception as e:
        assert e.status_code == 429


@pytest.mark.asyncio
async def test_pre_call_hook_tpm_limits():
    """
    Test if error raised on hitting tpm limits
    """
    _api_key = "sk-12345"
    user_api_key_dict = UserAPIKeyAuth(
        api_key=_api_key, max_parallel_requests=1, tpm_limit=9, rpm_limit=10
    )
    local_cache = DualCache()
    parallel_request_handler = MaxParallelRequestsHandler()

    await parallel_request_handler.async_pre_call_hook(
        user_api_key_dict=user_api_key_dict, cache=local_cache, data={}, call_type=""
    )

    kwargs = {"litellm_params": {"metadata": {"user_api_key": _api_key}}}

    await parallel_request_handler.async_log_success_event(
        kwargs=kwargs,
        response_obj=litellm.ModelResponse(usage=litellm.Usage(total_tokens=10)),
        start_time="",
        end_time="",
    )

    ## Expected cache val: {"current_requests": 0, "current_tpm": 10, "current_rpm": 1}

    try:
        await parallel_request_handler.async_pre_call_hook(
            user_api_key_dict=user_api_key_dict,
            cache=local_cache,
            data={},
            call_type="",
        )

        pytest.fail(f"Expected call to fail")
    except Exception as e:
        assert e.status_code == 429


@pytest.mark.asyncio
async def test_success_call_hook():
    """
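The rpm/tpm tests above assert the 429 with a try/except plus pytest.fail. The same check can be expressed with pytest.raises; the following is a sketch for illustration only, not part of the commit, and it assumes (as the tests above do) that the raised exception exposes a status_code attribute:

import pytest


async def assert_rate_limited(handler, user_api_key_dict, cache):
    # Sketch: expect async_pre_call_hook to reject the request with a 429.
    with pytest.raises(Exception) as exc_info:
        await handler.async_pre_call_hook(
            user_api_key_dict=user_api_key_dict, cache=cache, data={}, call_type=""
        )
    assert exc_info.value.status_code == 429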
@@ -222,6 +302,85 @@ async def test_normal_router_call():
    )


@pytest.mark.asyncio
async def test_normal_router_tpm_limit():
    model_list = [
        {
            "model_name": "azure-model",
            "litellm_params": {
                "model": "azure/gpt-turbo",
                "api_key": "os.environ/AZURE_FRANCE_API_KEY",
                "api_base": "https://openai-france-1234.openai.azure.com",
                "rpm": 1440,
            },
            "model_info": {"id": 1},
        },
        {
            "model_name": "azure-model",
            "litellm_params": {
                "model": "azure/gpt-35-turbo",
                "api_key": "os.environ/AZURE_EUROPE_API_KEY",
                "api_base": "https://my-endpoint-europe-berri-992.openai.azure.com",
                "rpm": 6,
            },
            "model_info": {"id": 2},
        },
    ]
    router = Router(
        model_list=model_list,
        set_verbose=False,
        num_retries=3,
    )  # type: ignore

    _api_key = "sk-12345"
    user_api_key_dict = UserAPIKeyAuth(
        api_key=_api_key, max_parallel_requests=10, tpm_limit=10
    )
    local_cache = DualCache()
    pl = ProxyLogging(user_api_key_cache=local_cache)
    pl._init_litellm_callbacks()
    print(f"litellm callbacks: {litellm.callbacks}")
    parallel_request_handler = pl.max_parallel_request_limiter

    await parallel_request_handler.async_pre_call_hook(
        user_api_key_dict=user_api_key_dict, cache=local_cache, data={}, call_type=""
    )

    current_date = datetime.now().strftime("%Y-%m-%d")
    current_hour = datetime.now().strftime("%H")
    current_minute = datetime.now().strftime("%M")
    precise_minute = f"{current_date}-{current_hour}-{current_minute}"
    request_count_api_key = f"{_api_key}::{precise_minute}::request_count"

    assert (
        parallel_request_handler.user_api_key_cache.get_cache(
            key=request_count_api_key
        )["current_requests"]
        == 1
    )

    # normal call
    response = await router.acompletion(
        model="azure-model",
        messages=[{"role": "user", "content": "Write me a paragraph on the moon"}],
        metadata={"user_api_key": _api_key},
    )
    await asyncio.sleep(1)  # success is done in a separate thread
    print(f"response: {response}")

    try:
        await parallel_request_handler.async_pre_call_hook(
            user_api_key_dict=user_api_key_dict,
            cache=local_cache,
            data={},
            call_type="",
        )

        pytest.fail(f"Expected call to fail")
    except Exception as e:
        assert e.status_code == 429


@pytest.mark.asyncio
async def test_streaming_router_call():
    model_list = [
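The cache key assembled inline above has the shape <api_key>::<YYYY-MM-DD-HH-MM>::request_count. A small helper like the following, shown only as a sketch and not part of this commit, produces the same key without repeating the strftime calls in every test:

from datetime import datetime


def request_count_key(api_key: str) -> str:
    # Matches the f"{api_key}::{date}-{hour}-{minute}::request_count" key built in the tests above.
    precise_minute = datetime.now().strftime("%Y-%m-%d-%H-%M")
    return f"{api_key}::{precise_minute}::request_count"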
@@ -295,6 +454,87 @@ async def test_streaming_router_call():
    )


@pytest.mark.asyncio
async def test_streaming_router_tpm_limit():
    model_list = [
        {
            "model_name": "azure-model",
            "litellm_params": {
                "model": "azure/gpt-turbo",
                "api_key": "os.environ/AZURE_FRANCE_API_KEY",
                "api_base": "https://openai-france-1234.openai.azure.com",
                "rpm": 1440,
            },
            "model_info": {"id": 1},
        },
        {
            "model_name": "azure-model",
            "litellm_params": {
                "model": "azure/gpt-35-turbo",
                "api_key": "os.environ/AZURE_EUROPE_API_KEY",
                "api_base": "https://my-endpoint-europe-berri-992.openai.azure.com",
                "rpm": 6,
            },
            "model_info": {"id": 2},
        },
    ]
    router = Router(
        model_list=model_list,
        set_verbose=False,
        num_retries=3,
    )  # type: ignore

    _api_key = "sk-12345"
    user_api_key_dict = UserAPIKeyAuth(
        api_key=_api_key, max_parallel_requests=10, tpm_limit=10
    )
    local_cache = DualCache()
    pl = ProxyLogging(user_api_key_cache=local_cache)
    pl._init_litellm_callbacks()
    print(f"litellm callbacks: {litellm.callbacks}")
    parallel_request_handler = pl.max_parallel_request_limiter

    await parallel_request_handler.async_pre_call_hook(
        user_api_key_dict=user_api_key_dict, cache=local_cache, data={}, call_type=""
    )

    current_date = datetime.now().strftime("%Y-%m-%d")
    current_hour = datetime.now().strftime("%H")
    current_minute = datetime.now().strftime("%M")
    precise_minute = f"{current_date}-{current_hour}-{current_minute}"
    request_count_api_key = f"{_api_key}::{precise_minute}::request_count"

    assert (
        parallel_request_handler.user_api_key_cache.get_cache(
            key=request_count_api_key
        )["current_requests"]
        == 1
    )

    # normal call
    response = await router.acompletion(
        model="azure-model",
        messages=[{"role": "user", "content": "Write me a paragraph on the moon"}],
        stream=True,
        metadata={"user_api_key": _api_key},
    )
    async for chunk in response:
        continue
    await asyncio.sleep(1)  # success is done in a separate thread

    try:
        await parallel_request_handler.async_pre_call_hook(
            user_api_key_dict=user_api_key_dict,
            cache=local_cache,
            data={},
            call_type="",
        )

        pytest.fail(f"Expected call to fail")
    except Exception as e:
        assert e.status_code == 429


@pytest.mark.asyncio
async def test_bad_router_call():
    model_list = [
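The streaming test drains the response and then sleeps for one second so the async success callback can record the token usage. A hedged alternative, not part of this commit, is to poll the limiter's cache until current_tpm is populated, which is less sensitive to callback timing:

import asyncio


async def wait_for_tpm_update(handler, key, timeout=5.0):
    # Sketch: poll the per-minute cache entry until current_tpm is non-zero or timeout.
    deadline = asyncio.get_running_loop().time() + timeout
    while asyncio.get_running_loop().time() < deadline:
        val = handler.user_api_key_cache.get_cache(key=key)
        if val and val.get("current_tpm", 0) > 0:
            return val
        await asyncio.sleep(0.1)
    raise TimeoutError(f"current_tpm not updated for key {key}")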
@@ -366,3 +606,80 @@ async def test_bad_router_call():
        )["current_requests"]
        == 0
    )


@pytest.mark.asyncio
async def test_bad_router_tpm_limit():
    model_list = [
        {
            "model_name": "azure-model",
            "litellm_params": {
                "model": "azure/gpt-turbo",
                "api_key": "os.environ/AZURE_FRANCE_API_KEY",
                "api_base": "https://openai-france-1234.openai.azure.com",
                "rpm": 1440,
            },
            "model_info": {"id": 1},
        },
        {
            "model_name": "azure-model",
            "litellm_params": {
                "model": "azure/gpt-35-turbo",
                "api_key": "os.environ/AZURE_EUROPE_API_KEY",
                "api_base": "https://my-endpoint-europe-berri-992.openai.azure.com",
                "rpm": 6,
            },
            "model_info": {"id": 2},
        },
    ]
    router = Router(
        model_list=model_list,
        set_verbose=False,
        num_retries=3,
    )  # type: ignore

    _api_key = "sk-12345"
    user_api_key_dict = UserAPIKeyAuth(
        api_key=_api_key, max_parallel_requests=10, tpm_limit=10
    )
    local_cache = DualCache()
    pl = ProxyLogging(user_api_key_cache=local_cache)
    pl._init_litellm_callbacks()
    print(f"litellm callbacks: {litellm.callbacks}")
    parallel_request_handler = pl.max_parallel_request_limiter

    await parallel_request_handler.async_pre_call_hook(
        user_api_key_dict=user_api_key_dict, cache=local_cache, data={}, call_type=""
    )

    current_date = datetime.now().strftime("%Y-%m-%d")
    current_hour = datetime.now().strftime("%H")
    current_minute = datetime.now().strftime("%M")
    precise_minute = f"{current_date}-{current_hour}-{current_minute}"
    request_count_api_key = f"{_api_key}::{precise_minute}::request_count"

    assert (
        parallel_request_handler.user_api_key_cache.get_cache(
            key=request_count_api_key
        )["current_requests"]
        == 1
    )

    # bad call
    try:
        response = await router.acompletion(
            model="azure-model",
            messages=[{"role": "user2", "content": "Write me a paragraph on the moon"}],
            stream=True,
            metadata={"user_api_key": _api_key},
        )
    except:
        pass
    await asyncio.sleep(1)  # success is done in a separate thread

    assert (
        parallel_request_handler.user_api_key_cache.get_cache(
            key=request_count_api_key
        )["current_tpm"]
        == 0
    )
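Per the expected-cache-value comments earlier in the file, a failed call should leave the minute's counters at their baseline. A compact version of that check, offered only as a sketch and assuming the cache value is the dict shown in those comments, could look like:

def assert_counters_reset(handler, key):
    # Sketch: no requests or tokens should remain counted after a failed call.
    val = handler.user_api_key_cache.get_cache(key=key)
    assert val["current_requests"] == 0
    assert val["current_tpm"] == 0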