* fix(core_helpers.py): return None instead of raising "kwargs is None" error. Closes https://github.com/BerriAI/litellm/issues/6500
* docs(cost_tracking.md): cleanup doc
* fix(vertex_and_google_ai_studio.py): handle function call with no params passed in. Closes https://github.com/BerriAI/litellm/issues/6495
* test(test_router_timeout.py): add test for router timeout + retry logic
* test: update test to use module-level values
* (fix) Prometheus - log Postgres DB latency, status on prometheus (#6484)
* fix logging DB fails on prometheus
* unit testing log to otel wrapper
* unit testing for service logger + prometheus
* use LATENCY buckets for service logging
* fix service logging
* docs: clarify vertex vs gemini
* (router_strategy/) ensure all async functions use async cache methods (#6489)
* fix router strat
* use async set / get cache in router_strategy
* add coverage for router strategy
* fix imports
* fix batch_get_cache
* use async methods for least busy
* fix least busy use async methods
* fix test_dual_cache_increment
* test async_get_available_deployment when routing_strategy="least-busy"
* (fix) proxy - fix when `STORE_MODEL_IN_DB` should be set (#6492)
* set store_model_in_db at the top
* correctly use store_model_in_db global
* (fix) `PrometheusServicesLogger` `_get_metric` should return metric in Registry (#6486)
* fix logging DB fails on prometheus
* unit testing log to otel wrapper
* unit testing for service logger + prometheus
* use LATENCY buckets for service logging
* fix service logging
* fix _get_metric in prom services logger
* add clear doc string
* unit testing for prom service logger
* bump: version 1.51.0 → 1.51.1
* Add `azure/gpt-4o-mini-2024-07-18` to model_prices_and_context_window.json (#6477)
* Update utils.py (#6468) - fixed missing keys
* (perf) LiteLLM Redis router fix - ~100ms improvement (#6483)
* docs(exception_mapping.md): add missing exception types. Fixes https://github.com/Aider-AI/aider/issues/2120#issuecomment-2438971183
* fix(main.py): register custom model pricing with specific key - ensure custom model pricing is registered to the specific model+provider key combination
* test: make testing more robust for custom pricing
* fix(redis_cache.py): instrument otel logging for sync redis calls - ensures complete coverage for all redis cache calls
* refactor: pass parent_otel_span for redis caching calls in router - allows for more observability into what calls are causing latency issues
* test: update tests with new params
* refactor: ensure e2e otel tracing for router
* refactor(router.py): add more otel tracing across router - catch all latency issues for router requests
* fix: fix linting error
* fix(router.py): fix linting error
* fix: fix test
* test: fix tests
* fix(dual_cache.py): pass ttl to redis cache
* fix: fix param
* perf(cooldown_cache.py): improve cooldown cache to store cache results in memory for 5s, preventing a Redis call on every request - reduces ~100ms latency per call with caching enabled on the router
* fix: fix test
* fix(cooldown_cache.py): handle if a result is None
* fix(cooldown_cache.py): add debug statements
* refactor(dual_cache.py): move to an in-memory check for batch get cache, to prevent Redis from being hit on every call
* fix(cooldown_cache.py): fix linting error
* refactor(prometheus.py): move to the standard logging payload for reading remaining requests / tokens - ensures prometheus token tracking works for anthropic as well
* fix: fix linting error
* fix(redis_cache.py): make sure ttl is always int (handle float values) - fixes issue where redis_client.ex was not working correctly due to a float ttl
* fix: fix linting error
* test: update test
* fix: fix linting error

---------

Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Co-authored-by: Xingyao Wang <xingyao@all-hands.dev>
Co-authored-by: vibhanshu-ob <115142120+vibhanshu-ob@users.noreply.github.com>
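Two of the perf items above (the 5s in-memory window for the cooldown cache, and coercing Redis TTLs to int) follow one pattern: keep a short-lived local copy in front of Redis so hot keys skip the network round trip, and coerce TTLs to int because redis's expiry argument rejects floats. A minimal sketch of that pattern, assuming a generic backing cache with get/set; class and method names here are illustrative, not litellm's actual API:

import time

class InMemoryTTLFront:
    """Short-lived in-memory front for a slower backing cache (e.g. Redis)."""

    def __init__(self, backing_cache, ttl_seconds: float = 5.0):
        self.backing_cache = backing_cache  # assumed to expose get()/set()
        self.ttl_seconds = ttl_seconds
        self._store = {}  # key -> (expires_at, value)

    def get(self, key):
        hit = self._store.get(key)
        if hit is not None and hit[0] > time.monotonic():
            return hit[1]  # fresh local copy: no Redis round trip
        value = self.backing_cache.get(key)  # fall through to the slow cache
        self._store[key] = (time.monotonic() + self.ttl_seconds, value)
        return value

    def set(self, key, value, ttl=None):
        if ttl is not None:
            ttl = int(ttl)  # Redis expiry expects an int; float TTLs break it
        self.backing_cache.set(key, value, ttl=ttl)
        self._store[key] = (time.monotonic() + self.ttl_seconds, value)

The short local window trades a few seconds of staleness for skipping a Redis round trip on every request, which is where the ~100ms-per-call saving claimed in #6483 comes from.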
189 lines · 5.1 KiB · Python
#### What this tests ####
# This tests the router's timeout error handling during fallbacks

import asyncio
import os
import sys
import time
import traceback

import pytest

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path

from unittest.mock import patch, MagicMock, AsyncMock

from dotenv import load_dotenv

import litellm
from litellm import Router
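
# Load API keys and Redis settings from a local .env file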
load_dotenv()


def test_router_timeouts():
    # Model list for Azure and Anthropic models
    model_list = [
        {
            "model_name": "openai-gpt-4",
            "litellm_params": {
                "model": "azure/chatgpt-v-2",
                "api_key": "os.environ/AZURE_API_KEY",
                "api_base": "os.environ/AZURE_API_BASE",
                "api_version": "os.environ/AZURE_API_VERSION",
            },
            "tpm": 80000,
        },
        {
            "model_name": "anthropic-claude-instant-1.2",
            "litellm_params": {
                "model": "claude-instant-1.2",
                "api_key": "os.environ/ANTHROPIC_API_KEY",
                "mock_response": "hello world",
            },
            "tpm": 20000,
        },
    ]
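
    # Note: litellm resolves "os.environ/<VAR>" strings from the environment at
    # request time, and "mock_response" makes the fallback deployment return a
    # canned response instead of hitting the Anthropic API.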

    fallbacks_list = [
        {"openai-gpt-4": ["anthropic-claude-instant-1.2"]},
    ]
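
    # usage-based-routing tracks TPM/RPM usage per deployment; with Redis
    # configured below, that usage state is shared across router instances.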
    # Configure router
    router = Router(
        model_list=model_list,
        fallbacks=fallbacks_list,
        routing_strategy="usage-based-routing",
        debug_level="INFO",
        set_verbose=True,
        redis_host=os.getenv("REDIS_HOST"),
        redis_password=os.getenv("REDIS_PASSWORD"),
        redis_port=int(os.getenv("REDIS_PORT")),
        timeout=10,
        num_retries=0,
    )

    print("***** TPM SETTINGS *****")
    for model_object in model_list:
        print(f"{model_object['model_name']}: {model_object['tpm']} TPM")

    # Sample list of questions
    questions_list = [
        {"content": "Tell me a very long joke.", "modality": "voice"},
    ]
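    # Only "content" is used below; "modality" is illustrative metadata.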

    total_tokens_used = 0

    # Process each question
    for question in questions_list:
        messages = [{"content": question["content"], "role": "user"}]

        prompt_tokens = litellm.token_counter(text=question["content"], model="gpt-4")
        print("prompt_tokens = ", prompt_tokens)

        response = router.completion(
            model="openai-gpt-4", messages=messages, timeout=5, num_retries=0
        )
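        # Note: the per-call timeout=5 overrides the router-level timeout=10.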

        total_tokens_used += response.usage.total_tokens

        print("Response:", response)
        print("********** TOKENS USED SO FAR = ", total_tokens_used)


@pytest.mark.asyncio
async def test_router_timeouts_bedrock():
    import uuid

    import openai

    # Single Bedrock model with an intentionally tiny timeout
    _model_list = [
        {
            "model_name": "bedrock",
            "litellm_params": {
                "model": "bedrock/anthropic.claude-instant-v1",
                "timeout": 0.00001,
            },
            "tpm": 80000,
        },
    ]
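
    # No fallbacks and num_retries=0, so the timeout should surface to the caller.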
    # Configure router
    router = Router(
        model_list=_model_list,
        routing_strategy="usage-based-routing",
        debug_level="DEBUG",
        set_verbose=True,
        num_retries=0,
    )

    litellm.set_verbose = True
    try:
        response = await router.acompletion(
            model="bedrock",
            messages=[{"role": "user", "content": f"hello, who are u {uuid.uuid4()}"}],
        )
        print(response)
        pytest.fail("Did not raise error `openai.APITimeoutError`")
    except openai.APITimeoutError as e:
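        # litellm.Timeout subclasses openai.APITimeoutError, so this branch also
        # catches litellm's mapped timeout exception.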
        print(
            "Passed: Raised correct exception. Got openai.APITimeoutError\nGood Job", e
        )
        print(type(e))
        pass
    except Exception as e:
        pytest.fail(
            f"Did not raise error `openai.APITimeoutError`. Instead raised error type: {type(e)}, Error: {e}"
        )


@pytest.mark.parametrize(
    "num_retries, expected_call_count",
    [(0, 1), (1, 2), (2, 3), (3, 4)],
)
def test_router_timeout_with_retries_anthropic_model(num_retries, expected_call_count):
    """
    If a request hits the custom timeout, ensure it is retried: one original
    attempt plus num_retries retries, so expected_call_count == num_retries + 1.
    """
    litellm._turn_on_debug()
    from litellm.llms.custom_httpx.http_handler import HTTPHandler

    litellm.num_retries = num_retries
    litellm.request_timeout = 0.000001
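    # num_retries and request_timeout are module-level litellm settings, so
    # every request made through the router below inherits the near-zero timeout.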

    router = Router(
        model_list=[
            {
                "model_name": "claude-3-haiku",
                "litellm_params": {
                    "model": "anthropic/claude-3-haiku-20240307",
                },
            }
        ],
    )

    custom_client = HTTPHandler()

    with patch.object(custom_client, "post", new=MagicMock()) as mock_client:
        try:

            def delayed_response(*args, **kwargs):
                time.sleep(0.01)  # Exceeds the 0.000001 timeout
                raise TimeoutError("Request timed out.")

            mock_client.side_effect = delayed_response

            router.completion(
                model="claude-3-haiku",
                messages=[{"role": "user", "content": "hello, who are u"}],
                client=custom_client,
            )
        except litellm.Timeout:
            pass
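
        # One original attempt + num_retries retries should hit the mocked client.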
        assert mock_client.call_count == expected_call_count