test(test_openai_endpoints.py): add concurrency testing for user defined rate limits on proxy

This commit is contained in:
Krrish Dholakia 2024-04-12 18:56:13 -07:00
parent c03b0bbb24
commit ea1574c160
6 changed files with 68 additions and 28 deletions

View file

@@ -67,12 +67,12 @@ litellm_settings:
telemetry: False
context_window_fallbacks: [{"gpt-3.5-turbo": ["gpt-3.5-turbo-large"]}]
# router_settings:
# routing_strategy: usage-based-routing-v2
# redis_host: os.environ/REDIS_HOST
# redis_password: os.environ/REDIS_PASSWORD
# redis_port: os.environ/REDIS_PORT
# enable_pre_call_checks: true
router_settings:
routing_strategy: usage-based-routing-v2
redis_host: os.environ/REDIS_HOST
redis_password: os.environ/REDIS_PASSWORD
redis_port: os.environ/REDIS_PORT
enable_pre_call_checks: true
general_settings:
master_key: sk-1234 # [OPTIONAL] Use to enforce auth on proxy. See - https://docs.litellm.ai/docs/proxy/virtual_keys