forked from phoenix/litellm-mirror
test(test_openai_endpoints.py): add concurrency testing for user defined rate limits on proxy
This commit is contained in:
parent
c03b0bbb24
commit
ea1574c160
6 changed files with 68 additions and 28 deletions
@@ -67,12 +67,12 @@ litellm_settings:
   telemetry: False
   context_window_fallbacks: [{"gpt-3.5-turbo": ["gpt-3.5-turbo-large"]}]
 
-# router_settings:
-#   routing_strategy: usage-based-routing-v2
-#   redis_host: os.environ/REDIS_HOST
-#   redis_password: os.environ/REDIS_PASSWORD
-#   redis_port: os.environ/REDIS_PORT
-#   enable_pre_call_checks: true
+router_settings:
+  routing_strategy: usage-based-routing-v2
+  redis_host: os.environ/REDIS_HOST
+  redis_password: os.environ/REDIS_PASSWORD
+  redis_port: os.environ/REDIS_PORT
+  enable_pre_call_checks: true
 
 general_settings:
   master_key: sk-1234 # [OPTIONAL] Use to enforce auth on proxy. See - https://docs.litellm.ai/docs/proxy/virtual_keys
|
Loading…
Add table
Add a link
Reference in a new issue