forked from phoenix/litellm-mirror
test(test_proxy_server.py): fix test to use valid redis host
commit e5b98814ad (parent 9f21f87afd)
3 changed files with 20 additions and 22 deletions
@@ -6,7 +6,6 @@ model_list:
       api_base: https://openai-function-calling-workers.tasslexyz.workers.dev/
       # api_base: http://0.0.0.0:8080
       stream_timeout: 0.001
-      rpm: 10
   - model_name: fake-openai-endpoint
     litellm_params:
       model: openai/my-fake-model-2
@@ -14,7 +13,6 @@ model_list:
       api_base: https://openai-function-calling-workers.tasslexyz.workers.dev/
       # api_base: http://0.0.0.0:8080
       stream_timeout: 0.001
-      rpm: 10
   - litellm_params:
       model: azure/chatgpt-v-2
       api_base: os.environ/AZURE_API_BASE
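For context, each of these model_list entries is what the proxy hands to litellm's Router. A minimal Python sketch of an equivalent in-code setup (model names and api_base are copied from the config above; api_key is a placeholder, and this is illustrative rather than the proxy's actual startup path):

from litellm import Router

# Illustrative only: a Router built from a model_list entry equivalent to the
# YAML above.
router = Router(
    model_list=[
        {
            "model_name": "fake-openai-endpoint",
            "litellm_params": {
                "model": "openai/my-fake-model-2",
                "api_key": "my-fake-key",
                "api_base": "https://openai-function-calling-workers.tasslexyz.workers.dev/",
                "stream_timeout": 0.001,
            },
        }
    ]
)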
@@ -32,28 +30,26 @@ model_list:
       # api_key: my-fake-key
       # api_base: https://exampleopenaiendpoint-production.up.railway.app/

-litellm_settings:
-  success_callback: ["prometheus"]
-  failure_callback: ["prometheus"]
-  service_callback: ["prometheus_system"]
-  upperbound_key_generate_params:
-    max_budget: os.environ/LITELLM_UPPERBOUND_KEYS_MAX_BUDGET
+# litellm_settings:
+#   success_callback: ["prometheus"]
+#   failure_callback: ["prometheus"]
+#   service_callback: ["prometheus_system"]
+#   upperbound_key_generate_params:
+#     max_budget: os.environ/LITELLM_UPPERBOUND_KEYS_MAX_BUDGET

 router_settings:
   routing_strategy: usage-based-routing-v2
-  redis_url: "rediss://:073f655645b843c4839329aea8384e68@us1-great-lizard-40486.upstash.io:40486/0"
+  # redis_url: "os.environ/REDIS_URL"
+  redis_host: os.environ/REDIS_HOST
+  redis_port: os.environ/REDIS_PORT
+  redis_password: os.environ/REDIS_PASSWORD
   enable_pre_call_checks: True

+litellm_settings:
+  num_retries: 3 # retry call 3 times on each model_name
+  allowed_fails: 3 # cooldown model if it fails > 1 call in a minute.
+
 general_settings:
-  master_key: sk-1234
-  allow_user_auth: true
   alerting: ["slack"]
-  store_model_in_db: True // set via environment variable - os.environ["STORE_MODEL_IN_DB"] = "True"
-  proxy_batch_write_at: 5 # 👈 Frequency of batch writing logs to server (in seconds)
-  enable_jwt_auth: True
-  alerting: ["slack"]
-  litellm_jwtauth:
-    admin_jwt_scope: "litellm_proxy_admin"
-    public_key_ttl: os.environ/LITELLM_PUBLIC_KEY_TTL
-    user_id_jwt_field: "sub"
-    org_id_jwt_field: "azp"
+  alerting_threshold: 300 # sends alerts if requests hang for 5min+ and responses take 5min+
+  proxy_batch_write_at: 60 # Frequency of batch writing logs to server (in seconds)
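The router_settings change swaps a hard-coded Upstash redis_url for os.environ/REDIS_HOST, os.environ/REDIS_PORT and os.environ/REDIS_PASSWORD references. A minimal sketch of how such references can be resolved against the process environment (the helper name resolve_env_refs is hypothetical, not litellm's actual API):

import os
from typing import Any

def resolve_env_refs(value: Any) -> Any:
    # Hypothetical helper: replace "os.environ/<VAR>" strings with the value
    # of <VAR> from the process environment; recurse into dicts and lists.
    if isinstance(value, str) and value.startswith("os.environ/"):
        return os.environ.get(value.split("/", 1)[1])
    if isinstance(value, dict):
        return {k: resolve_env_refs(v) for k, v in value.items()}
    if isinstance(value, list):
        return [resolve_env_refs(v) for v in value]
    return value

router_settings = resolve_env_refs({
    "routing_strategy": "usage-based-routing-v2",
    "redis_host": "os.environ/REDIS_HOST",
    "redis_port": "os.environ/REDIS_PORT",
    "redis_password": "os.environ/REDIS_PASSWORD",
})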
@@ -8,4 +8,4 @@ litellm_settings:
   cache_params:
     type: "redis"
     supported_call_types: ["embedding", "aembedding"]
-    host: "localhost"
+    host: "os.environ/REDIS_HOST"
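This cache config now reads the Redis host from os.environ/REDIS_HOST instead of hard-coding "localhost", in line with the commit title's "valid redis host". A sketch of the equivalent client-side wiring, assuming the redis-py package (litellm's own cache construction may differ):

import os
import redis  # redis-py; assumed to be installed

# Build a Redis client from the same environment variables the updated
# config references. The fallback defaults are illustrative only.
redis_client = redis.Redis(
    host=os.environ.get("REDIS_HOST", "localhost"),
    port=int(os.environ.get("REDIS_PORT", "6379")),
    password=os.environ.get("REDIS_PASSWORD"),
)
redis_client.ping()  # raises redis.exceptions.ConnectionError if the host is unreachable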
@@ -362,7 +362,9 @@ def test_load_router_config():
         ] # init with all call types

     except Exception as e:
-        pytest.fail("Proxy: Got exception reading config", e)
+        pytest.fail(
+            f"Proxy: Got exception reading config: {str(e)}\n{traceback.format_exc()}"
+        )


 # test_load_router_config()
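The old call passed the exception object as pytest.fail's second positional argument, which is the boolean pytrace flag, so the exception detail never made it into the failure message. The new version folds both the message and the traceback into the failure reason; it relies on the test module importing traceback. A standalone illustration of the pattern (the raised ValueError is a stand-in for the real config-loading call, deliberately triggered so the formatting is visible):

import traceback

import pytest


def test_config_load_reports_traceback():
    try:
        # Stand-in for the config-loading call exercised by
        # test_load_router_config(); raises so the except branch runs.
        raise ValueError("invalid redis host")
    except Exception as e:
        pytest.fail(
            f"Proxy: Got exception reading config: {str(e)}\n{traceback.format_exc()}"
        )

In the real test the except branch is only hit when config loading breaks, and the formatted message then carries both the exception text and the full traceback.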