diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index 71a356b80..f2f4e86ed 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -1,22 +1,22 @@
 model_list:
   - model_name: openai/*
     litellm_params:
-      model: gpt-3.5-turbo
+      model: openai/*
       api_key: os.environ/OPENAI_API_KEY
+    model_info:
+      id: "good-openai"
+  - model_name: openai/*
+    litellm_params:
+      model: openai/*
+      api_key: os.environ/non-existent-env-var
+      tags: ["bad-model"]
+    model_info:
+      id: "test-openai"
+
+
+router_settings:
+  enable_tag_filtering: True # 👈 Key Change
 
 litellm_settings:
   success_callback: ["prometheus"]
-  failure_callback: ["prometheus"]
-
-guardrails:
-  - guardrail_name: "presidio-pre-guard"
-    litellm_params:
-      guardrail: presidio # supported values: "aporia", "lakera", "presidio"
-      mode: "pre_call" # pre_call, during_call, post_call
-      output_parse_pii: True
-
-general_settings:
-  master_key: sk-1234
-  alerting: ["slack"]
-  spend_report_frequency: "1d"
-
+  failure_callback: ["prometheus"]
\ No newline at end of file
diff --git a/litellm/router_strategy/tag_based_routing.py b/litellm/router_strategy/tag_based_routing.py
index ed350109c..2ffec522f 100644
--- a/litellm/router_strategy/tag_based_routing.py
+++ b/litellm/router_strategy/tag_based_routing.py
@@ -25,14 +25,14 @@ async def get_deployments_for_tag(
 
     if request_kwargs is None:
         verbose_logger.debug(
-            "get_deployments_for_tier: request_kwargs is None returning healthy_deployments: %s",
+            "get_deployments_for_tag: request_kwargs is None returning healthy_deployments: %s",
            healthy_deployments,
        )
        return healthy_deployments
 
    if healthy_deployments is None:
        verbose_logger.debug(
-            "get_deployments_for_tier: healthy_deployments is None returning healthy_deployments"
+            "get_deployments_for_tag: healthy_deployments is None returning healthy_deployments"
        )
        return healthy_deployments
 
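
For reference, a minimal client-side sketch of how the tag filtering enabled above is exercised: with enable_tag_filtering set, the router only considers deployments whose litellm_params.tags match the tags sent on the request. The base URL, master key, and requested model below are hypothetical placeholders, and passing tags via request metadata follows the documented LiteLLM pattern; adjust to your deployment.

    # Sketch: steer a request toward the deployment tagged "bad-model".
    # Assumes the proxy from proxy_config.yaml is running locally on port 4000
    # and authenticates with a (hypothetical) master key "sk-1234".
    import openai

    client = openai.OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")

    response = client.chat.completions.create(
        # Matches the wildcard model_name "openai/*" in the config.
        model="openai/gpt-3.5-turbo",
        messages=[{"role": "user", "content": "ping"}],
        # Tags travel in request metadata; the router filters healthy
        # deployments down to those carrying a matching tag.
        extra_body={"metadata": {"tags": ["bad-model"]}},
    )
    print(response)

Since the "bad-model" deployment points at a non-existent env var for its API key, a tagged request like this should fail, which is useful for verifying that tag filtering actually isolates traffic to the intended deployment.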