From 986352037645d688e5e1c78dc3958ee459a24fae Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Thu, 25 Jul 2024 18:48:56 -0700
Subject: [PATCH] support using */*

---
 .circleci/config.yml                   | 2 +-
 litellm/proxy/proxy_config.yaml        | 3 +++
 litellm/tests/test_get_llm_provider.py | 5 +++++
 litellm/utils.py                       | 2 ++
 proxy_server_config.yaml               | 7 +++++++
 5 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index a9a5be671..a29b76110 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -208,7 +208,7 @@ jobs:
             -e AZURE_EUROPE_API_KEY=$AZURE_EUROPE_API_KEY \
             -e MISTRAL_API_KEY=$MISTRAL_API_KEY \
             -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
-            -e ANTHROPIC_API_KEY=$ANTHROPIC_API_KEY \
+            -e GROQ_API_KEY=$GROQ_API_KEY \
             -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
             -e AWS_REGION_NAME=$AWS_REGION_NAME \
             -e AUTO_INFER_REGION=True \
diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index 0e3f0826e..9d913b458 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -8,6 +8,9 @@ model_list:
     litellm_params:
       model: fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct
       api_key: "os.environ/FIREWORKS"
+  - model_name: "*"
+    litellm_params:
+      model: "*"
 general_settings:
   master_key: sk-1234
   alerting: ["slack"]
diff --git a/litellm/tests/test_get_llm_provider.py b/litellm/tests/test_get_llm_provider.py
index 3ec867af4..6f53b0f8f 100644
--- a/litellm/tests/test_get_llm_provider.py
+++ b/litellm/tests/test_get_llm_provider.py
@@ -25,6 +25,11 @@ def test_get_llm_provider():
 # test_get_llm_provider()
 
 
+def test_get_llm_provider_catch_all():
+    _, response, _, _ = litellm.get_llm_provider(model="*")
+    assert response == "openai"
+
+
 def test_get_llm_provider_gpt_instruct():
     _, response, _, _ = litellm.get_llm_provider(model="gpt-3.5-turbo-instruct-0914")
 
diff --git a/litellm/utils.py b/litellm/utils.py
index e104de958..cceed6b9d 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -4650,6 +4650,8 @@ def get_llm_provider(
             custom_llm_provider = "openai"
         elif model in litellm.empower_models:
             custom_llm_provider = "empower"
+        elif model == "*":
+            custom_llm_provider = "openai"
         if custom_llm_provider is None or custom_llm_provider == "":
             if litellm.suppress_debug_info == False:
                 print()  # noqa
diff --git a/proxy_server_config.yaml b/proxy_server_config.yaml
index 5ee7192c8..f7766b65b 100644
--- a/proxy_server_config.yaml
+++ b/proxy_server_config.yaml
@@ -85,6 +85,13 @@ model_list:
     litellm_params:
       model: openai/*
       api_key: os.environ/OPENAI_API_KEY
+
+  # Pass through all llm requests to litellm.completion/litellm.embedding
+  # if user passes model="anthropic/claude-3-opus-20240229" proxy will make requests to anthropic claude-3-opus-20240229 using ANTHROPIC_API_KEY
+  - model_name: "*"
+    litellm_params:
+      model: "*"
+
   - model_name: mistral-embed
     litellm_params:
       model: mistral/mistral-embed
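
Usage note (not part of the patch): below is a minimal sketch of how a client could exercise the new "*" catch-all route through the proxy. It assumes the proxy is running locally on its default port 4000 with the example master key sk-1234 from proxy_config.yaml; the port, key, and model name are illustrative, not taken from this diff.

# Illustrative only: assumes a local litellm proxy started with the config above.
from openai import OpenAI

client = OpenAI(api_key="sk-1234", base_url="http://localhost:4000")

# No explicit model entry is needed: the "*" wildcard route passes the request
# through to litellm.completion, which resolves the provider from the model
# prefix (e.g. "anthropic/...") and uses the matching provider API key.
response = client.chat.completions.create(
    model="anthropic/claude-3-opus-20240229",
    messages=[{"role": "user", "content": "Hello from the wildcard route"}],
)
print(response.choices[0].message.content)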