support using */*

Ishaan Jaff 2024-07-25 18:48:56 -07:00
parent 35203cede7
commit 9863520376
5 changed files with 18 additions and 1 deletion


@@ -208,7 +208,7 @@ jobs:
       -e AZURE_EUROPE_API_KEY=$AZURE_EUROPE_API_KEY \
       -e MISTRAL_API_KEY=$MISTRAL_API_KEY \
       -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
-      -e ANTHROPIC_API_KEY=$ANTHROPIC_API_KEY \
+      -e GROQ_API_KEY=$GROQ_API_KEY \
       -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
       -e AWS_REGION_NAME=$AWS_REGION_NAME \
       -e AUTO_INFER_REGION=True \


@@ -8,6 +8,9 @@ model_list:
     litellm_params:
       model: fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct
       api_key: "os.environ/FIREWORKS"
+  - model_name: "*"
+    litellm_params:
+      model: "*"
 general_settings:
   master_key: sk-1234
   alerting: ["slack"]
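
The new catch-all entry means any model name without a dedicated entry is still accepted by the proxy. As a rough illustration (a sketch, not part of this commit): assuming the proxy is started with this config and serves on litellm's default http://localhost:4000, an OpenAI-style client pointed at it with the master key above should be able to request an unlisted model:

    import openai

    # Hypothetical client call; base_url assumes litellm's default proxy port.
    client = openai.OpenAI(api_key="sk-1234", base_url="http://localhost:4000")

    # "groq/llama3-8b-8192" is an example name, not an explicit model_name in
    # the config above, so it should fall through to the new `model_name: "*"` entry.
    response = client.chat.completions.create(
        model="groq/llama3-8b-8192",
        messages=[{"role": "user", "content": "hello"}],
    )
    print(response.choices[0].message.content)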


@@ -25,6 +25,11 @@ def test_get_llm_provider():
 # test_get_llm_provider()
 
 
+def test_get_llm_provider_catch_all():
+    _, response, _, _ = litellm.get_llm_provider(model="*")
+    assert response == "openai"
+
+
 def test_get_llm_provider_gpt_instruct():
     _, response, _, _ = litellm.get_llm_provider(model="gpt-3.5-turbo-instruct-0914")
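
For readers unfamiliar with the `_, response, _, _` pattern: get_llm_provider returns a 4-tuple, and the test keeps only the provider element. A hedged sketch of the full unpacking (the variable names are descriptive guesses, not litellm's own):

    import litellm

    # Sketch: unpack all four return values; the test above asserts only
    # on the provider.
    model, provider, dynamic_api_key, api_base = litellm.get_llm_provider(model="*")
    assert provider == "openai"  # the catch-all added in this commit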


@@ -4650,6 +4650,8 @@ def get_llm_provider(
             custom_llm_provider = "openai"
         elif model in litellm.empower_models:
             custom_llm_provider = "empower"
+        elif model == "*":
+            custom_llm_provider = "openai"
     if custom_llm_provider is None or custom_llm_provider == "":
         if litellm.suppress_debug_info == False:
             print()  # noqa
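
In isolation, the new branch maps the literal model name "*" to the "openai" provider before the provider-not-found error path below can fire. A simplified standalone sketch of that control flow (not litellm's actual code; the real function handles many more providers):

    # Simplified stand-in for the relevant part of get_llm_provider.
    def resolve_provider(model: str) -> str:
        custom_llm_provider = None
        if model.startswith("gpt-"):
            custom_llm_provider = "openai"
        elif model == "*":  # new in this commit: wildcard defaults to openai
            custom_llm_provider = "openai"
        if custom_llm_provider is None or custom_llm_provider == "":
            raise ValueError(f"could not infer provider for model={model!r}")
        return custom_llm_provider

    assert resolve_provider("*") == "openai"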


@@ -85,6 +85,13 @@ model_list:
     litellm_params:
       model: openai/*
       api_key: os.environ/OPENAI_API_KEY
+  # Pass through all llm requests to litellm.completion/litellm.embedding:
+  # if a user passes model="anthropic/claude-3-opus-20240229", the proxy will make requests to anthropic claude-3-opus-20240229 using ANTHROPIC_API_KEY
+  - model_name: "*"
+    litellm_params:
+      model: "*"
   - model_name: mistral-embed
     litellm_params:
       model: mistral/mistral-embed
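
A hedged end-to-end example of the pass-through behavior the new comment describes, using raw HTTP rather than an SDK. The proxy address is assumed to be litellm's default, the bearer token is the sk-1234 master key shown earlier in this commit, and ANTHROPIC_API_KEY must be set in the proxy's environment:

    import requests

    # The proxy has no explicit entry for this model, so the request is matched
    # by `model_name: "*"` and forwarded via litellm.completion to Anthropic.
    resp = requests.post(
        "http://localhost:4000/chat/completions",  # assumed default proxy address
        headers={"Authorization": "Bearer sk-1234"},
        json={
            "model": "anthropic/claude-3-opus-20240229",
            "messages": [{"role": "user", "content": "ping"}],
        },
    )
    print(resp.json()["choices"][0]["message"]["content"])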