forked from phoenix/litellm-mirror
(fix) provider wildcard routing - when models are specified without a provider prefix (#6173)
* fix wildcard routing scenario
* fix pattern matching hits
This commit is contained in:
parent b032e898c2
commit 80ecf0829c
2 changed files with 26 additions and 1 deletion
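For context on the scenario named in the commit title, here is a minimal sketch of wildcard routing with a bare model name. The configuration is an assumption for illustration, not this commit's test fixture: it presumes a Router with a single "anthropic/*" wildcard deployment and an ANTHROPIC_API_KEY in the environment.

import asyncio

from litellm import Router

# Assumed wildcard deployment; any model matching "anthropic/*" is served
# through these litellm_params.
router = Router(
    model_list=[
        {
            "model_name": "anthropic/*",
            "litellm_params": {"model": "anthropic/*"},
        }
    ]
)

async def main():
    # The model is given without a provider prefix; before this fix such a
    # request could miss the "anthropic/*" pattern entirely.
    response = await router.acompletion(
        model="claude-3-5-sonnet-20240620",
        messages=[{"role": "user", "content": "hello"}],
    )
    print(response)

asyncio.run(main())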
@@ -5169,8 +5169,28 @@ class Router:
         if model not in self.model_names:
             # check if provider/ specific wildcard routing use pattern matching
-            _pattern_router_response = self.pattern_router.route(model)
+            custom_llm_provider: Optional[str] = None
+            try:
+                (
+                    _,
+                    custom_llm_provider,
+                    _,
+                    _,
+                ) = litellm.get_llm_provider(model=model)
+            except Exception:
+                # get_llm_provider raises exception when provider is unknown
+                pass
+
+            """
+            self.pattern_router.route(model):
+            does exact pattern matching. Example openai/gpt-3.5-turbo gets routed to pattern openai/*
+
+            self.pattern_router.route(f"{custom_llm_provider}/{model}"):
+            does pattern matching using litellm.get_llm_provider(), example claude-3-5-sonnet-20240620 gets routed to anthropic/* since 'claude-3-5-sonnet-20240620' is an Anthropic Model
+            """
+            _pattern_router_response = self.pattern_router.route(
+                model
+            ) or self.pattern_router.route(f"{custom_llm_provider}/{model}")
             if _pattern_router_response is not None:
                 provider_deployments = []
                 for deployment in _pattern_router_response:
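To follow the control flow in the hunk above outside of the Router class, here is a standalone sketch of the same two-step lookup. The pattern_match helper is hypothetical, standing in for self.pattern_router.route(); litellm.get_llm_provider() is the real call used in the diff.

from typing import Dict, List, Optional

import litellm

def pattern_match(model: str, patterns: Dict[str, List[dict]]) -> Optional[List[dict]]:
    # Hypothetical stand-in for self.pattern_router.route(): maps wildcard
    # deployment names such as "anthropic/*" to their deployments.
    for pattern, deployments in patterns.items():
        if pattern.endswith("*") and model.startswith(pattern[:-1]):
            return deployments
        if pattern == model:
            return deployments
    return None

def route_with_fallback(model: str, patterns: Dict[str, List[dict]]) -> Optional[List[dict]]:
    # Step 1: match the raw name, e.g. "openai/gpt-3.5-turbo" hits "openai/*".
    hit = pattern_match(model, patterns)
    if hit is not None:
        return hit
    # Step 2: infer the provider with litellm.get_llm_provider() and retry
    # with the prefix added, e.g. "claude-3-5-sonnet-20240620" becomes
    # "anthropic/claude-3-5-sonnet-20240620" and hits "anthropic/*".
    try:
        _, custom_llm_provider, _, _ = litellm.get_llm_provider(model=model)
    except Exception:
        return None  # unknown provider, nothing left to try
    return pattern_match(f"{custom_llm_provider}/{model}", patterns)

With patterns = {"anthropic/*": [...]}, a bare "claude-3-5-sonnet-20240620" misses step 1 and resolves in step 2, which is exactly the case the added code handles.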
@@ -124,6 +124,11 @@ async def test_router_provider_wildcard_routing():
     print("response 3 = ", response3)
 
+    response4 = await router.acompletion(
+        model="claude-3-5-sonnet-20240620",
+        messages=[{"role": "user", "content": "hello"}],
+    )
+
 
 @pytest.mark.asyncio()
 async def test_router_provider_wildcard_routing_regex():
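The second hunk sits next to a regex-based wildcard test. The PatternMatchRouter implementation is not part of this diff, so the helper below is only an illustrative sketch of the underlying idea: turning a wildcard deployment name into a regular expression and matching incoming model names against it.

import re

def pattern_to_regex(pattern: str) -> "re.Pattern[str]":
    # Escape the pattern literally, then let "*" match any run of characters:
    # "anthropic/*" -> "^anthropic/.*$"
    escaped = re.escape(pattern).replace(r"\*", ".*")
    return re.compile(f"^{escaped}$")

anthropic_pattern = pattern_to_regex("anthropic/*")
print(bool(anthropic_pattern.match("anthropic/claude-3-5-sonnet-20240620")))  # True
print(bool(anthropic_pattern.match("claude-3-5-sonnet-20240620")))            # False, hence the provider-prefix fallback above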