mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
LiteLLM Minor Fixes & Improvements (10/23/2024) (#6407)
* docs(bedrock.md): clarify bedrock auth in litellm docs * fix(convert_dict_to_response.py): Fixes https://github.com/BerriAI/litellm/issues/6387 * feat(pattern_match_deployments.py): more robust handling for wildcard routes (model_name: custom_route/* -> openai/*) Enables user to expose custom routes to users with dynamic handling * test: add more testing * docs(custom_pricing.md): add debug tutorial for custom pricing * test: skip codestral test - unreachable backend * test: fix test * fix(pattern_matching_deployments.py): fix typing * test: cleanup codestral tests - backend api unavailable * (refactor) prometheus async_log_success_event to be under 100 LOC (#6416) * unit testig for prometheus * unit testing for success metrics * use 1 helper for _increment_token_metrics * use helper for _increment_remaining_budget_metrics * use _increment_remaining_budget_metrics * use _increment_top_level_request_and_spend_metrics * use helper for _set_latency_metrics * remove noqa violation * fix test prometheus * test prometheus * unit testing for all prometheus helper functions * fix prom unit tests * fix unit tests prometheus * fix unit test prom * (refactor) router - use static methods for client init utils (#6420) * use InitalizeOpenAISDKClient * use InitalizeOpenAISDKClient static method * fix # noqa: PLR0915 * (code cleanup) remove unused and undocumented logging integrations - litedebugger, berrispend (#6406) * code cleanup remove unused and undocumented code files * fix unused logging integrations cleanup * bump: version 1.50.3 → 1.50.4 --------- Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
This commit is contained in:
parent
c04c4a82f1
commit
1cd1d23fdf
9 changed files with 235 additions and 38 deletions
|
@ -914,3 +914,72 @@ def test_replace_model_in_jsonl(model_list):
|
|||
router = Router(model_list=model_list)
|
||||
deployments = router.pattern_router.get_deployments_by_pattern(model="claude-3")
|
||||
assert deployments is not None
|
||||
|
||||
|
||||
# def test_pattern_match_deployments(model_list):
#     from litellm.router_utils.pattern_match_deployments import PatternMatchRouter
#     import re
#
#     patter_router = PatternMatchRouter()
#
#     request = "fo::hi::static::hello"
#     model_name = "fo::*:static::*"
#
#     model_name_regex = patter_router._pattern_to_regex(model_name)
#
#     # Match against the request
#     match = re.match(model_name_regex, request)
#
#     print(f"match: {match}")
#     print(f"match.end: {match.end()}")
#     if match is None:
#         raise ValueError("Match not found")
#     updated_model = patter_router.set_deployment_model_name(
#         matched_pattern=match, litellm_deployment_litellm_model="openai/*"
#     )
#     assert updated_model == "openai/fo::hi:static::hello"
||||
@pytest.mark.parametrize(
    "user_request_model, model_name, litellm_model, expected_model",
    [
        ("llmengine/foo", "llmengine/*", "openai/foo", "openai/foo"),
        ("llmengine/foo", "llmengine/*", "openai/*", "openai/foo"),
        (
            "fo::hi::static::hello",
            "fo::*::static::*",
            "openai/fo::*:static::*",
            "openai/fo::hi:static::hello",
        ),
        (
            "fo::hi::static::hello",
            "fo::*::static::*",
            "openai/gpt-3.5-turbo",
            "openai/gpt-3.5-turbo",
        ),
    ],
)
def test_pattern_match_deployment_set_model_name(
    user_request_model, model_name, litellm_model, expected_model
):
    """Verify PatternMatchRouter.set_deployment_model_name maps a user's
    requested model onto a deployment's litellm model name.

    Cases cover: a wildcard route resolving to a literal deployment model,
    wildcard-to-wildcard substitution, multi-wildcard patterns
    (``fo::*::static::*``), and a litellm model with no wildcards (returned
    unchanged).
    """
    import re

    from litellm.router_utils.pattern_match_deployments import PatternMatchRouter

    pattern_router = PatternMatchRouter()

    # Convert the wildcard pattern into a regex, then match the user request
    # against it to capture the wildcard segments.
    model_name_regex = pattern_router._pattern_to_regex(model_name)
    match = re.match(model_name_regex, user_request_model)
    if match is None:
        raise ValueError("Match not found")

    # Substitute the captured segments into the deployment's model name.
    updated_model = pattern_router.set_deployment_model_name(match, litellm_model)
    assert updated_model == expected_model
|
Loading…
Add table
Add a link
Reference in a new issue