Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
Litellm UI qa 04 12 2025 p1 (#9955)
* fix(model_info_view.tsx): clean up text
* fix(key_management_endpoints.py): fix filtering of litellm-dashboard keys for internal users
* fix(proxy_track_cost_callback.py): prevent flooding spend logs with admin endpoint errors
* test: add unit testing for the logic
* test(test_auth_exception_handler.py): add more unit testing
* fix(router.py): correctly handle retrieving model info in get_model_group_info; fixes issue where the model hub was showing None prices (see the usage sketch after the diff below)
* fix: fix linting errors
This commit is contained in:
parent f8d52e2db9
commit 00e49380df

13 changed files with 249 additions and 80 deletions
@@ -2767,3 +2767,24 @@ def test_router_dynamic_credentials():
     deployment = router.get_deployment(model_id=original_model_id)
     assert deployment is not None
     assert deployment.litellm_params.api_key == original_api_key
+
+
+def test_router_get_model_group_info():
+    router = Router(
+        model_list=[
+            {
+                "model_name": "gpt-3.5-turbo",
+                "litellm_params": {"model": "gpt-3.5-turbo"},
+            },
+            {
+                "model_name": "gpt-4",
+                "litellm_params": {"model": "gpt-4"},
+            },
+        ],
+    )
+
+    model_group_info = router.get_model_group_info(model_group="gpt-4")
+    assert model_group_info is not None
+    assert model_group_info.model_group == "gpt-4"
+    assert model_group_info.input_cost_per_token > 0
+    assert model_group_info.output_cost_per_token > 0
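For reference, a minimal usage sketch of the behavior the router.py fix targets, assuming only the Router API exercised by the new test above. The model hub wiring itself is not part of this hunk, so the standalone check and print-out below are illustrative only.

from litellm import Router

# Hypothetical standalone check, mirroring the new test: after the fix,
# get_model_group_info should return populated per-token prices instead of None.
router = Router(
    model_list=[
        {"model_name": "gpt-4", "litellm_params": {"model": "gpt-4"}},
    ]
)

info = router.get_model_group_info(model_group="gpt-4")
if info is not None:
    # Before the fix, the model hub could render None for these prices.
    print(
        f"{info.model_group}: "
        f"input=${info.input_cost_per_token}/token, "
        f"output=${info.output_cost_per_token}/token"
    )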