Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
fix(cost_calculator.py): handle custom pricing at deployment level for router (#9855)

* fix(cost_calculator.py): handle custom pricing at deployment level for router
* test: add unit tests
* fix(router.py): show custom pricing on UI check correct model str
* fix: fix linting error
* docs(custom_pricing.md): clarify custom pricing for proxy
  Fixes https://github.com/BerriAI/litellm/issues/8573#issuecomment-2790420740
* test: update code qa test
* fix: cleanup traceback
* fix: handle litellm param custom pricing
* test: update test
* fix(cost_calculator.py): add router model id to list of potential model names
* fix(cost_calculator.py): fix router model id check
* fix: router.py - maintain older model registry approach
* fix: fix ruff check
* fix(router.py): router get deployment info add custom values to mapped dict
* test: update test
* fix(utils.py): update only if value is non-null
* test: add unit test
Parent commit: 0c5b4aa96d
This commit:   0dbd663877

16 changed files with 193 additions and 37 deletions
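For context, deployment-level custom pricing in litellm is configured per entry in the Router's model_list. Below is a minimal sketch, not taken from this commit, assuming the input_cost_per_token / output_cost_per_token fields described in litellm's custom-pricing docs; the model name, api_key, and prices are placeholders.

from litellm import Router

# Sketch only: one deployment whose litellm_params carry custom per-token pricing.
# Field names follow litellm's custom-pricing docs; all values are illustrative.
router = Router(
    model_list=[
        {
            "model_name": "my-gpt-4o-mini",
            "litellm_params": {
                "model": "openai/gpt-4o-mini",
                "api_key": "sk-placeholder",
                # deployment-level custom pricing (USD per token)
                "input_cost_per_token": 0.000005,
                "output_cost_per_token": 0.000015,
            },
            "model_info": {"id": "1"},
        }
    ]
)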
@@ -451,3 +451,11 @@ def test_router_get_deployment_credentials():
     credentials = router.get_deployment_credentials(model_id="1")
     assert credentials is not None
     assert credentials["api_key"] == "123"
+
+
+def test_router_get_deployment_model_info():
+    router = Router(
+        model_list=[{"model_name": "gemini/*", "litellm_params": {"model": "gemini/*"}, "model_info": {"id": "1"}}]
+    )
+    model_info = router.get_deployment_model_info(model_id="1", model_name="gemini/gemini-1.5-flash")
+    assert model_info is not None
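As a back-of-the-envelope check (illustrative numbers only, mirroring the placeholder pricing sketched above), per-token custom pricing turns into a request cost as prompt_tokens * input_cost_per_token + completion_tokens * output_cost_per_token:

# Illustrative arithmetic only; these are the placeholder rates from the sketch above.
input_cost_per_token = 0.000005   # USD per prompt token
output_cost_per_token = 0.000015  # USD per completion token
prompt_tokens, completion_tokens = 1_000, 200

cost = prompt_tokens * input_cost_per_token + completion_tokens * output_cost_per_token
print(f"${cost:.6f}")  # -> $0.008000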