Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 11:43:54 +00:00.
LiteLLM Minor Fixes and Improvements (09/13/2024) (#5689)

* refactor: cleanup unused variables + fix pyright errors
* feat(health_check.py): Closes https://github.com/BerriAI/litellm/issues/5686
* fix(o1_reasoning.py): add stricter check for o-1 reasoning model
* refactor(mistral/): make it easier to see mistral transformation logic
* fix(openai.py): fix openai o-1 model param mapping. Fixes https://github.com/BerriAI/litellm/issues/5685
* feat(main.py): infer finetuned gemini model from base model. Fixes https://github.com/BerriAI/litellm/issues/5678
* docs(vertex.md): update docs to call finetuned gemini models
* feat(proxy_server.py): allow admin to hide proxy model aliases. Closes https://github.com/BerriAI/litellm/issues/5692
* docs(load_balancing.md): add docs on hiding alias models from proxy config
* fix(base.py): don't raise notimplemented error
* fix(user_api_key_auth.py): fix model max budget check
* fix(router.py): fix elif
* fix(user_api_key_auth.py): don't set team_id to empty str
* fix(team_endpoints.py): fix response type
* test(test_completion.py): handle predibase error
* test(test_proxy_server.py): fix test
* fix(o1_transformation.py): fix max_completion_token mapping
* test(test_image_generation.py): mark flaky test
This commit is contained in:
parent
60c5d3ebec
commit
713d762411
35 changed files with 1020 additions and 539 deletions
|
@ -2481,3 +2481,31 @@ async def test_router_batch_endpoints(provider):
|
|||
model="my-custom-name", custom_llm_provider=provider, limit=2
|
||||
)
|
||||
print("list_batches=", list_batches)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("hidden", [True, False])
def test_model_group_alias(hidden):
    """A model-group alias marked ``hidden=True`` must be excluded from
    ``Router.get_model_list()`` and ``Router.get_model_names()``; a visible
    alias adds exactly one extra entry to each.
    """
    deployments = [
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {"model": "gpt-3.5-turbo"},
        },
        {"model_name": "gpt-4", "litellm_params": {"model": "gpt-4"}},
    ]
    router = Router(
        model_list=deployments,
        model_group_alias={
            "gpt-4.5-turbo": {"model": "gpt-3.5-turbo", "hidden": hidden}
        },
    )

    visible_models = router.get_model_list()
    visible_names = router.get_model_names()

    # Hidden alias → counts unchanged; visible alias → one extra entry each.
    expected_count = len(deployments) + (0 if hidden else 1)
    assert len(visible_models) == expected_count
    assert len(visible_names) == expected_count
|
Loading…
Add table
Add a link
Reference in a new issue