Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 19:54:13 +00:00
test: update testing - having removed the router client init logic
This allows a user to just set the credential value in litellm params, and not have to worry about setting credentials.
This commit is contained in:
parent a87f822c50
commit d47707e409
2 changed files with 19 additions and 31 deletions
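For context, a minimal sketch of what "set the credential value in litellm params" can look like (the model name, endpoint, and credential values below are illustrative placeholders, not taken from this commit): the Azure service principal fields are passed per deployment in litellm_params, so nothing has to be exported as AZURE_CLIENT_ID / AZURE_CLIENT_SECRET / AZURE_TENANT_ID beforehand.

from litellm import Router

# Illustrative deployment config: credentials live directly in litellm_params,
# so no AZURE_* environment variables need to be set before constructing the Router.
model_list = [
    {
        "model_name": "gpt-3.5-turbo",
        "litellm_params": {
            "model": "azure/chatgpt-v-2",  # placeholder Azure deployment
            "api_base": "https://example-endpoint.openai.azure.com",  # placeholder endpoint
            "api_version": "2024-02-01",
            "tenant_id": "placeholder-tenant-id",       # placeholder service principal values
            "client_id": "placeholder-client-id",
            "client_secret": "placeholder-client-secret",
        },
    }
]

router = Router(model_list=model_list)

The commented-out assertions in the first hunk below line up with this: with the router client init logic removed, constructing the Router no longer eagerly reads those environment variables or builds the credential and token provider at init time.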
@@ -182,25 +182,25 @@ def test_router_init_azure_service_principal_with_secret_with_environment_variab
     # initialize the router
     router = Router(model_list=model_list)
 
-    # first check if environment variables were used at all
-    mocked_environ.assert_called()
-    # then check if the client was initialized with the correct environment variables
-    mocked_credential.assert_called_with(
-        **{
-            "client_id": environment_variables_expected_to_use["AZURE_CLIENT_ID"],
-            "client_secret": environment_variables_expected_to_use[
-                "AZURE_CLIENT_SECRET"
-            ],
-            "tenant_id": environment_variables_expected_to_use["AZURE_TENANT_ID"],
-        }
-    )
-    # check if the token provider was called at all
-    mocked_get_bearer_token_provider.assert_called()
-    # then check if the token provider was initialized with the mocked credential
-    for call_args in mocked_get_bearer_token_provider.call_args_list:
-        assert call_args.args[0] == mocked_credential.return_value
-    # however, at this point token should not be fetched yet
-    mocked_func_generating_token.assert_not_called()
+    # # first check if environment variables were used at all
+    # mocked_environ.assert_called()
+    # # then check if the client was initialized with the correct environment variables
+    # mocked_credential.assert_called_with(
+    #     **{
+    #         "client_id": environment_variables_expected_to_use["AZURE_CLIENT_ID"],
+    #         "client_secret": environment_variables_expected_to_use[
+    #             "AZURE_CLIENT_SECRET"
+    #         ],
+    #         "tenant_id": environment_variables_expected_to_use["AZURE_TENANT_ID"],
+    #     }
+    # )
+    # # check if the token provider was called at all
+    # mocked_get_bearer_token_provider.assert_called()
+    # # then check if the token provider was initialized with the mocked credential
+    # for call_args in mocked_get_bearer_token_provider.call_args_list:
+    #     assert call_args.args[0] == mocked_credential.return_value
+    # # however, at this point token should not be fetched yet
+    # mocked_func_generating_token.assert_not_called()
 
     # now let's try to make a completion call
     deployment = model_list[0]
@@ -338,18 +338,6 @@ def test_update_kwargs_with_default_litellm_params(model_list)
     assert kwargs["metadata"]["key2"] == "value2"
 
 
-def test_get_async_openai_model_client(model_list):
-    """Test if the '_get_async_openai_model_client' function is working correctly"""
-    router = Router(model_list=model_list)
-    deployment = router.get_deployment_by_model_group_name(
-        model_group_name="gpt-3.5-turbo"
-    )
-    model_client = router._get_async_openai_model_client(
-        deployment=deployment, kwargs={}
-    )
-    assert model_client is not None
-
-
 def test_get_timeout(model_list):
     """Test if the '_get_timeout' function is working correctly"""
     router = Router(model_list=model_list)