diff --git a/litellm/tests/test_lowest_cost_routing.py b/litellm/tests/test_lowest_cost_routing.py
index 217b4a970..a793ba0a2 100644
--- a/litellm/tests/test_lowest_cost_routing.py
+++ b/litellm/tests/test_lowest_cost_routing.py
@@ -102,18 +102,18 @@ async def test_get_available_deployments_custom_price():
 @pytest.mark.asyncio
 async def test_lowest_cost_routing():
     """
     Test if router returns model with the lowest cost
     """
     model_list = [
         {
-            "model_name": "gpt-3.5-turbo",
+            "model_name": "gpt-4",
             "litellm_params": {"model": "gpt-4"},
             "model_info": {"id": "openai-gpt-4"},
         },
         {
             "model_name": "gpt-3.5-turbo",
-            "litellm_params": {"model": "groq/llama3-8b-8192"},
-            "model_info": {"id": "groq-llama"},
+            "litellm_params": {"model": "gpt-3.5-turbo"},
+            "model_info": {"id": "gpt-3.5-turbo"},
         },
     ]
 
@@ -127,7 +127,7 @@ async def test_lowest_cost_routing():
     print(
         response._hidden_params["model_id"]
-    ) # expect groq-llama, since groq/llama has lowest cost
-    assert "groq-llama" == response._hidden_params["model_id"]
+    )  # expect gpt-3.5-turbo, since it has the lowest cost
+    assert "gpt-3.5-turbo" == response._hidden_params["model_id"]
 
 
 async def _deploy(lowest_cost_logger, deployment_id, tokens_used, duration):