diff --git a/tests/local_testing/test_text_completion.py b/tests/local_testing/test_text_completion.py
index 55d6e229a3..697c3fc710 100644
--- a/tests/local_testing/test_text_completion.py
+++ b/tests/local_testing/test_text_completion.py
@@ -4116,12 +4116,11 @@ async def test_async_text_completion_chat_model_stream():
 @pytest.mark.asyncio
 async def test_completion_codestral_fim_api(model):
     try:
-        if model == "vertex_ai/codestral@2405":
-            from test_amazing_vertex_completion import (
-                load_vertex_ai_credentials,
-            )
+        from test_amazing_vertex_completion import (
+            load_vertex_ai_credentials,
+        )
 
-            load_vertex_ai_credentials()
+        load_vertex_ai_credentials()
         litellm.set_verbose = True
         import logging
 
@@ -4157,17 +4156,16 @@ async def test_completion_codestral_fim_api(model):
 
 @pytest.mark.parametrize(
     "model",
-    ["vertex_ai/codestral@2405"],
+    ["vertex_ai/mistral-nemo@2407"],
 )
 @pytest.mark.asyncio
 async def test_completion_codestral_fim_api_stream(model):
     try:
-        if model == "vertex_ai/codestral@2405":
-            from test_amazing_vertex_completion import (
-                load_vertex_ai_credentials,
-            )
+        from test_amazing_vertex_completion import (
+            load_vertex_ai_credentials,
+        )
 
-            load_vertex_ai_credentials()
+        load_vertex_ai_credentials()
         import logging
 
         from litellm._logging import verbose_logger