diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 54640b54b..bb2145ed2 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -37,7 +37,7 @@ def test_completion_custom_provider_model_name():
     try:
         litellm.cache = None
         response = completion(
-            model="together_ai/mistralai/Mistral-7B-Instruct-v0.1",
+            model="together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1",
             messages=messages,
             logger_fn=logger_fn,
         )
@@ -1369,7 +1369,7 @@ def test_customprompt_together_ai():
     print(litellm.success_callback)
     print(litellm._async_success_callback)
     response = completion(
-        model="together_ai/mistralai/Mistral-7B-Instruct-v0.1",
+        model="together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1",
         messages=messages,
         roles={
             "system": {
@@ -1998,7 +1998,7 @@ def test_completion_together_ai_stream():
     messages = [{"content": user_message, "role": "user"}]
     try:
         response = completion(
-            model="together_ai/mistralai/Mistral-7B-Instruct-v0.1",
+            model="together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1",
             messages=messages,
             stream=True,
             max_tokens=5,
diff --git a/litellm/tests/test_multiple_deployments.py b/litellm/tests/test_multiple_deployments.py
index 0a79cb518..1c34cc574 100644
--- a/litellm/tests/test_multiple_deployments.py
+++ b/litellm/tests/test_multiple_deployments.py
@@ -25,7 +25,7 @@ model_list = [
     {
         "model_name": "mistral-7b-instruct",
         "litellm_params": {  # params for litellm completion/embedding call
-            "model": "together_ai/mistralai/Mistral-7B-Instruct-v0.1",
+            "model": "together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1",
             "api_key": os.getenv("TOGETHERAI_API_KEY"),
         },
     },
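For context, this is what calling the updated Together AI model string through litellm looks like outside the test suite. A minimal sketch, not part of the diff; it assumes `TOGETHERAI_API_KEY` is set in the environment and that the default prompt template is acceptable:

```python
import os
from litellm import completion

# Minimal sketch: invoke the updated Together AI model via litellm.
# Assumes TOGETHERAI_API_KEY is exported; litellm reads it for the
# together_ai/ provider prefix.
response = completion(
    model="together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    max_tokens=5,
)
print(response)
```

The `together_ai/` prefix routes the request to Together AI, and the remainder (`mistralai/Mixtral-8x7B-Instruct-v0.1`) is passed through as the provider-side model name, which is why only the model strings need to change in these tests.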