diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index d98745d0b..2ef525e9f 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -37,7 +37,7 @@ def test_completion_custom_provider_model_name():
     try:
         litellm.cache = None
         response = completion(
-            model="together_ai/mistralai/Mistral-7B-Instruct-v0.1",
+            model="together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1",
             messages=messages,
             logger_fn=logger_fn,
         )
@@ -1369,7 +1369,7 @@ def test_customprompt_together_ai():
         print(litellm.success_callback)
         print(litellm._async_success_callback)
         response = completion(
-            model="together_ai/mistralai/Mistral-7B-Instruct-v0.1",
+            model="together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1",
             messages=messages,
             roles={
                 "system": {
diff --git a/litellm/utils.py b/litellm/utils.py
index 6aba17f95..d67ee37e3 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -4684,6 +4684,7 @@ def get_llm_provider(
                 get_secret("TOGETHER_API_KEY")
                 or get_secret("TOGETHER_AI_API_KEY")
                 or get_secret("TOGETHERAI_API_KEY")
+                or get_secret("TOGETHER_AI_TOKEN")
             )
             return model, custom_llm_provider, dynamic_api_key, api_base
         elif model.split("/", 1)[0] in litellm.provider_list:
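
A minimal sketch of how the new fallback could be exercised with the patch applied, using litellm's public `completion()` entry point; the placeholder key value and message below are illustrative, not part of this diff:

```python
import os
import litellm

# Illustrative only: TOGETHER_AI_TOKEN is the fallback env var name added by
# this diff. Because get_llm_provider() chains the lookups with `or`, it is
# consulted only when TOGETHER_API_KEY, TOGETHER_AI_API_KEY, and
# TOGETHERAI_API_KEY are all unset.
os.environ["TOGETHER_AI_TOKEN"] = "your-together-ai-key"  # hypothetical placeholder

response = litellm.completion(
    model="together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1",
    messages=[{"role": "user", "content": "Hello from Together AI"}],
)
print(response)
```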