diff --git a/litellm/tests/test_together_ai.py b/litellm/tests/test_together_ai.py
index ac7ea6771f..361ca8ee7f 100644
--- a/litellm/tests/test_together_ai.py
+++ b/litellm/tests/test_together_ai.py
@@ -1,64 +1,64 @@
-import sys, os
-import traceback
+# import sys, os
+# import traceback
 
-sys.path.insert(
-    0, os.path.abspath("../..")
-)  # Adds the parent directory to the system path
-import time
-import litellm
-import openai
-import pytest
+# sys.path.insert(
+#     0, os.path.abspath("../..")
+# )  # Adds the parent directory to the system path
+# import time
+# import litellm
+# import openai
+# import pytest
 
-### Together AI
-import together
-together.api_key = "60c79880fc49df126d3e87b53f8a463ff6e1c6d27fe64207cde25cdfcd1f2f36"
+# ### Together AI
+# import together
+# together.api_key = ""
 
-sample_message = [
-    {"role": "user", "content": "Who are you"},
-    {"role": "assistant", "content": "I am your helpful assistant."},
-    {"role": "user", "content": "Tell me a joke"},
-]
+# sample_message = [
+#     {"role": "user", "content": "Who are you"},
+#     {"role": "assistant", "content": "I am your helpful assistant."},
+#     {"role": "user", "content": "Tell me a joke"},
+# ]
 
 
-def format_prompt_togetherai(messages, prompt_format, stop_words):
-    start_token, end_token = prompt_format.split('{prompt}')
-    prompt = ''
-    for message in messages:
-        role = message['role']
-        message_content = message['content']
-        if role == 'system':
-            prompt += f"{start_token}\n<<SYS>>\n{message_content}\n<</SYS>>\n"
-        elif role == 'user':
-            prompt += f"{start_token}{message_content}{end_token}"
-        else:
-            prompt += f'{message_content}{stop_words[0]}'
-    return prompt
+# def format_prompt_togetherai(messages, prompt_format, stop_words):
+#     start_token, end_token = prompt_format.split('{prompt}')
+#     prompt = ''
+#     for message in messages:
+#         role = message['role']
+#         message_content = message['content']
+#         if role == 'system':
+#             prompt += f"{start_token}\n<<SYS>>\n{message_content}\n<</SYS>>\n"
+#         elif role == 'user':
+#             prompt += f"{start_token}{message_content}{end_token}"
+#         else:
+#             prompt += f'{message_content}{stop_words[0]}'
+#     return prompt
 
 
-model = 'togethercomputer/CodeLlama-13b-Instruct'
-stop_words = list(together.Models.info(model)['config']['stop'])
-prompt_format = str(together.Models.info(model)['config']['prompt_format'])
-formatted_prompt = format_prompt_togetherai(
-    messages=sample_message, prompt_format=prompt_format, stop_words=stop_words)
-for token in together.Complete.create_streaming(prompt=formatted_prompt,
-                                                model=model, stop=stop_words, max_tokens=512):
-    print(token, end="")
+# model = 'togethercomputer/CodeLlama-13b-Instruct'
+# stop_words = list(together.Models.info(model)['config']['stop'])
+# prompt_format = str(together.Models.info(model)['config']['prompt_format'])
+# formatted_prompt = format_prompt_togetherai(
+#     messages=sample_message, prompt_format=prompt_format, stop_words=stop_words)
+# for token in together.Complete.create_streaming(prompt=formatted_prompt,
+#                                                 model=model, stop=stop_words, max_tokens=512):
+#     print(token, end="")
 
 
-### litellm
+# ### litellm
 
-import os
-from litellm import completion
+# import os
+# from litellm import completion
 
-os.environ["TOGETHERAI_API_KEY"] = "60c79880fc49df126d3e87b53f8a463ff6e1c6d27fe64207cde25cdfcd1f2f36"
+# os.environ["TOGETHERAI_API_KEY"] = ""
 
-sample_message = [
-    {"role": "user", "content": "Who are you"},
-    {"role": "assistant", "content": "I am your helpful assistant."},
-    {"role": "user", "content": "Tell me a joke"},
-]
+# sample_message = [
+#     {"role": "user", "content": "Who are you"},
+#     {"role": "assistant", "content": "I am your helpful assistant."},
+#     {"role": "user", "content": "Tell me a joke"},
+# ]
 
-res = completion(model="together_ai/togethercomputer/CodeLlama-13b-Instruct",
-                 messages=sample_message, stream=False, max_tokens=1000)
+# res = completion(model="together_ai/togethercomputer/CodeLlama-13b-Instruct",
+#                  messages=sample_message, stream=False, max_tokens=1000)
 
-print(list(res))
\ No newline at end of file
+# print(list(res))
\ No newline at end of file