test: fix testing

This commit is contained in:
Krrish Dholakia 2023-12-06 19:35:12 -08:00
parent ac7d0a1632
commit d3e179e5ad

View file

@ -1,64 +1,64 @@
import sys, os
import traceback

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path so local litellm is importable
import time
import litellm
import openai
import pytest

### Together AI
import together

# SECURITY: a previous revision committed a live Together AI API key on this
# line. Never hard-code secrets — read the key from the environment instead,
# and rotate the leaked key.
together.api_key = os.getenv("TOGETHER_API_KEY", "")

# Small multi-turn conversation used as the shared test fixture below.
sample_message = [
    {"role": "user", "content": "Who are you"},
    {"role": "assistant", "content": "I am your helpful assistant."},
    {"role": "user", "content": "Tell me a joke"},
]
def format_prompt_togetherai(messages, prompt_format, stop_words):
    """Render a list of chat messages into a single TogetherAI prompt string.

    ``prompt_format`` must contain exactly one ``{prompt}`` placeholder; the
    text before/after it wraps each user turn. System turns are wrapped in
    Llama-style ``<<SYS>>`` tags, and every other role (assistant) is
    terminated with the first stop word.
    """
    start_token, end_token = prompt_format.split('{prompt}')
    pieces = []
    for msg in messages:
        content = msg['content']
        role = msg['role']
        if role == 'system':
            pieces.append(f"{start_token}\n<<SYS>>\n{content}\n<</SYS>>\n")
        elif role == 'user':
            pieces.append(f"{start_token}{content}{end_token}")
        else:
            pieces.append(f"{content}{stop_words[0]}")
    return ''.join(pieces)
# Fetch the model's prompt template and stop sequences, build the prompt,
# then stream a raw completion to stdout.
model = 'togethercomputer/CodeLlama-13b-Instruct'
# Fix: the original called together.Models.info(model) twice — two identical
# network round-trips for the same config dict. Fetch it once and reuse.
model_config = together.Models.info(model)['config']
stop_words = list(model_config['stop'])
prompt_format = str(model_config['prompt_format'])
formatted_prompt = format_prompt_togetherai(
    messages=sample_message, prompt_format=prompt_format, stop_words=stop_words)
for token in together.Complete.create_streaming(prompt=formatted_prompt,
                                                model=model, stop=stop_words,
                                                max_tokens=512):
    print(token, end="")
### litellm
# Repeat the same conversation through litellm's unified completion API.
import os
from litellm import completion

# SECURITY: a previous revision committed a live Together AI key on this line.
# Do not overwrite a key the caller already exported; never hard-code secrets.
# The leaked key must be rotated.
os.environ.setdefault("TOGETHERAI_API_KEY", "")
sample_message = [
    {"role": "user", "content": "Who are you"},
    {"role": "assistant", "content": "I am your helpful assistant."},
    {"role": "user", "content": "Tell me a joke"},
]
# stream=False returns a single response object; list() materializes it for
# printing (kept as-is to preserve the original script's output).
res = completion(model="together_ai/togethercomputer/CodeLlama-13b-Instruct",
                 messages=sample_message, stream=False, max_tokens=1000)
print(list(res))