mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 11:14:04 +00:00
test: fix testing
This commit is contained in:
parent
ac7d0a1632
commit
d3e179e5ad
1 changed files with 50 additions and 50 deletions
|
@ -1,64 +1,64 @@
|
|||
import sys, os
import traceback

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path so the local `litellm` package resolves
import time
import litellm
import openai
import pytest
||||
### Together AI
import together

# SECURITY: an API key used to be hard-coded on this line and committed to the
# repository — treat that key as leaked and rotate it. Read the key from the
# environment instead so the secret never lives in source control; the empty
# default keeps the module importable when the variable is unset.
together.api_key = os.environ.get("TOGETHER_API_KEY", "")
||||
# Short multi-turn conversation used as the prompt for the Together AI calls
# below: two user turns separated by an assistant reply.
sample_message = [
    {"role": role, "content": text}
    for role, text in (
        ("user", "Who are you"),
        ("assistant", "I am your helpful assistant."),
        ("user", "Tell me a joke"),
    )
]
||||
def format_prompt_togetherai(messages, prompt_format, stop_words):
    """Render a list of chat messages into a single Together AI prompt string.

    ``prompt_format`` is a template containing exactly one ``{prompt}``
    placeholder; the text before it wraps the start of each turn and the text
    after it closes a user turn. System messages are wrapped in ``<<SYS>>``
    markers, user messages in the template's start/end tokens, and any other
    role (assistant) is emitted verbatim followed by the first stop word.
    """
    prefix, suffix = prompt_format.split('{prompt}')
    pieces = []
    for msg in messages:
        content = msg['content']
        role = msg['role']
        if role == 'system':
            pieces.append(f"{prefix}\n<<SYS>>\n{content}\n<</SYS>>\n")
        elif role == 'user':
            pieces.append(f"{prefix}{content}{suffix}")
        else:
            # assistant (or any other) turn: raw content terminated by a stop word
            pieces.append(f'{content}{stop_words[0]}')
    return ''.join(pieces)
||||
# Call Together AI's hosted model directly and stream the completion to stdout.
# NOTE: requires network access and a valid together.api_key.
model = 'togethercomputer/CodeLlama-13b-Instruct'
# Pull the model's stop words and prompt template from Together's model registry.
stop_words = list(together.Models.info(model)['config']['stop'])
prompt_format = str(together.Models.info(model)['config']['prompt_format'])
formatted_prompt = format_prompt_togetherai(
    messages=sample_message, prompt_format=prompt_format, stop_words=stop_words)
# Print tokens as they arrive; end="" keeps the stream on one continuous line.
for token in together.Complete.create_streaming(prompt=formatted_prompt,
                                                model=model, stop=stop_words, max_tokens=512):
    print(token, end="")
||||
### litellm

import os
from litellm import completion

# SECURITY: a Together AI key used to be hard-coded and assigned into
# os.environ here, committing the secret to the repository — treat that key as
# leaked and rotate it. The key must now be supplied via the environment
# before running this script.
if "TOGETHERAI_API_KEY" not in os.environ:
    raise RuntimeError(
        "Set the TOGETHERAI_API_KEY environment variable before running this test."
    )
||||
# Conversation fixture reused for the litellm completion call below.
_chat_turns = (
    ("user", "Who are you"),
    ("assistant", "I am your helpful assistant."),
    ("user", "Tell me a joke"),
)
sample_message = [{"role": r, "content": c} for r, c in _chat_turns]
||||
# Call the same model through litellm's unified completion() API.
# NOTE: requires network access and a Together AI key in the environment.
res = completion(model="together_ai/togethercomputer/CodeLlama-13b-Instruct",
                 messages=sample_message, stream=False, max_tokens=1000)

# NOTE(review): list(res) reads like iteration over a *streaming* response,
# but stream=False is passed above — confirm whether this should be print(res).
print(list(res))
Loading…
Add table
Add a link
Reference in a new issue