add mistral prompt templating

Krrish Dholakia 2023-09-29 21:41:19 -07:00
parent 4eddf23a37
commit 6ac9470203
6 changed files with 85 additions and 43 deletions
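
For context, a minimal sketch of the Mistral instruct prompt template that the updated test exercises, assuming the documented [INST] ... [/INST] chat format for mistralai/Mistral-7B-Instruct-v0.1; the helper name `mistral_instruct_pt` is hypothetical and not necessarily the function this commit adds:

# Sketch only: maps OpenAI-style chat messages onto Mistral's instruct format.
def mistral_instruct_pt(messages):
    prompt = "<s>"
    for message in messages:
        if message["role"] == "user":
            # user turns are wrapped in instruction markers
            prompt += f"[INST] {message['content']} [/INST]"
        elif message["role"] == "assistant":
            # assistant turns are emitted verbatim; a completed turn ends with
            # the </s> EOS token, as in the test's hard-coded assistant message
            prompt += message["content"]
    return prompt

Applied to the test's two messages, this yields `<s>[INST] Hello! How are you today? [/INST]I'm doing well, ...</s>`, which is why the assistant content in the test carries a trailing `</s>`.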

@@ -314,33 +314,58 @@ def test_completion_cohere_stream_bad_key():
# test_completion_nlp_cloud_bad_key()
# def test_completion_hf_stream():
#     try:
#         messages = [
#             {
#                 "content": "Hello! How are you today?",
#                 "role": "user"
#             },
#         ]
#         response = completion(
#             model="huggingface/meta-llama/Llama-2-7b-chat-hf", messages=messages, api_base="https://a8l9e3ucxinyl3oj.us-east-1.aws.endpoints.huggingface.cloud", stream=True, max_tokens=1000
#         )
#         complete_response = ""
#         # Add any assertions here to check the response
#         for idx, chunk in enumerate(response):
#             chunk, finished = streaming_format_tests(idx, chunk)
#             if finished:
#                 break
#             complete_response += chunk
#         if complete_response.strip() == "":
#             raise Exception("Empty response received")
#         print(f"completion_response: {complete_response}")
#     except InvalidRequestError as e:
#         pass
#     except Exception as e:
#         pytest.fail(f"Error occurred: {e}")
def test_completion_hf_stream():
    try:
        litellm.set_verbose = True
        # messages = [
        #     {
        #         "content": "Hello! How are you today?",
        #         "role": "user"
        #     },
        # ]
        # response = completion(
        #     model="huggingface/mistralai/Mistral-7B-Instruct-v0.1", messages=messages, api_base="https://n9ox93a8sv5ihsow.us-east-1.aws.endpoints.huggingface.cloud", stream=True, max_tokens=1000
        # )
        # complete_response = ""
        # # Add any assertions here to check the response
        # for idx, chunk in enumerate(response):
        #     chunk, finished = streaming_format_tests(idx, chunk)
        #     if finished:
        #         break
        #     complete_response += chunk
        # if complete_response.strip() == "":
        #     raise Exception("Empty response received")
        # completion_response_1 = complete_response
        messages = [
            {
                "content": "Hello! How are you today?",
                "role": "user"
            },
            {
                "content": "I'm doing well, thank you for asking! I'm excited to be here and help you with any questions or concerns you may have. What can I assist you with today?</s>",
                "role": "assistant"
            },
        ]
        response = completion(
            model="huggingface/mistralai/Mistral-7B-Instruct-v0.1", messages=messages, api_base="https://n9ox93a8sv5ihsow.us-east-1.aws.endpoints.huggingface.cloud", stream=True, max_tokens=1000
        )
        complete_response = ""
        # Add any assertions here to check the response
        for idx, chunk in enumerate(response):
            chunk, finished = streaming_format_tests(idx, chunk)
            if finished:
                break
            complete_response += chunk
        if complete_response.strip() == "":
            raise Exception("Empty response received")
        # print(f"completion_response_1: {completion_response_1}")
        print(f"completion_response: {complete_response}")
    except InvalidRequestError as e:
        pass
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
# # test_completion_hf_stream()
test_completion_hf_stream()
# def test_completion_hf_stream_bad_key():
# try:
@@ -680,7 +705,7 @@ def test_completion_sagemaker_stream():
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
test_completion_sagemaker_stream()
# test_completion_sagemaker_stream()
# test on openai completion call
def test_openai_text_completion_call():