forked from phoenix/litellm-mirror
(test) remove unused tests
This commit is contained in:
parent b5968a3ed8
commit 80002e9a14

1 changed file with 0 additions and 145 deletions

@@ -204,151 +204,6 @@ def test_completion_cohere_stream_bad_key():
-# test_completion_cohere_stream_bad_key()
-
-# def test_completion_nlp_cloud():
-#     try:
-#         messages = [
-#             {"role": "system", "content": "You are a helpful assistant."},
-#             {
-#                 "role": "user",
-#                 "content": "how does a court case get to the Supreme Court?",
-#             },
-#         ]
-#         response = completion(model="dolphin", messages=messages, stream=True)
-#         complete_response = ""
-#         # Add any assertions here to check the response
-#         has_finish_reason = False
-#         for idx, chunk in enumerate(response):
-#             chunk, finished = streaming_format_tests(idx, chunk)
-#             has_finish_reason = finished
-#             complete_response += chunk
-#             if finished:
-#                 break
-#         if has_finish_reason is False:
-#             raise Exception("Finish reason not in final chunk")
-#         if complete_response.strip() == "":
-#             raise Exception("Empty response received")
-#         print(f"completion_response: {complete_response}")
-#     except Exception as e:
-#         pytest.fail(f"Error occurred: {e}")
-
-# test_completion_nlp_cloud()
-
-# def test_completion_nlp_cloud_bad_key():
-#     try:
-#         api_key = "bad-key"
-#         messages = [
-#             {"role": "system", "content": "You are a helpful assistant."},
-#             {
-#                 "role": "user",
-#                 "content": "how does a court case get to the Supreme Court?",
-#             },
-#         ]
-#         response = completion(model="dolphin", messages=messages, stream=True, api_key=api_key)
-#         complete_response = ""
-#         # Add any assertions here to check the response
-#         has_finish_reason = False
-#         for idx, chunk in enumerate(response):
-#             chunk, finished = streaming_format_tests(idx, chunk)
-#             has_finish_reason = finished
-#             complete_response += chunk
-#             if finished:
-#                 break
-#         if has_finish_reason is False:
-#             raise Exception("Finish reason not in final chunk")
-#         if complete_response.strip() == "":
-#             raise Exception("Empty response received")
-#         print(f"completion_response: {complete_response}")
-#     except Exception as e:
-#         pytest.fail(f"Error occurred: {e}")
-
-# test_completion_nlp_cloud_bad_key()
-
-# def test_completion_hf_stream():
-#     try:
-#         litellm.set_verbose = True
-#         # messages = [
-#         #     {
-#         #         "content": "Hello! How are you today?",
-#         #         "role": "user"
-#         #     },
-#         # ]
-#         # response = completion(
-#         #     model="huggingface/mistralai/Mistral-7B-Instruct-v0.1", messages=messages, api_base="https://n9ox93a8sv5ihsow.us-east-1.aws.endpoints.huggingface.cloud", stream=True, max_tokens=1000
-#         # )
-#         # complete_response = ""
-#         # # Add any assertions here to check the response
-#         # for idx, chunk in enumerate(response):
-#         #     chunk, finished = streaming_format_tests(idx, chunk)
-#         #     if finished:
-#         #         break
-#         #     complete_response += chunk
-#         # if complete_response.strip() == "":
-#         #     raise Exception("Empty response received")
-#         # completion_response_1 = complete_response
-#         messages = [
-#             {
-#                 "content": "Hello! How are you today?",
-#                 "role": "user"
-#             },
-#             {
-#                 "content": "I'm doing well, thank you for asking! I'm excited to be here and help you with any questions or concerns you may have. What can I assist you with today?",
-#                 "role": "assistant"
-#             },
-#             {
-#                 "content": "What is the price of crude oil?",
-#                 "role": "user"
-#             },
-#         ]
-#         response = completion(
-#             model="huggingface/mistralai/Mistral-7B-Instruct-v0.1", messages=messages, api_base="https://n9ox93a8sv5ihsow.us-east-1.aws.endpoints.huggingface.cloud", stream=True, max_tokens=1000, n=1
-#         )
-#         complete_response = ""
-#         # Add any assertions here to check the response
-#         for idx, chunk in enumerate(response):
-#             chunk, finished = streaming_format_tests(idx, chunk)
-#             if finished:
-#                 break
-#             complete_response += chunk
-#         if complete_response.strip() == "":
-#             raise Exception("Empty response received")
-#         # print(f"completion_response_1: {completion_response_1}")
-#         print(f"completion_response: {complete_response}")
-#     except InvalidRequestError as e:
-#         pass
-#     except Exception as e:
-#         pytest.fail(f"Error occurred: {e}")
-
-# test_completion_hf_stream()
-
-# def test_completion_hf_stream_bad_key():
-#     try:
-#         api_key = "bad-key"
-#         messages = [
-#             {
-#                 "content": "Hello! How are you today?",
-#                 "role": "user"
-#             },
-#         ]
-#         response = completion(
-#             model="huggingface/meta-llama/Llama-2-7b-chat-hf", messages=messages, api_base="https://a8l9e3ucxinyl3oj.us-east-1.aws.endpoints.huggingface.cloud", stream=True, max_tokens=1000, api_key=api_key
-#         )
-#         complete_response = ""
-#         # Add any assertions here to check the response
-#         for idx, chunk in enumerate(response):
-#             chunk, finished = streaming_format_tests(idx, chunk)
-#             if finished:
-#                 break
-#             complete_response += chunk
-#         if complete_response.strip() == "":
-#             raise Exception("Empty response received")
-#         print(f"completion_response: {complete_response}")
-#     except InvalidRequestError as e:
-#         pass
-#     except Exception as e:
-#         pytest.fail(f"Error occurred: {e}")
-
-# test_completion_hf_stream_bad_key()
-
 def test_completion_azure_stream():
     try:
         litellm.set_verbose = True