forked from phoenix/litellm-mirror
fix hf conversational task bug
This commit is contained in:
parent 154aa83b5b
commit 5b294c704e

4 changed files with 11 additions and 0 deletions
Binary file not shown.
Binary file not shown.
@@ -56,6 +56,7 @@ def completion(
        if task == "conversational":
            inference_params = copy.deepcopy(optional_params)
            inference_params.pop("details")
            inference_params.pop("return_full_text")
            past_user_inputs = []
            generated_responses = []
            text = ""
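For context on the hunk above: `details` and `return_full_text` are parameters that only the text-generation task accepts, so they are stripped before the request; the three empty variables are then filled from the chat history. Below is a minimal sketch of how such a payload is plausibly assembled for the Hugging Face Inference API conversational task — `build_conversational_request`, `api_base`, and `headers` are hypothetical names, not litellm's actual code:

import copy

import requests

def build_conversational_request(messages, optional_params, api_base, headers):
    # Hypothetical helper (not litellm's actual function) illustrating the
    # hunk above: drop text-generation-only parameters, then split the chat
    # history into the fields the HF conversational task expects.
    inference_params = copy.deepcopy(optional_params)
    inference_params.pop("details", None)           # only valid for text-generation
    inference_params.pop("return_full_text", None)  # only valid for text-generation

    past_user_inputs = []
    generated_responses = []
    text = ""  # the newest user turn, sent separately from the history
    for message in messages:
        if message["role"] == "user":
            if text:
                past_user_inputs.append(text)
            text = message["content"]
        else:  # assistant/system turns become prior model responses
            generated_responses.append(message["content"])

    data = {
        "inputs": {
            "past_user_inputs": past_user_inputs,
            "generated_responses": generated_responses,
            "text": text,
        },
        "parameters": inference_params,
    }
    return requests.post(api_base, headers=headers, json=data)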
@@ -420,6 +420,16 @@ def test_completion_azure_deployment_id():
        pytest.fail(f"Error occurred: {e}")

# test_completion_azure_deployment_id()

# def test_hf_conversational_task():
#     try:
#         messages = [{ "content": "There's a llama in my garden 😱 What should I do?","role": "user"}]
#         # e.g. Call 'facebook/blenderbot-400M-distill' hosted on HF Inference endpoints
#         response = completion(model="huggingface/facebook/blenderbot-400M-distill", messages=messages, task="conversational")
#         print(f"response: {response}")
#     except Exception as e:
#         pytest.fail(f"Error occurred: {e}")

# test_hf_conversational_task()

# Replicate API endpoints are unstable -> throw random CUDA errors -> this means our tests can fail even if our tests weren't incorrect.

# def test_completion_replicate_llama_2():
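The new test above lands commented out, presumably because hosted HF inference endpoints can be flaky. Here is a runnable reconstruction, under the assumptions that `completion` is imported from `litellm` and that a `HUGGINGFACE_API_KEY` is available; the skip guard is an addition for local runs, not part of the original test:

import os

import pytest

from litellm import completion

@pytest.mark.skipif(
    "HUGGINGFACE_API_KEY" not in os.environ,
    reason="needs a Hugging Face API key for the hosted inference endpoint",
)
def test_hf_conversational_task():
    messages = [{"content": "There's a llama in my garden 😱 What should I do?", "role": "user"}]
    try:
        # e.g. call 'facebook/blenderbot-400M-distill' hosted on HF Inference endpoints
        response = completion(
            model="huggingface/facebook/blenderbot-400M-distill",
            messages=messages,
            task="conversational",
        )
        print(f"response: {response}")
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")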