diff --git a/litellm/main.py b/litellm/main.py
index b4bb6d9a28..1d56a16009 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -1927,7 +1927,7 @@ def text_completion(*args, **kwargs):
         raw_response = response._hidden_params.get("original_response", None)
         transformed_logprobs = litellm.utils.transform_logprobs(raw_response)
     except Exception as e:
-        print_verbose("LiteLLM non blocking exception", e)
+        print_verbose(f"LiteLLM non blocking exception: {e}")
     text_completion_response["id"] = response["id"]
     text_completion_response["object"] = "text_completion"
     text_completion_response["created"] = response["created"]
diff --git a/litellm/tests/test_text_completion.py b/litellm/tests/test_text_completion.py
index a29ceaf30f..17502c1948 100644
--- a/litellm/tests/test_text_completion.py
+++ b/litellm/tests/test_text_completion.py
@@ -49,22 +49,22 @@ def test_completion_openai_prompt_array():
         pytest.fail(f"Error occurred: {e}")
 test_completion_openai_prompt_array()
 
-def test_completion_hf_prompt_array():
-    try:
-        litellm.set_verbose=False
-        response = text_completion(
-            model="huggingface/mistralai/Mistral-7B-v0.1",
-            prompt=token_prompt, # token prompt is a 2d list
-        )
-        print("\n\n response")
+# def test_completion_hf_prompt_array():
+#     try:
+#         litellm.set_verbose=False
+#         response = text_completion(
+#             model="huggingface/mistralai/Mistral-7B-v0.1",
+#             prompt=token_prompt, # token prompt is a 2d list
+#         )
+#         print("\n\n response")
 
-        print(response)
-        print(response.choices)
-        assert(len(response.choices)==2)
-        # response_str = response["choices"][0]["text"]
-    except Exception as e:
-        pytest.fail(f"Error occurred: {e}")
-test_completion_hf_prompt_array()
+#         print(response)
+#         print(response.choices)
+#         assert(len(response.choices)==2)
+#         # response_str = response["choices"][0]["text"]
+#     except Exception as e:
+#         pytest.fail(f"Error occurred: {e}")
+# test_completion_hf_prompt_array()
 
 def test_completion_text_003_prompt_array():
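
Context on the `litellm/main.py` hunk: the old call passed two positional arguments to `print_verbose`. A minimal sketch of why that matters, assuming `print_verbose` accepts a single string argument (its signature is not shown in this diff; the helper below is a hypothetical stand-in):

```python
# Hypothetical stand-in for litellm's print_verbose helper; the real
# signature is not in this diff, but the fix suggests it takes exactly
# one positional argument.
def print_verbose(print_statement):
    print(print_statement)

try:
    raise ValueError("could not transform logprobs")  # simulated failure
except Exception as e:
    # The old form, print_verbose("LiteLLM non blocking exception", e),
    # would itself raise TypeError ("takes 1 positional argument but 2
    # were given") inside the handler, turning a non-blocking log line
    # into a crash. Interpolating the exception into one f-string avoids
    # that and keeps the error text in the output.
    print_verbose(f"LiteLLM non blocking exception: {e}")
```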