diff --git a/litellm/main.py b/litellm/main.py
index 3732d89f94..90c9b37e69 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -496,7 +496,8 @@ def completion(
                 additional_args={"headers": headers},
             )
         elif (
-            model in litellm.open_ai_text_completion_models
+            custom_llm_provider == "text-completion-openai"
+            or model in litellm.open_ai_text_completion_models
             or "ft:babbage-002" in model
             or "ft:davinci-002" in model  # support for finetuned completion models
             # NOTE: Do NOT add custom_llm_provider == "openai".
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 47415312f2..c6d92b0239 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -426,21 +426,12 @@ def test_completion_hf_prompt_array():
 # test_completion_hf_prompt_array()
 
 def test_completion_text_openai():
-    try:
-        # litellm.set_verbose=True
-        response = completion(model="text-davinci-003", messages=messages)
-        # Add any assertions here to check the response
-        print(response)
-    except Exception as e:
-        pytest.fail(f"Error occurred: {e}")
-
-def test_completion_gpt_instruct():
     try:
         response = completion(model="gpt-3.5-turbo-instruct", messages=messages)
         print(response)
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-# test_completion_gpt_instruct()
+test_completion_text_openai()
 
 def test_completion_openai_with_optional_params():
     try:
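
For context, a minimal sketch of how the new branch would be exercised (assumptions: an OpenAI key is set in the environment, the "text-completion-openai/" model prefix is registered in litellm's provider list, and `messages` here is only a stand-in for the fixture defined at the top of test_completion.py):

    from litellm import completion

    messages = [{"role": "user", "content": "Hey, how's it going?"}]

    # Explicit provider prefix: resolves custom_llm_provider to "text-completion-openai"
    # and hits the new check added in main.py
    response = completion(
        model="text-completion-openai/gpt-3.5-turbo-instruct",
        messages=messages,
    )
    print(response)

    # Implicit routing, as in test_completion_text_openai: the bare model name still
    # matches litellm.open_ai_text_completion_models
    response = completion(model="gpt-3.5-turbo-instruct", messages=messages)
    print(response)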