From 3ea776bdc0526d54ee6dc4ab172544c375fb6998 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Fri, 17 Nov 2023 18:25:21 -0800
Subject: [PATCH] fix(text_completion): allow either model or engine to be set

---
 litellm/main.py                       | 2 +-
 litellm/tests/test_text_completion.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/litellm/main.py b/litellm/main.py
index d469dd8fe..2386a5bba 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -1845,7 +1845,7 @@ def embedding(
 ###### Text Completion ################
 def text_completion(
     prompt: Union[str, List[Union[str, List[Union[str, List[int]]]]]], # Required: The prompt(s) to generate completions for.
-    model: Optional[str], # Optional: either `model` or `engine` can be set
+    model: Optional[str]=None, # Optional: either `model` or `engine` can be set
     best_of: Optional[int] = None, # Optional: Generates best_of completions server-side.
     echo: Optional[bool] = None, # Optional: Echo back the prompt in addition to the completion.
     frequency_penalty: Optional[float] = None, # Optional: Penalize new tokens based on their existing frequency.
diff --git a/litellm/tests/test_text_completion.py b/litellm/tests/test_text_completion.py
index 90505d911..660367651 100644
--- a/litellm/tests/test_text_completion.py
+++ b/litellm/tests/test_text_completion.py
@@ -61,7 +61,7 @@ def test_completion_openai_engine():
         #print(response.choices[0].text)
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-# test_completion_openai_engine()
+test_completion_openai_engine()
 
 def test_completion_chatgpt_prompt():
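
Note on usage: with `model` now defaulting to None, text_completion can be called with only the `engine` parameter, which is what the re-enabled test_completion_openai_engine exercises. A minimal sketch of the calling pattern, assuming text_completion is exported at the litellm package level and an OpenAI API key is configured; the engine name below is illustrative, not taken from the patch:

    import litellm

    # `model` now defaults to None, so passing only `engine` no longer fails with
    # a missing-argument error. "gpt-3.5-turbo-instruct" is an illustrative engine name.
    response = litellm.text_completion(
        engine="gpt-3.5-turbo-instruct",
        prompt="Say this is a test",
    )
    print(response.choices[0].text)  # text completions expose .choices[0].text, per the test file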