forked from phoenix/litellm-mirror
fix(text_completion): allow either model or engine to be set
parent 8e430fcfbe
commit 3ea776bdc0

2 changed files with 2 additions and 2 deletions
@@ -1845,7 +1845,7 @@ def embedding(
 ###### Text Completion ################
 def text_completion(
     prompt: Union[str, List[Union[str, List[Union[str, List[int]]]]]],  # Required: The prompt(s) to generate completions for.
-    model: Optional[str],  # Optional: either `model` or `engine` can be set
+    model: Optional[str]=None,  # Optional: either `model` or `engine` can be set
     best_of: Optional[int] = None,  # Optional: Generates best_of completions server-side.
     echo: Optional[bool] = None,  # Optional: Echo back the prompt in addition to the completion.
     frequency_penalty: Optional[float] = None,  # Optional: Penalize new tokens based on their existing frequency.
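With `model` now defaulting to `None`, callers can supply `engine` (the Azure/legacy-OpenAI style parameter) instead of `model` without hitting a missing-argument error. A minimal usage sketch, assuming `engine` is forwarded to `text_completion` as a keyword argument and mapped to a model internally (as the companion test below suggests); the API key and model name are placeholders, not part of this diff:

import os
import litellm

os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder key

# Before this commit, `model` had no default, so the engine-only
# form below failed with a missing-argument TypeError.
response = litellm.text_completion(
    model="gpt-3.5-turbo-instruct",   # explicit model still works
    prompt="Say hello",
)

response = litellm.text_completion(
    engine="gpt-3.5-turbo-instruct",  # engine-only form enabled by this change
    prompt="Say hello",
)
print(response.choices[0].text)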
@@ -61,7 +61,7 @@ def test_completion_openai_engine():
         #print(response.choices[0].text)
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-# test_completion_openai_engine()
+test_completion_openai_engine()
 
 
 def test_completion_chatgpt_prompt():
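For reference, this second hunk only enables `test_completion_openai_engine()` to run at module import. The test body itself lies outside the visible context lines; a hypothetical reconstruction consistent with what the hunk does show (a try/except around a completion call, with the print commented out) could look like the following; the engine name, prompt, and max_tokens are assumptions, not part of the diff:

import pytest
from litellm import text_completion

def test_completion_openai_engine():
    try:
        # hypothetical call: only `engine` is passed, exercising the new
        # model=None default from the first hunk
        response = text_completion(
            engine="gpt-3.5-turbo-instruct",  # placeholder engine name
            prompt="write a one-line haiku about testing",
            max_tokens=10,
        )
        #print(response.choices[0].text)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")

# now invoked at import time, matching the change above
test_completion_openai_engine()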