mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
(feat) text_completion add rules on when to use engine & model together
This commit is contained in:
parent
a1223e1f55
commit
77468e0a70
1 changed file with 4 additions and 2 deletions
|
@ -1818,8 +1818,8 @@ def embedding(
|
|||
|
||||
###### Text Completion ################
|
||||
def text_completion(
|
||||
model: str, # Required: ID of the model to use.
|
||||
prompt: Union[str, List[Union[str, List[Union[str, List[int]]]]]], # Required: The prompt(s) to generate completions for.
|
||||
model: str = None, # Required: ID of the model to use.
|
||||
best_of: Optional[int] = None, # Optional: Generates best_of completions server-side.
|
||||
echo: Optional[bool] = None, # Optional: Echo back the prompt in addition to the completion.
|
||||
frequency_penalty: Optional[float] = None, # Optional: Penalize new tokens based on their existing frequency.
|
||||
|
@ -1875,7 +1875,9 @@ def text_completion(
|
|||
Your example of how to use this function goes here.
|
||||
"""
|
||||
if "engine" in kwargs:
|
||||
model = kwargs["engine"]
|
||||
if model==None:
|
||||
# only use engine when model not passed
|
||||
model = kwargs["engine"]
|
||||
kwargs.pop("engine")
|
||||
|
||||
text_completion_response = TextCompletionResponse()
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue