diff --git a/docs/my-website/docs/tutorials/text_completion.md b/docs/my-website/docs/tutorials/text_completion.md
index 3eb95592a..1d210076e 100644
--- a/docs/my-website/docs/tutorials/text_completion.md
+++ b/docs/my-website/docs/tutorials/text_completion.md
@@ -12,7 +12,8 @@ response = openai.Completion.create(
 ## Using LiteLLM in the Text Completion format
 ### With gpt-3.5-turbo
 ```python
-response = openai.Completion.create(
+from litellm import text_completion
+response = text_completion(
     model="gpt-3.5-turbo",
     prompt='Write a tagline for a traditional bavarian tavern',
     temperature=0,
@@ -21,7 +22,7 @@
 
 ### With text-davinci-003
 ```python
-response = openai.Completion.create(
+response = text_completion(
     model="text-davinci-003",
     prompt='Write a tagline for a traditional bavarian tavern',
     temperature=0,
@@ -30,7 +31,7 @@
 
 ### With llama2
 ```python
-response = openai.Completion.create(
+response = text_completion(
     model="togethercomputer/llama-2-70b-chat",
     prompt='Write a tagline for a traditional bavarian tavern',
     temperature=0,
diff --git a/litellm/main.py b/litellm/main.py
index a161e663b..4de27dee1 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -91,7 +91,6 @@ def completion(
     custom_llm_provider=None,
     custom_api_base=None,
     litellm_call_id=None,
-    prompt="", # allow completion to be used as textCompletion or as ChatCompletion
     # model specific optional params
     # used by text-bison only
     top_k=40,
@@ -102,8 +101,6 @@
     try:
         if fallbacks != []:
             return completion_with_fallbacks(**args)
-        if messages == [] and prompt!="":
-            messages = [{"role": "user", "content": prompt}]
         if litellm.model_alias_map and model in litellm.model_alias_map:
             args["model_alias_map"] = litellm.model_alias_map
             model = litellm.model_alias_map[model] # update the model to the actual value if an alias has been passed in
@@ -867,6 +864,13 @@ def embedding(
         custom_llm_provider="azure" if azure == True else None,
     )
 
+###### Text Completion ################
+def text_completion(*args, **kwargs):
+    if 'prompt' in kwargs:
+        messages = [{'role': 'system', 'content': kwargs['prompt']}]
+        kwargs['messages'] = messages
+        kwargs.pop('prompt')
+    return completion(*args, **kwargs)
 
 ####### HELPER FUNCTIONS ################
 ## Set verbose to true -> ```litellm.set_verbose = True```
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 6aa7612d6..e59448450 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -10,7 +10,7 @@ sys.path.insert(
 ) # Adds the parent directory to the system path
 import pytest
 import litellm
-from litellm import embedding, completion
+from litellm import embedding, completion, text_completion
 
 # from infisical import InfisicalClient
 
@@ -144,7 +144,7 @@ def test_completion_openai():
 
 def test_completion_openai_prompt():
     try:
-        response = completion(model="gpt-3.5-turbo", prompt="What's the weather in SF?")
+        response = text_completion(model="gpt-3.5-turbo", prompt="What's the weather in SF?")
         response_str = response["choices"][0]["message"]["content"]
         response_str_2 = response.choices[0].message.content
         print(response)
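
For reviewers, a minimal usage sketch of the `text_completion` wrapper added to `litellm/main.py` above, assuming `litellm` is installed and `OPENAI_API_KEY` is exported; the model and prompt mirror the updated `test_completion_openai_prompt` test:

```python
# Minimal sketch of the new wrapper's usage (not part of this diff).
# Assumes: litellm installed, OPENAI_API_KEY set in the environment.
from litellm import text_completion

# `prompt` is converted into a chat message and forwarded to completion(),
# so the response keeps the ChatCompletion shape.
response = text_completion(
    model="gpt-3.5-turbo",
    prompt="What's the weather in SF?",
)

print(response["choices"][0]["message"]["content"])
```

One design note visible in the diff: the wrapper maps `prompt` to a `'system'`-role message, whereas the fallback removed from `completion()` used the `'user'` role, so callers migrating from the old `prompt=` parameter will now see their prompt delivered to the model as a system message.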