add text_completion

ishaan-jaff 2023-08-28 08:20:20 -07:00
parent 0dd9231e2c
commit fedd16361f
3 changed files with 13 additions and 8 deletions


@@ -12,7 +12,8 @@ response = openai.Completion.create(
 ## Using LiteLLM in the Text Completion format
 ### With gpt-3.5-turbo
 ```python
-response = openai.Completion.create(
+from litellm import text_completion
+response = text_completion(
     model="gpt-3.5-turbo",
     prompt='Write a tagline for a traditional bavarian tavern',
     temperature=0,
@@ -21,7 +22,7 @@ response = openai.Completion.create(
 ### With text-davinci-003
 ```python
-response = openai.Completion.create(
+response = text_completion(
     model="text-davinci-003",
     prompt='Write a tagline for a traditional bavarian tavern',
     temperature=0,
@@ -30,7 +31,7 @@ response = openai.Completion.create(
 ### With llama2
 ```python
-response = openai.Completion.create(
+response = text_completion(
     model="togethercomputer/llama-2-70b-chat",
     prompt='Write a tagline for a traditional bavarian tavern',
     temperature=0,
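For reference, a complete, runnable version of the gpt-3.5-turbo snippet above might look like the sketch below; the closing parenthesis, the `max_tokens` value, and the `print` line are assumptions, since the diff truncates each call after `temperature=0`:

```python
from litellm import text_completion

# hedged sketch: max_tokens and the print line are assumed, not in the diff
response = text_completion(
    model="gpt-3.5-turbo",
    prompt='Write a tagline for a traditional bavarian tavern',
    temperature=0,
    max_tokens=100,
)
print(response["choices"][0]["message"]["content"])
```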


@@ -91,7 +91,6 @@ def completion(
     custom_llm_provider=None,
     custom_api_base=None,
     litellm_call_id=None,
-    prompt="",  # allow completion to be used as textCompletion or as ChatCompletion
     # model specific optional params
     # used by text-bison only
     top_k=40,
@ -102,8 +101,6 @@ def completion(
try: try:
if fallbacks != []: if fallbacks != []:
return completion_with_fallbacks(**args) return completion_with_fallbacks(**args)
if messages == [] and prompt!="":
messages = [{"role": "user", "content": prompt}]
if litellm.model_alias_map and model in litellm.model_alias_map: if litellm.model_alias_map and model in litellm.model_alias_map:
args["model_alias_map"] = litellm.model_alias_map args["model_alias_map"] = litellm.model_alias_map
model = litellm.model_alias_map[model] # update the model to the actual value if an alias has been passed in model = litellm.model_alias_map[model] # update the model to the actual value if an alias has been passed in
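The two lines removed here are what previously let `completion()` accept a bare `prompt`; after this commit that conversion lives only in the new `text_completion` wrapper added below. A hedged sketch of the call-site migration, with the model name chosen purely for illustration:

```python
from litellm import text_completion

# before this commit, completion() itself built messages from a bare prompt:
#     response = completion(model="gpt-3.5-turbo", prompt="Hi")
# after it, the same call site goes through the new wrapper instead:
response = text_completion(model="gpt-3.5-turbo", prompt="Hi")
```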
@@ -867,6 +864,13 @@ def embedding(
         custom_llm_provider="azure" if azure == True else None,
     )
+###### Text Completion ################
+def text_completion(*args, **kwargs):
+    if 'prompt' in kwargs:
+        messages = [{'role': 'system', 'content': kwargs['prompt']}]
+        kwargs['messages'] = messages
+        kwargs.pop('prompt')
+    return completion(*args, **kwargs)
 ####### HELPER FUNCTIONS ################
 ## Set verbose to true -> ```litellm.set_verbose = True```
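The new wrapper rewrites a `prompt` kwarg into a one-message `messages` list and delegates everything else to `completion`. Note that it assigns the `'system'` role, whereas the code removed from `completion()` above used `'user'`. A minimal sketch of the equivalence:

```python
from litellm import completion, text_completion

# this call...
response = text_completion(model="gpt-3.5-turbo", prompt="Hello")

# ...unrolls to the following; the wrapper pops 'prompt' from kwargs and
# passes the remaining arguments through unchanged
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "system", "content": "Hello"}],
)
```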


@@ -10,7 +10,7 @@ sys.path.insert(
 )  # Adds the parent directory to the system path
 import pytest
 import litellm
-from litellm import embedding, completion
+from litellm import embedding, completion, text_completion
 # from infisical import InfisicalClient
@@ -144,7 +144,7 @@ def test_completion_openai():
 def test_completion_openai_prompt():
     try:
-        response = completion(model="gpt-3.5-turbo", prompt="What's the weather in SF?")
+        response = text_completion(model="gpt-3.5-turbo", prompt="What's the weather in SF?")
         response_str = response["choices"][0]["message"]["content"]
         response_str_2 = response.choices[0].message.content
         print(response)
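The diff cuts off after the `print` call; a self-contained version of the updated test might end as sketched here, where the final assertion and the `except` clause are assumptions not shown in the diff:

```python
import pytest
from litellm import text_completion

def test_completion_openai_prompt():
    try:
        response = text_completion(
            model="gpt-3.5-turbo", prompt="What's the weather in SF?"
        )
        # both access styles should return the same content
        response_str = response["choices"][0]["message"]["content"]
        response_str_2 = response.choices[0].message.content
        assert response_str == response_str_2
    except Exception as e:
        # assumed error handling; the diff does not show the except block
        pytest.fail(f"Error occurred: {e}")
```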