forked from phoenix/litellm-mirror

add text_completion

parent 0dd9231e2c
commit fedd16361f

3 changed files with 13 additions and 8 deletions
````diff
@@ -12,7 +12,8 @@ response = openai.Completion.create(
 ## Using LiteLLM in the Text Completion format
 ### With gpt-3.5-turbo
 ```python
-response = openai.Completion.create(
+from litellm import text_completion
+response = text_completion(
     model="gpt-3.5-turbo",
     prompt='Write a tagline for a traditional bavarian tavern',
     temperature=0,
````
````diff
@@ -21,7 +22,7 @@ response = openai.Completion.create(
 
 ### With text-davinci-003
 ```python
-response = openai.Completion.create(
+response = text_completion(
     model="text-davinci-003",
     prompt='Write a tagline for a traditional bavarian tavern',
     temperature=0,
````
````diff
@@ -30,7 +31,7 @@ response = openai.Completion.create(
 
 ### With llama2
 ```python
-response = openai.Completion.create(
+response = text_completion(
     model="togethercomputer/llama-2-70b-chat",
     prompt='Write a tagline for a traditional bavarian tavern',
     temperature=0,
````
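Taken together, the three documentation hunks above replace the direct `openai.Completion.create(` call with litellm's `text_completion(` while leaving the arguments untouched. Below is a minimal sketch of the updated examples once the patch is applied, assuming the relevant provider API keys are already set in the environment; the response-access pattern follows the test updated later in this commit:

```python
from litellm import text_completion

# One Completion-style call signature across providers; only `model` changes.
for model in ["gpt-3.5-turbo", "text-davinci-003", "togethercomputer/llama-2-70b-chat"]:
    response = text_completion(
        model=model,
        prompt="Write a tagline for a traditional bavarian tavern",
        temperature=0,
    )
    # text_completion routes through completion(), so the result arrives
    # in the chat-completion shape rather than the legacy text shape.
    print(model, "->", response["choices"][0]["message"]["content"])
```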
````diff
@@ -91,7 +91,6 @@ def completion(
     custom_llm_provider=None,
     custom_api_base=None,
     litellm_call_id=None,
-    prompt="", # allow completion to be used as textCompletion or as ChatCompletion
     # model specific optional params
     # used by text-bison only
     top_k=40,
````
````diff
@@ -102,8 +101,6 @@ def completion(
     try:
         if fallbacks != []:
             return completion_with_fallbacks(**args)
-        if messages == [] and prompt!="":
-            messages = [{"role": "user", "content": prompt}]
         if litellm.model_alias_map and model in litellm.model_alias_map:
             args["model_alias_map"] = litellm.model_alias_map
             model = litellm.model_alias_map[model] # update the model to the actual value if an alias has been passed in
````
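The surviving context lines in this hunk also show `completion()`'s alias handling: if `litellm.model_alias_map` contains the requested model name, the name is rewritten to its mapped value before the call proceeds. A hedged sketch from the caller's side (the alias `"prod-model"` is invented for illustration):

```python
import litellm
from litellm import completion

# Hypothetical alias: callers ask for "prod-model", and litellm resolves it
# to the real model name before dispatching the request.
litellm.model_alias_map = {"prod-model": "gpt-3.5-turbo"}

response = completion(
    model="prod-model",  # rewritten to "gpt-3.5-turbo" inside completion()
    messages=[{"role": "user", "content": "Hello!"}],
)
```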
````diff
@@ -867,6 +864,13 @@ def embedding(
         custom_llm_provider="azure" if azure == True else None,
     )
 
+###### Text Completion ################
+def text_completion(*args, **kwargs):
+    if 'prompt' in kwargs:
+        messages = [{'role': 'system', 'content': kwargs['prompt']}]
+        kwargs['messages'] = messages
+        kwargs.pop('prompt')
+    return completion(*args, **kwargs)
 
 ####### HELPER FUNCTIONS ################
 ## Set verbose to true -> ```litellm.set_verbose = True```
````
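The added `text_completion` wrapper does one thing: it rewrites the Completion-style `prompt` kwarg into the `messages` list that `completion()` expects, then delegates. One detail worth noting is that the prompt lands as a `system` message, whereas the `prompt` path removed from `completion()` above built a `user` message. A sketch of the resulting equivalence:

```python
from litellm import completion, text_completion

# After text_completion rewrites its kwargs, these two calls are equivalent:
r1 = text_completion(model="gpt-3.5-turbo", prompt="Say hi")
r2 = completion(
    model="gpt-3.5-turbo",
    # Note the role: the wrapper injects the prompt as a *system* message,
    # while the old in-completion() path used a *user* message.
    messages=[{"role": "system", "content": "Say hi"}],
)
```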
````diff
@@ -10,7 +10,7 @@ sys.path.insert(
 ) # Adds the parent directory to the system path
 import pytest
 import litellm
-from litellm import embedding, completion
+from litellm import embedding, completion, text_completion
 
 # from infisical import InfisicalClient
 
````
````diff
@@ -144,7 +144,7 @@ def test_completion_openai():
 
 def test_completion_openai_prompt():
     try:
-        response = completion(model="gpt-3.5-turbo", prompt="What's the weather in SF?")
+        response = text_completion(model="gpt-3.5-turbo", prompt="What's the weather in SF?")
         response_str = response["choices"][0]["message"]["content"]
         response_str_2 = response.choices[0].message.content
         print(response)
````
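The updated test doubles as documentation for the response object: it reads the same content via dictionary-style and attribute-style access. A minimal sketch of that pattern, using the same model and prompt as the test:

```python
from litellm import text_completion

response = text_completion(model="gpt-3.5-turbo", prompt="What's the weather in SF?")

# litellm responses support both access styles, as the test above verifies:
content_a = response["choices"][0]["message"]["content"]
content_b = response.choices[0].message.content
assert content_a == content_b
```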