Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 11:14:04 +00:00)
add text completion format support
This commit is contained in:
parent 09598dfc70
commit dfdf47ddd0

3 changed files with 53 additions and 1 deletion
docs/my-website/docs/tutorials/text_completion.md (new file, 38 lines added)
# Using Text Completion Format - with Completion()

If you prefer interfacing with the OpenAI Text Completion format, this tutorial covers how to use LiteLLM in that format.
```python
import openai

response = openai.Completion.create(
    model="text-davinci-003",
    prompt='Write a tagline for a traditional bavarian tavern',
    temperature=0,
    max_tokens=100)
```
## Using LiteLLM in the Text Completion format

### With gpt-3.5-turbo

```python
from litellm import completion

response = completion(
    model="gpt-3.5-turbo",
    prompt='Write a tagline for a traditional bavarian tavern',
    temperature=0,
    max_tokens=100)
```

### With text-davinci-003

```python
response = completion(
    model="text-davinci-003",
    prompt='Write a tagline for a traditional bavarian tavern',
    temperature=0,
    max_tokens=100)
```

### With llama2

```python
response = completion(
    model="togethercomputer/llama-2-70b-chat",
    prompt='Write a tagline for a traditional bavarian tavern',
    temperature=0,
    max_tokens=100)
```
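A note on reading the result: per the test added in this commit, the response keeps the chat-completion shape even for prompt-style calls, so the generated text lives under `choices[0].message.content`. A minimal sketch, assuming an OpenAI key is configured and reusing the model from the examples above:

```python
from litellm import completion

# Prompt-style call; the prompt is wrapped as a single user message internally
response = completion(
    model="gpt-3.5-turbo",
    prompt="Write a tagline for a traditional bavarian tavern",
)

# Both access styles return the same text (this is what the new test asserts)
print(response["choices"][0]["message"]["content"])
print(response.choices[0].message.content)
```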
@@ -64,8 +64,8 @@ async def acompletion(*args, **kwargs):
 ) ## set timeouts, in case calls hang (e.g. Azure) - default is 600s, override with `force_timeout`
 def completion(
     model,
-    messages, # required params
     # Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create
+    messages=[],
     functions=[],
     function_call="", # optional params
     temperature=1,
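Because `messages` now defaults to an empty list rather than being required, existing chat-format callers are unaffected by this signature change; a quick sketch of the unchanged usage (the model name is only an example):

```python
from litellm import completion

# Chat-format usage continues to work exactly as before this change
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Write a tagline for a traditional bavarian tavern"}],
)
```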
@@ -91,6 +91,7 @@ def completion(
     custom_llm_provider=None,
     custom_api_base=None,
     litellm_call_id=None,
+    prompt="", # allow completion to be used as textCompletion or as ChatCompletion
     # model specific optional params
     # used by text-bison only
     top_k=40,
@@ -101,6 +102,8 @@ def completion(
     try:
         if fallbacks != []:
             return completion_with_fallbacks(**args)
+        if messages == [] and prompt!="":
+            messages = [{"role": "user", "content": prompt}]
         if litellm.model_alias_map and model in litellm.model_alias_map:
             args["model_alias_map"] = litellm.model_alias_map
             model = litellm.model_alias_map[model] # update the model to the actual value if an alias has been passed in
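These two added lines are the core of the feature: when no `messages` are passed but a `prompt` is, the prompt is wrapped as a single user message before the rest of `completion()` runs. A standalone restatement of that mapping (this helper is not part of litellm, purely illustrative):

```python
def prompt_to_messages(prompt, messages=None):
    """Illustrative only: mirrors the conversion added to completion() above."""
    messages = messages if messages is not None else []
    if messages == [] and prompt != "":
        messages = [{"role": "user", "content": prompt}]
    return messages

# A text-completion style prompt becomes one chat-format message
print(prompt_to_messages("What's the weather in SF?"))
# -> [{'role': 'user', 'content': "What's the weather in SF?"}]
```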
@@ -142,6 +142,17 @@ def test_completion_openai():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

+def test_completion_openai_prompt():
+    try:
+        response = completion(model="gpt-3.5-turbo", prompt="What's the weather in SF?")
+        response_str = response["choices"][0]["message"]["content"]
+        response_str_2 = response.choices[0].message.content
+        print(response)
+        assert response_str == response_str_2
+        assert type(response_str) == str
+        assert len(response_str) > 1
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")

 def test_completion_text_openai():
     try:
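To run only the new test, a pytest call along these lines should work; the test file's path is not shown in this diff view, so the path below is an assumption based on litellm's usual test layout, and a valid `OPENAI_API_KEY` must be set for the call to succeed:

```python
import pytest

# Assumed path; this diff does not name the test file.
pytest.main(["litellm/tests/test_completion.py", "-k", "test_completion_openai_prompt"])
```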