support for finetuned gpt-3.5-turbo
This commit is contained in: commit bb7aa6eaaf (parent d01017256a)
3 changed files with 25 additions and 1 deletion
docs/my-website/docs/tutorials/finetuned_chat_gpt.md (new file, 23 additions)

@@ -0,0 +1,23 @@
# Using Fine-Tuned gpt-3.5-turbo
LiteLLM allows you to call `completion` with your fine-tuned gpt-3.5-turbo models.

If you're trying to create your own custom fine-tuned gpt-3.5-turbo model, follow along with this tutorial:

Once you've created your fine-tuned model, you can call it with `completion()`.

## Usage

```python
import os
import openai
from litellm import completion

# read the OpenAI key from the environment
openai.api_key = os.getenv("OPENAI_API_KEY")

response = completion(
    model="ft:gpt-3.5-turbo:my-org:custom_suffix:id",  # your fine-tuned model id
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"}
    ]
)

print(response.choices[0].message)
```
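
The same fine-tuned model id also works with LiteLLM's streaming mode. Below is a minimal sketch, assuming `OPENAI_API_KEY` is already exported in your environment and that streamed chunks follow the OpenAI delta format; it is illustrative and not part of the committed tutorial.

```python
import os
from litellm import completion

# assumes the key is already exported, e.g. `export OPENAI_API_KEY=sk-...`
assert os.getenv("OPENAI_API_KEY"), "set OPENAI_API_KEY before running"

response = completion(
    model="ft:gpt-3.5-turbo:my-org:custom_suffix:id",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,  # yield chunks as they are generated instead of one final response
)

for chunk in response:
    # each chunk mirrors OpenAI's streaming format (choices[0].delta)
    print(chunk)
```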

@@ -34,7 +34,7 @@ const sidebars = {
    {
      type: 'category',
      label: 'Tutorials',
-     items: ['tutorials/huggingface_tutorial', 'tutorials/TogetherAI_liteLLM', 'tutorials/fallbacks'],
+     items: ['tutorials/huggingface_tutorial', 'tutorials/TogetherAI_liteLLM', 'tutorials/fallbacks', 'tutorials/finetuned_chat_gpt'],
    },
    'token_usage',
    'stream',

@@ -202,6 +202,7 @@ def completion(
        elif (
            model in litellm.open_ai_chat_completion_models
            or custom_llm_provider == "custom_openai"
+           or "ft:gpt-3.5-turbo" in model  # finetuned gpt-3.5-turbo
        ):  # allow user to make an openai call with a custom base
            openai.api_type = "openai"
            # note: if a user sets a custom base - we should ensure this works
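
To make the routing change above concrete, here is a minimal, hypothetical sketch of the check this hunk adds: fine-tuned OpenAI model ids embed the base model name (`ft:gpt-3.5-turbo:...`), so a simple substring match is enough to send them down the regular OpenAI chat-completion path. The constant and function names below are illustrative, not LiteLLM's actual internals.

```python
from typing import Optional

# illustrative stand-in for litellm.open_ai_chat_completion_models
OPEN_AI_CHAT_COMPLETION_MODELS = {"gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4"}

def routes_to_openai_chat(model: str, custom_llm_provider: Optional[str] = None) -> bool:
    """Return True when a request should be sent down the OpenAI chat-completion path."""
    return (
        model in OPEN_AI_CHAT_COMPLETION_MODELS
        or custom_llm_provider == "custom_openai"
        or "ft:gpt-3.5-turbo" in model  # fine-tuned gpt-3.5-turbo ids
    )

# fine-tuned ids like "ft:gpt-3.5-turbo:my-org:custom_suffix:id" match the substring check
print(routes_to_openai_chat("ft:gpt-3.5-turbo:my-org:custom_suffix:id"))  # True
```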
|
Loading…
Add table
Add a link
Reference in a new issue