diff --git a/docs/my-website/docs/tutorials/finetuned_chat_gpt.md b/docs/my-website/docs/tutorials/finetuned_chat_gpt.md
new file mode 100644
index 0000000000..4cdf1497db
--- /dev/null
+++ b/docs/my-website/docs/tutorials/finetuned_chat_gpt.md
@@ -0,0 +1,24 @@
+# Using Fine-Tuned gpt-3.5-turbo
+LiteLLM allows you to call `completion` with your fine-tuned gpt-3.5-turbo models
+
+If you're trying to create your custom fine-tuned gpt-3.5-turbo model, follow along with this tutorial:
+
+Once you've created your fine-tuned model, you can call it with `completion()`
+
+## Usage
+```python
+import os
+import openai
+from litellm import completion
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+response = completion(
+  model="ft:gpt-3.5-turbo:my-org:custom_suffix:id",
+  messages=[
+      {"role": "system", "content": "You are a helpful assistant."},
+      {"role": "user", "content": "Hello!"}
+  ]
+)
+
+print(response.choices[0].message)
+```
\ No newline at end of file
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index cf3f2ff52f..6d4f5084e6 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -34,7 +34,7 @@ const sidebars = {
     {
       type: 'category',
       label: 'Tutorials',
-      items: ['tutorials/huggingface_tutorial', 'tutorials/TogetherAI_liteLLM', 'tutorials/fallbacks'],
+      items: ['tutorials/huggingface_tutorial', 'tutorials/TogetherAI_liteLLM', 'tutorials/fallbacks', 'tutorials/finetuned_chat_gpt'],
    },
    'token_usage',
    'stream',
diff --git a/litellm/main.py b/litellm/main.py
index 3cd8a07290..96ee03aeaa 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -202,6 +202,7 @@ def completion(
        elif (
            model in litellm.open_ai_chat_completion_models
            or custom_llm_provider == "custom_openai"
+            or "ft:gpt-3.5-turbo" in model  # finetuned gpt-3.5-turbo
        ):  # allow user to make an openai call with a custom base
            openai.api_type = "openai"  # note: if a user sets a custom base - we should ensure this works