From 9f6138ef0e9479fca31e913bc0ea25919064614a Mon Sep 17 00:00:00 2001 From: Zeeland Date: Mon, 16 Oct 2023 18:44:45 +0800 Subject: [PATCH] fix: add openai finetune compatibility to llm_provider --- litellm/main.py | 2 +- litellm/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/litellm/main.py b/litellm/main.py index ae1c52554..7525b254e 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -375,7 +375,7 @@ def completion( model in litellm.open_ai_chat_completion_models or custom_llm_provider == "custom_openai" or custom_llm_provider == "openai" - or "ft:gpt-3.5-turbo" in model # finetuned gpt-3.5-turbo + or "ft:gpt-3.5-turbo" in model # finetune gpt-3.5-turbo ): # allow user to make an openai call with a custom base # note: if a user sets a custom base - we should ensure this works # allow for the setting of dynamic and stateful api-bases diff --git a/litellm/utils.py b/litellm/utils.py index f13e757aa..64499381b 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -1461,7 +1461,7 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_ # check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, et.) ## openai - chatcompletion + text completion - if model in litellm.open_ai_chat_completion_models: + if model in litellm.open_ai_chat_completion_models or "ft:gpt-3.5-turbo" in model: custom_llm_provider = "openai" elif model in litellm.open_ai_text_completion_models: custom_llm_provider = "text-completion-openai"