adding support for finetuned completion models

This commit is contained in:
Krrish Dholakia 2023-08-23 10:02:13 -07:00
parent f3cf199cde
commit 228669caa1
3 changed files with 15 additions and 3 deletions

View file

@@ -24,6 +24,16 @@ caching = False
caching_with_models = False # if you want the caching key to be model + prompt
debugger = False
model_cost = {
"babbage-002": {
"max_tokens": 16384,
"input_cost_per_token": 0.0000004,
"output_cost_per_token": 0.0000004,
},
"davinci-002": {
"max_tokens": 16384,
"input_cost_per_token": 0.000002,
"output_cost_per_token": 0.000002,
},
"gpt-3.5-turbo": {
"max_tokens": 4000,
"input_cost_per_token": 0.0000015,
@@ -137,7 +147,7 @@ open_ai_chat_completion_models = [
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
]
open_ai_text_completion_models = ["text-davinci-003"]
open_ai_text_completion_models = ["text-davinci-003", "babbage-002", "davinci-002"]
cohere_models = [
"command-nightly",

View file

@@ -248,7 +248,9 @@ def completion(
original_response=response,
additional_args={"headers": litellm.headers},
)
elif model in litellm.open_ai_text_completion_models:
elif (model in litellm.open_ai_text_completion_models or
"ft:babbage-002" in model or # support for finetuned completion models
"ft:davinci-002" in model):
openai.api_type = "openai"
openai.api_base = (
litellm.api_base

View file

@@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
version = "0.1.457"
version = "0.1.458"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT License"