(feat) cost tracking ft:gpt-3.5-turbo

Author: ishaan-jaff
Date:   2023-11-23 13:58:59 -08:00
Parent: da7c07574f
Commit: 46fc8be4c8

2 changed files with 16 additions and 0 deletions

@@ -1611,6 +1611,15 @@ def cost_per_token(model="gpt-3.5-turbo", prompt_tokens=0, completion_tokens=0):
             model_cost_ref[model]["output_cost_per_token"] * completion_tokens
         )
         return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
+    elif "ft:gpt-3.5-turbo" in model:
+        # fuzzy match ft:gpt-3.5-turbo:abcd-id-cool-litellm
+        prompt_tokens_cost_usd_dollar = (
+            model_cost_ref["ft:gpt-3.5-turbo"]["input_cost_per_token"] * prompt_tokens
+        )
+        completion_tokens_cost_usd_dollar = (
+            model_cost_ref["ft:gpt-3.5-turbo"]["output_cost_per_token"] * completion_tokens
+        )
+        return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
     else:
         # calculate average input cost, azure/gpt-deployments can potentially go here if users don't specify, gpt-4, gpt-3.5-turbo. LLMs litellm knows
         input_cost_sum = 0

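A rough usage sketch of the changed function (not part of the commit): the fine-tuned model id below is a made-up example, and it assumes cost_per_token is importable from the litellm package. Because the new branch checks "ft:gpt-3.5-turbo" as a substring of the model name, any "ft:gpt-3.5-turbo:<suffix>" id picks up the dedicated ft:gpt-3.5-turbo rates instead of the base gpt-3.5-turbo rates.

from litellm import cost_per_token

# hypothetical fine-tuned model id; "ft:gpt-3.5-turbo" matches it as a substring
prompt_cost, completion_cost = cost_per_token(
    model="ft:gpt-3.5-turbo:my-org:custom:abc123",
    prompt_tokens=1000,
    completion_tokens=500,
)
print(prompt_cost, completion_cost)  # per-request costs in USD, taken from the cost map below
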
@@ -97,6 +97,13 @@
         "litellm_provider": "openai",
         "mode": "chat"
     },
+    "ft:gpt-3.5-turbo": {
+        "max_tokens": 4097,
+        "input_cost_per_token": 0.000012,
+        "output_cost_per_token": 0.000016,
+        "litellm_provider": "openai",
+        "mode": "chat"
+    },
     "azure/gpt-4-32k": {
         "max_tokens": 8192,
         "input_cost_per_token": 0.00006,