Merge pull request #3705 from BerriAI/litellm_add_cost_tracking_for_ft_models

[FEAT] add cost tracking for Fine Tuned OpenAI `ft:davinci-002` and `ft:babbage-002`
This commit is contained in:
Ishaan Jaff 2024-05-16 17:37:35 -07:00 committed by GitHub
commit 4952e244d9
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 58 additions and 0 deletions

View file

@@ -234,6 +234,24 @@
"litellm_provider": "openai",
"mode": "chat"
},
"ft:davinci-002": {
"max_tokens": 16384,
"max_input_tokens": 16384,
"max_output_tokens": 4096,
"input_cost_per_token": 0.000002,
"output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"ft:babbage-002": {
"max_tokens": 16384,
"max_input_tokens": 16384,
"max_output_tokens": 4096,
"input_cost_per_token": 0.0000004,
"output_cost_per_token": 0.0000004,
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"text-embedding-3-large": {
"max_tokens": 8191,
"max_input_tokens": 8191,

View file

@@ -4381,6 +4381,28 @@ def cost_per_token(
* completion_tokens
)
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
elif "ft:davinci-002" in model:
        print_verbose(f"Cost Tracking: {model} is an OpenAI Fine-Tuned LLM")
# fuzzy match ft:davinci-002:abcd-id-cool-litellm
prompt_tokens_cost_usd_dollar = (
model_cost_ref["ft:davinci-002"]["input_cost_per_token"] * prompt_tokens
)
completion_tokens_cost_usd_dollar = (
model_cost_ref["ft:davinci-002"]["output_cost_per_token"]
* completion_tokens
)
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
elif "ft:babbage-002" in model:
        print_verbose(f"Cost Tracking: {model} is an OpenAI Fine-Tuned LLM")
# fuzzy match ft:babbage-002:abcd-id-cool-litellm
prompt_tokens_cost_usd_dollar = (
model_cost_ref["ft:babbage-002"]["input_cost_per_token"] * prompt_tokens
)
completion_tokens_cost_usd_dollar = (
model_cost_ref["ft:babbage-002"]["output_cost_per_token"]
* completion_tokens
)
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
elif model in litellm.azure_llms:
verbose_logger.debug(f"Cost Tracking: {model} is an Azure LLM")
model = litellm.azure_llms[model]

View file

@@ -234,6 +234,24 @@
"litellm_provider": "openai",
"mode": "chat"
},
"ft:davinci-002": {
"max_tokens": 16384,
"max_input_tokens": 16384,
"max_output_tokens": 4096,
"input_cost_per_token": 0.000002,
"output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"ft:babbage-002": {
"max_tokens": 16384,
"max_input_tokens": 16384,
"max_output_tokens": 4096,
"input_cost_per_token": 0.0000004,
"output_cost_per_token": 0.0000004,
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"text-embedding-3-large": {
"max_tokens": 8191,
"max_input_tokens": 8191,