Merge pull request #3705 from BerriAI/litellm_add_cost_tracking_for_ft_models
[FEAT] add cost tracking for Fine Tuned OpenAI `ft:davinci-002` and `ft:babbage-002`
Commit 4952e244d9
3 changed files with 58 additions and 0 deletions
@@ -234,6 +234,24 @@
         "litellm_provider": "openai",
         "mode": "chat"
     },
+    "ft:davinci-002": {
+        "max_tokens": 16384,
+        "max_input_tokens": 16384,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000002,
+        "output_cost_per_token": 0.000002,
+        "litellm_provider": "text-completion-openai",
+        "mode": "completion"
+    },
+    "ft:babbage-002": {
+        "max_tokens": 16384,
+        "max_input_tokens": 16384,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.0000004,
+        "output_cost_per_token": 0.0000004,
+        "litellm_provider": "text-completion-openai",
+        "mode": "completion"
+    },
     "text-embedding-3-large": {
         "max_tokens": 8191,
         "max_input_tokens": 8191,
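Both new entries follow the existing cost-map schema: USD price per token for input and output, plus the provider and mode. As a sanity check on the numbers, here is a minimal standalone sketch of how such an entry turns token counts into dollars; the `cost_map` dict and `estimate_cost_usd` helper below are illustrative only, not litellm internals:

```python
# Illustrative only: a hand-copied slice of the cost map added above, plus a
# hypothetical helper showing how per-token prices become a USD total.
cost_map = {
    "ft:davinci-002": {"input_cost_per_token": 0.000002, "output_cost_per_token": 0.000002},
    "ft:babbage-002": {"input_cost_per_token": 0.0000004, "output_cost_per_token": 0.0000004},
}

def estimate_cost_usd(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    entry = cost_map[model]
    return (
        entry["input_cost_per_token"] * prompt_tokens
        + entry["output_cost_per_token"] * completion_tokens
    )

# 1,000 prompt tokens + 500 completion tokens on ft:davinci-002
# -> 1000 * 0.000002 + 500 * 0.000002 = 0.003 USD
print(estimate_cost_usd("ft:davinci-002", 1000, 500))
```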
@@ -4381,6 +4381,28 @@ def cost_per_token(
             * completion_tokens
         )
         return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
+    elif "ft:davinci-002" in model:
+        print_verbose(f"Cost Tracking: {model} is an OpenAI Fine Tuned LLM")
+        # fuzzy match ft:davinci-002:abcd-id-cool-litellm
+        prompt_tokens_cost_usd_dollar = (
+            model_cost_ref["ft:davinci-002"]["input_cost_per_token"] * prompt_tokens
+        )
+        completion_tokens_cost_usd_dollar = (
+            model_cost_ref["ft:davinci-002"]["output_cost_per_token"]
+            * completion_tokens
+        )
+        return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
+    elif "ft:babbage-002" in model:
+        print_verbose(f"Cost Tracking: {model} is an OpenAI Fine Tuned LLM")
+        # fuzzy match ft:babbage-002:abcd-id-cool-litellm
+        prompt_tokens_cost_usd_dollar = (
+            model_cost_ref["ft:babbage-002"]["input_cost_per_token"] * prompt_tokens
+        )
+        completion_tokens_cost_usd_dollar = (
+            model_cost_ref["ft:babbage-002"]["output_cost_per_token"]
+            * completion_tokens
+        )
+        return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
     elif model in litellm.azure_llms:
         verbose_logger.debug(f"Cost Tracking: {model} is an Azure LLM")
         model = litellm.azure_llms[model]
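The `# fuzzy match` comments exist because OpenAI fine-tuned model ids carry a suffix (for example `ft:davinci-002:abcd-id-cool-litellm`), so an exact dictionary lookup would miss them; the substring check maps any such id back to its base pricing entry. A standalone sketch of that resolution step, using a hypothetical helper name (`resolve_ft_base_model` is not part of litellm):

```python
from typing import Optional

# Hypothetical helper illustrating the substring ("fuzzy") match used above:
# "ft:davinci-002:abcd-id-cool-litellm" should resolve to "ft:davinci-002".
FT_BASE_MODELS = ("ft:davinci-002", "ft:babbage-002")

def resolve_ft_base_model(model: str) -> Optional[str]:
    for base in FT_BASE_MODELS:
        if base in model:
            return base
    return None

assert resolve_ft_base_model("ft:davinci-002:abcd-id-cool-litellm") == "ft:davinci-002"
assert resolve_ft_base_model("ft:babbage-002:my-org:custom-suffix") == "ft:babbage-002"
assert resolve_ft_base_model("gpt-3.5-turbo") is None
```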
@@ -234,6 +234,24 @@
         "litellm_provider": "openai",
         "mode": "chat"
     },
+    "ft:davinci-002": {
+        "max_tokens": 16384,
+        "max_input_tokens": 16384,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000002,
+        "output_cost_per_token": 0.000002,
+        "litellm_provider": "text-completion-openai",
+        "mode": "completion"
+    },
+    "ft:babbage-002": {
+        "max_tokens": 16384,
+        "max_input_tokens": 16384,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.0000004,
+        "output_cost_per_token": 0.0000004,
+        "litellm_provider": "text-completion-openai",
+        "mode": "completion"
+    },
     "text-embedding-3-large": {
         "max_tokens": 8191,
         "max_input_tokens": 8191,
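Taken together, the cost-map entries and the new branches mean `cost_per_token` now resolves fine-tuned `davinci-002` and `babbage-002` ids to the prices above. A hedged usage sketch, assuming `cost_per_token` is importable from the top-level `litellm` package with the keyword arguments implied by the hunk header (verify against the installed version):

```python
import litellm

# Assumption: litellm exposes cost_per_token(model=..., prompt_tokens=...,
# completion_tokens=...) returning (prompt_cost_usd, completion_cost_usd),
# as suggested by the diff above.
prompt_cost, completion_cost = litellm.cost_per_token(
    model="ft:davinci-002:abcd-id-cool-litellm",
    prompt_tokens=1000,
    completion_tokens=500,
)

# With the prices added above this should come to 0.002 + 0.001 = 0.003 USD.
print(prompt_cost, completion_cost)
```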