forked from phoenix/litellm-mirror

build(model_prices_and_context_window.json): fix token information

parent 1a0d8e7260
commit 2797b30a50

2 changed files with 34 additions and 7 deletions
File 1 of 2:

@@ -286,8 +286,35 @@
         "mode": "chat"
     },
     "ft:gpt-3.5-turbo": {
-        "max_tokens": 4097,
-        "max_input_tokens": 4097,
+        "max_tokens": 4096,
+        "max_input_tokens": 16385,
+        "max_output_tokens": 4096,
         "input_cost_per_token": 0.000003,
         "output_cost_per_token": 0.000006,
         "litellm_provider": "openai",
+        "mode": "chat"
+    },
+    "ft:gpt-3.5-turbo-0125": {
+        "max_tokens": 4096,
+        "max_input_tokens": 16385,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000003,
+        "output_cost_per_token": 0.000006,
+        "litellm_provider": "openai",
+        "mode": "chat"
+    },
+    "ft:gpt-3.5-turbo-1106": {
+        "max_tokens": 4096,
+        "max_input_tokens": 16385,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000003,
+        "output_cost_per_token": 0.000006,
+        "litellm_provider": "openai",
+        "mode": "chat"
+    },
+    "ft:gpt-3.5-turbo-0613": {
+        "max_tokens": 4096,
+        "max_input_tokens": 4096,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000003,
+        "output_cost_per_token": 0.000006,

File 2 of 2:

@@ -286,7 +286,7 @@
         "mode": "chat"
     },
     "ft:gpt-3.5-turbo": {
-        "max_tokens": 16385,
+        "max_tokens": 4096,
         "max_input_tokens": 16385,
         "max_output_tokens": 4096,
         "input_cost_per_token": 0.000003,
@@ -295,7 +295,7 @@
         "mode": "chat"
     },
     "ft:gpt-3.5-turbo-0125": {
-        "max_tokens": 16385,
+        "max_tokens": 4096,
         "max_input_tokens": 16385,
         "max_output_tokens": 4096,
         "input_cost_per_token": 0.000003,
@@ -304,7 +304,7 @@
         "mode": "chat"
     },
     "ft:gpt-3.5-turbo-1106": {
-        "max_tokens": 16385,
+        "max_tokens": 4096,
         "max_input_tokens": 16385,
         "max_output_tokens": 4096,
         "input_cost_per_token": 0.000003,
@@ -313,8 +313,8 @@
         "mode": "chat"
     },
     "ft:gpt-3.5-turbo-0613": {
-        "max_tokens": 4097,
-        "max_input_tokens": 4097,
+        "max_tokens": 4096,
+        "max_input_tokens": 4096,
         "max_output_tokens": 4096,
         "input_cost_per_token": 0.000003,
         "output_cost_per_token": 0.000006,
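For reference, a minimal sketch of how the corrected values surface at runtime, assuming litellm exposes the parsed model_prices_and_context_window.json as the litellm.model_cost dict; the field names come from the diff above, and the 1000/500 token counts are made up for illustration:

# Sketch only: read the corrected "ft:gpt-3.5-turbo" entry back out of
# litellm's model map. Assumes litellm.model_cost is the dict parsed from
# model_prices_and_context_window.json; keys match the JSON fields above.
import litellm

entry = litellm.model_cost["ft:gpt-3.5-turbo"]

# After this commit the context window and the output cap are distinct:
# 16385 input tokens vs. 4096 output tokens (max_tokens mirrors the output cap).
print(entry["max_input_tokens"], entry["max_output_tokens"], entry["max_tokens"])

# Cost of a hypothetical 1000-prompt / 500-completion call, computed directly
# from the per-token prices in the entry (0.000003 in, 0.000006 out).
cost = 1000 * entry["input_cost_per_token"] + 500 * entry["output_cost_per_token"]
print(f"${cost:.6f}")  # -> $0.006000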