build(model_prices_and_context_window.json): update gpt-4o-mini max_output_tokens

Fixes https://github.com/BerriAI/litellm/issues/5045
Krrish Dholakia 2024-08-05 09:30:14 -07:00
parent e3119cef38
commit e0c986c000
2 changed files with 14 additions and 14 deletions

Changed file 1 of 2:

@@ -34,7 +34,7 @@
         "supports_vision": true
     },
     "gpt-4o-mini": {
-        "max_tokens": 4096,
+        "max_tokens": 16384,
         "max_input_tokens": 128000,
         "max_output_tokens": 16384,
         "input_cost_per_token": 0.00000015,
@@ -46,9 +46,9 @@
         "supports_vision": true
     },
     "gpt-4o-mini-2024-07-18": {
-        "max_tokens": 4096,
+        "max_tokens": 16384,
         "max_input_tokens": 128000,
-        "max_output_tokens": 4096,
+        "max_output_tokens": 16384,
         "input_cost_per_token": 0.00000015,
         "output_cost_per_token": 0.00000060,
         "litellm_provider": "openai",
@@ -473,9 +473,9 @@
         "supports_vision": true
     },
     "azure/global-standard/gpt-4o-mini": {
-        "max_tokens": 4096,
+        "max_tokens": 16384,
         "max_input_tokens": 128000,
-        "max_output_tokens": 4096,
+        "max_output_tokens": 16384,
         "input_cost_per_token": 0.00000015,
         "output_cost_per_token": 0.00000060,
         "litellm_provider": "azure",
@@ -485,9 +485,9 @@
         "supports_vision": true
     },
     "azure/gpt-4o-mini": {
-        "max_tokens": 4096,
+        "max_tokens": 16384,
         "max_input_tokens": 128000,
-        "max_output_tokens": 4096,
+        "max_output_tokens": 16384,
         "input_cost_per_token": 0.000000165,
         "output_cost_per_token": 0.00000066,
         "litellm_provider": "azure",

Changed file 2 of 2:

@@ -34,7 +34,7 @@
         "supports_vision": true
     },
     "gpt-4o-mini": {
-        "max_tokens": 4096,
+        "max_tokens": 16384,
         "max_input_tokens": 128000,
         "max_output_tokens": 16384,
         "input_cost_per_token": 0.00000015,
@@ -46,9 +46,9 @@
         "supports_vision": true
     },
     "gpt-4o-mini-2024-07-18": {
-        "max_tokens": 4096,
+        "max_tokens": 16384,
         "max_input_tokens": 128000,
-        "max_output_tokens": 4096,
+        "max_output_tokens": 16384,
         "input_cost_per_token": 0.00000015,
         "output_cost_per_token": 0.00000060,
         "litellm_provider": "openai",
@@ -473,9 +473,9 @@
         "supports_vision": true
     },
     "azure/global-standard/gpt-4o-mini": {
-        "max_tokens": 4096,
+        "max_tokens": 16384,
         "max_input_tokens": 128000,
-        "max_output_tokens": 4096,
+        "max_output_tokens": 16384,
         "input_cost_per_token": 0.00000015,
         "output_cost_per_token": 0.00000060,
         "litellm_provider": "azure",
@@ -485,9 +485,9 @@
         "supports_vision": true
     },
     "azure/gpt-4o-mini": {
-        "max_tokens": 4096,
+        "max_tokens": 16384,
         "max_input_tokens": 128000,
-        "max_output_tokens": 4096,
+        "max_output_tokens": 16384,
         "input_cost_per_token": 0.000000165,
         "output_cost_per_token": 0.00000066,
         "litellm_provider": "azure",