diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index 11e24dbdd..0a262e310 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -9,6 +9,30 @@
         "mode": "chat",
         "supports_function_calling": true
     },
+    "gpt-4o": {
+        "max_tokens": 4096,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000005,
+        "output_cost_per_token": 0.000015,
+        "litellm_provider": "openai",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
+    "gpt-4o-2024-05-13": {
+        "max_tokens": 4096,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.000005,
+        "output_cost_per_token": 0.000015,
+        "litellm_provider": "openai",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
+    },
     "gpt-4-turbo-preview": {
         "max_tokens": 4096,
         "max_input_tokens": 128000,
@@ -3366,4 +3390,4 @@
         "mode": "embedding"
     }
-}
\ No newline at end of file
+}
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index 5aa1f972c..0a262e310 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -12,7 +12,7 @@
     "gpt-4o": {
         "max_tokens": 4096,
         "max_input_tokens": 128000,
-        "max_output_tokens": 2048,
+        "max_output_tokens": 4096,
         "input_cost_per_token": 0.000005,
         "output_cost_per_token": 0.000015,
         "litellm_provider": "openai",
@@ -22,7 +22,7 @@
         "supports_vision": true
     },
     "gpt-4o-2024-05-13": {
-        "max_tokens": 2048,
+        "max_tokens": 4096,
     "max_input_tokens": 128000,
         "max_output_tokens": 4096,
         "input_cost_per_token": 0.000005,
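
For context, a minimal sketch (not part of the diff) of how the pricing fields added here translate into a request cost. It reads model_prices_and_context_window.json directly with the standard json module rather than going through litellm's own loader, and the token counts are made-up example values.

# Sketch: compute an estimated request cost from the gpt-4o pricing entry.
# Assumes model_prices_and_context_window.json is in the current directory.
import json

with open("model_prices_and_context_window.json") as f:
    model_cost = json.load(f)

entry = model_cost["gpt-4o"]
prompt_tokens, completion_tokens = 1_000, 500  # hypothetical usage

cost = (prompt_tokens * entry["input_cost_per_token"]
        + completion_tokens * entry["output_cost_per_token"])
# 1000 * 0.000005 + 500 * 0.000015 = 0.0125
print(f"estimated cost: ${cost:.4f} USD")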