diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index adb1f0ac7..09d39fb43 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -3137,6 +3137,54 @@ "litellm_provider": "openrouter", "mode": "chat" }, + "openrouter/openai/o1-mini": { + "max_tokens": 65536, + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "input_cost_per_token": 0.000003, + "output_cost_per_token": 0.000012, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "openrouter/openai/o1-mini-2024-09-12": { + "max_tokens": 65536, + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "input_cost_per_token": 0.000003, + "output_cost_per_token": 0.000012, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "openrouter/openai/o1-preview": { + "max_tokens": 32768, + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "input_cost_per_token": 0.000015, + "output_cost_per_token": 0.000060, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "openrouter/openai/o1-preview-2024-09-12": { + "max_tokens": 32768, + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "input_cost_per_token": 0.000015, + "output_cost_per_token": 0.000060, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, "openrouter/openai/gpt-4o": { "max_tokens": 4096, "max_input_tokens": 128000,