diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index 6332fac25..0fe6f6b87 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -3604,7 +3604,7 @@
         "max_output_tokens": 65536,
         "input_cost_per_token": 0.000003,
         "output_cost_per_token": 0.000012,
-        "litellm_provider": "openai",
+        "litellm_provider": "openrouter",
         "mode": "chat",
         "supports_function_calling": true,
         "supports_parallel_function_calling": true,
@@ -3616,7 +3616,7 @@
         "max_output_tokens": 65536,
         "input_cost_per_token": 0.000003,
         "output_cost_per_token": 0.000012,
-        "litellm_provider": "openai",
+        "litellm_provider": "openrouter",
         "mode": "chat",
         "supports_function_calling": true,
         "supports_parallel_function_calling": true,
@@ -3628,7 +3628,7 @@
         "max_output_tokens": 32768,
         "input_cost_per_token": 0.000015,
         "output_cost_per_token": 0.000060,
-        "litellm_provider": "openai",
+        "litellm_provider": "openrouter",
         "mode": "chat",
         "supports_function_calling": true,
         "supports_parallel_function_calling": true,
@@ -3640,7 +3640,7 @@
         "max_output_tokens": 32768,
         "input_cost_per_token": 0.000015,
         "output_cost_per_token": 0.000060,
-        "litellm_provider": "openai",
+        "litellm_provider": "openrouter",
         "mode": "chat",
         "supports_function_calling": true,
         "supports_parallel_function_calling": true,