diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index 171d959e36..7438241543 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -886,7 +886,7 @@
         "max_tokens": 8192,
         "max_input_tokens": 8192,
         "max_output_tokens": 8192,
-        "input_cost_per_token": 0.00000010,
+        "input_cost_per_token": 0.00000005,
         "output_cost_per_token": 0.00000010,
         "litellm_provider": "groq",
         "mode": "chat",
@@ -896,8 +896,8 @@
         "max_tokens": 8192,
         "max_input_tokens": 8192,
         "max_output_tokens": 8192,
-        "input_cost_per_token": 0.00000064,
-        "output_cost_per_token": 0.00000080,
+        "input_cost_per_token": 0.00000059,
+        "output_cost_per_token": 0.00000079,
         "litellm_provider": "groq",
         "mode": "chat",
         "supports_function_calling": true
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index 171d959e36..7438241543 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -886,7 +886,7 @@
         "max_tokens": 8192,
         "max_input_tokens": 8192,
         "max_output_tokens": 8192,
-        "input_cost_per_token": 0.00000010,
+        "input_cost_per_token": 0.00000005,
         "output_cost_per_token": 0.00000010,
         "litellm_provider": "groq",
         "mode": "chat",
@@ -896,8 +896,8 @@
         "max_tokens": 8192,
         "max_input_tokens": 8192,
         "max_output_tokens": 8192,
-        "input_cost_per_token": 0.00000064,
-        "output_cost_per_token": 0.00000080,
+        "input_cost_per_token": 0.00000059,
+        "output_cost_per_token": 0.00000079,
         "litellm_provider": "groq",
         "mode": "chat",
         "supports_function_calling": true
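To make the updated numbers concrete, here is a minimal sketch of how per-token pricing entries like these are typically applied (prompt tokens times `input_cost_per_token` plus completion tokens times `output_cost_per_token`). The `estimate_cost` helper is hypothetical and only mirrors the JSON field names; it is not litellm's own cost-tracking code, which may differ.

```python
def estimate_cost(prompt_tokens: int, completion_tokens: int,
                  input_cost_per_token: float, output_cost_per_token: float) -> float:
    """Estimated USD cost of one request under per-token pricing."""
    return (prompt_tokens * input_cost_per_token
            + completion_tokens * output_cost_per_token)

# Updated values for the first Groq entry touched by this diff:
# $0.00000005/input token ($0.05 per 1M) and $0.00000010/output token ($0.10 per 1M).
cost = estimate_cost(10_000, 1_000,
                     input_cost_per_token=0.00000005,
                     output_cost_per_token=0.00000010)
print(f"${cost:.6f}")  # -> $0.000600 for 10k prompt + 1k completion tokens
```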