From 1a8f45e8da8ac22ca9e89dd724d9477f961dc66c Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Sat, 27 Jul 2024 10:46:52 -0700
Subject: [PATCH] build(model_prices_and_context_window.json): add mistral
 nemo + codestral pricing

---
 ...odel_prices_and_context_window_backup.json | 20 +++++++++++++++++++
 model_prices_and_context_window.json          | 20 +++++++++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index 7f773040e..0f20f6689 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -2038,6 +2038,26 @@
         "mode": "chat",
         "supports_function_calling": true
     },
+    "vertex_ai/mistral-nemo@2407": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.000003,
+        "output_cost_per_token": 0.000003,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "mode": "chat",
+        "supports_function_calling": true
+    },
+    "vertex_ai/codestral@2405": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.000001,
+        "output_cost_per_token": 0.000003,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "mode": "chat",
+        "supports_function_calling": true
+    },
     "vertex_ai/imagegeneration@006": {
         "cost_per_image": 0.020,
         "litellm_provider": "vertex_ai-image-models",
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index 7f773040e..0f20f6689 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -2038,6 +2038,26 @@
         "mode": "chat",
         "supports_function_calling": true
     },
+    "vertex_ai/mistral-nemo@2407": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.000003,
+        "output_cost_per_token": 0.000003,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "mode": "chat",
+        "supports_function_calling": true
+    },
+    "vertex_ai/codestral@2405": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.000001,
+        "output_cost_per_token": 0.000003,
+        "litellm_provider": "vertex_ai-mistral_models",
+        "mode": "chat",
+        "supports_function_calling": true
+    },
     "vertex_ai/imagegeneration@006": {
         "cost_per_image": 0.020,
         "litellm_provider": "vertex_ai-image-models",
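
Note: the "input_cost_per_token" / "output_cost_per_token" fields added above are per-token USD prices, so the cost of a request is prompt_tokens * input price + completion_tokens * output price. Below is a minimal, illustrative Python sketch of that arithmetic using only the values from this patch; the request_cost helper is hypothetical and is not part of litellm's API.

import json

# Pricing entries added in this patch (copied from model_prices_and_context_window.json).
PRICES = json.loads("""
{
    "vertex_ai/mistral-nemo@2407": {
        "input_cost_per_token": 0.000003,
        "output_cost_per_token": 0.000003
    },
    "vertex_ai/codestral@2405": {
        "input_cost_per_token": 0.000001,
        "output_cost_per_token": 0.000003
    }
}
""")

def request_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    """Illustrative helper: per-request USD cost from the per-token prices above."""
    entry = PRICES[model]
    return (prompt_tokens * entry["input_cost_per_token"]
            + completion_tokens * entry["output_cost_per_token"])

# Example: 1,000 prompt tokens + 500 completion tokens on codestral@2405
# = 1000 * 0.000001 + 500 * 0.000003 = $0.0025
print(request_cost("vertex_ai/codestral@2405", 1000, 500))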