feat - update gemini-1.5-pro pricing

Ishaan Jaff 2024-05-07 07:56:52 -07:00
parent 30003afbf8
commit e91f0382d7
2 changed files with 12 additions and 12 deletions


@@ -1060,8 +1060,8 @@
     "max_tokens": 8192,
     "max_input_tokens": 1000000,
     "max_output_tokens": 8192,
-    "input_cost_per_token": 0,
-    "output_cost_per_token": 0,
+    "input_cost_per_token": 0.000000625,
+    "output_cost_per_token": 0.000001875,
     "litellm_provider": "vertex_ai-language-models",
     "mode": "chat",
     "supports_function_calling": true,
@@ -1072,8 +1072,8 @@
     "max_tokens": 8192,
     "max_input_tokens": 1000000,
     "max_output_tokens": 8192,
-    "input_cost_per_token": 0,
-    "output_cost_per_token": 0,
+    "input_cost_per_token": 0.000000625,
+    "output_cost_per_token": 0.000001875,
     "litellm_provider": "vertex_ai-language-models",
     "mode": "chat",
     "supports_function_calling": true,
@@ -1084,8 +1084,8 @@
     "max_tokens": 8192,
     "max_input_tokens": 1000000,
     "max_output_tokens": 8192,
-    "input_cost_per_token": 0,
-    "output_cost_per_token": 0,
+    "input_cost_per_token": 0.000000625,
+    "output_cost_per_token": 0.000001875,
     "litellm_provider": "vertex_ai-language-models",
     "mode": "chat",
     "supports_function_calling": true,


@@ -1060,8 +1060,8 @@
     "max_tokens": 8192,
     "max_input_tokens": 1000000,
     "max_output_tokens": 8192,
-    "input_cost_per_token": 0,
-    "output_cost_per_token": 0,
+    "input_cost_per_token": 0.000000625,
+    "output_cost_per_token": 0.000001875,
     "litellm_provider": "vertex_ai-language-models",
     "mode": "chat",
     "supports_function_calling": true,
@@ -1072,8 +1072,8 @@
     "max_tokens": 8192,
     "max_input_tokens": 1000000,
     "max_output_tokens": 8192,
-    "input_cost_per_token": 0,
-    "output_cost_per_token": 0,
+    "input_cost_per_token": 0.000000625,
+    "output_cost_per_token": 0.000001875,
     "litellm_provider": "vertex_ai-language-models",
     "mode": "chat",
     "supports_function_calling": true,
@@ -1084,8 +1084,8 @@
     "max_tokens": 8192,
     "max_input_tokens": 1000000,
     "max_output_tokens": 8192,
-    "input_cost_per_token": 0,
-    "output_cost_per_token": 0,
+    "input_cost_per_token": 0.000000625,
+    "output_cost_per_token": 0.000001875,
     "litellm_provider": "vertex_ai-language-models",
     "mode": "chat",
     "supports_function_calling": true,