From e14ef3eeda8d921ef3feaf3fae7bfe820ec69169 Mon Sep 17 00:00:00 2001
From: Wanis Elabbar <70503629+elabbarw@users.noreply.github.com>
Date: Tue, 23 Jul 2024 23:57:50 +0100
Subject: [PATCH 1/2] feat - add azure_ai llama v3.1 8B 70B and 405B

---
 model_prices_and_context_window.json | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index f86ea8bd7..4c6dd8fdb 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -760,6 +760,33 @@
         "litellm_provider": "azure_ai",
         "mode": "chat"
     },
+    "azure_ai/Meta-Llama-31-8B-Instruct": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.0000003,
+        "output_cost_per_token": 0.00000061,
+        "litellm_provider": "azure_ai",
+        "mode": "chat"
+    },
+    "azure_ai/Meta-Llama-31-70B-Instruct": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.00000268,
+        "output_cost_per_token": 0.00000354,
+        "litellm_provider": "azure_ai",
+        "mode": "chat"
+    },
+    "azure_ai/Meta-Llama-31-405B-Instruct": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.00000533,
+        "output_cost_per_token": 0.000016,
+        "litellm_provider": "azure_ai",
+        "mode": "chat"
+    },
     "babbage-002": {
         "max_tokens": 16384,
         "max_input_tokens": 16384,

From 77cf1fd600b5213422e807de277e6599a03a45dd Mon Sep 17 00:00:00 2001
From: Wanis Elabbar <70503629+elabbarw@users.noreply.github.com>
Date: Wed, 24 Jul 2024 16:50:07 +0100
Subject: [PATCH 2/2] update azure_ai llamav31 prices with sources

---
 model_prices_and_context_window.json | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index e8ca6f74d..90a6b1283 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -767,7 +767,8 @@
         "input_cost_per_token": 0.0000003,
         "output_cost_per_token": 0.00000061,
         "litellm_provider": "azure_ai",
-        "mode": "chat"
+        "mode": "chat",
+        "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/metagenai.meta-llama-3-1-8b-instruct-offer?tab=PlansAndPrice"
     },
     "azure_ai/Meta-Llama-31-70B-Instruct": {
         "max_tokens": 128000,
@@ -776,7 +777,8 @@
         "input_cost_per_token": 0.00000268,
         "output_cost_per_token": 0.00000354,
         "litellm_provider": "azure_ai",
-        "mode": "chat"
+        "mode": "chat",
+        "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/metagenai.meta-llama-3-1-70b-instruct-offer?tab=PlansAndPrice"
     },
     "azure_ai/Meta-Llama-31-405B-Instruct": {
         "max_tokens": 128000,
@@ -785,7 +787,8 @@
         "input_cost_per_token": 0.00000533,
         "output_cost_per_token": 0.000016,
         "litellm_provider": "azure_ai",
-        "mode": "chat"
+        "mode": "chat",
+        "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/metagenai.meta-llama-3-1-405b-instruct-offer?tab=PlansAndPrice"
     },
     "babbage-002": {
         "max_tokens": 16384,
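
Not part of the patch itself: a minimal sketch of how litellm consumes these new map entries when computing spend. It assumes the litellm package and its cost_per_token() helper; the model key comes from PATCH 1/2, while the token counts are hypothetical.

# Illustrative only (not from the patch): cost lookup for one of the entries added above.
from litellm import cost_per_token

prompt_cost, completion_cost = cost_per_token(
    model="azure_ai/Meta-Llama-31-8B-Instruct",  # key added in PATCH 1/2
    prompt_tokens=1000,       # hypothetical usage for a single request
    completion_tokens=500,
)

# With the prices above: 1000 * 0.0000003 = 0.0003 USD for the prompt,
# 500 * 0.00000061 = 0.000305 USD for the completion.
print(prompt_cost, completion_cost)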