diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 23389d530..2ec96c7b2 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -744,6 +744,28 @@ "supports_function_calling": true, "supports_parallel_function_calling": true }, + "azure/gpt-35-turbo-0613": { + "max_tokens": 4097, + "max_input_tokens": 4097, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000015, + "output_cost_per_token": 0.000002, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true + }, + "azure/gpt-35-turbo-0301": { + "max_tokens": 4097, + "max_input_tokens": 4097, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000015, + "output_cost_per_token": 0.000002, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true + }, "azure/gpt-35-turbo-0125": { "max_tokens": 4096, "max_input_tokens": 16384, @@ -790,6 +812,14 @@ "litellm_provider": "text-completion-openai", "mode": "completion" }, + "azure/gpt-35-turbo-instruct-0914": { + "max_tokens": 4097, + "max_input_tokens": 4097, + "input_cost_per_token": 0.0000015, + "output_cost_per_token": 0.000002, + "litellm_provider": "text-completion-openai", + "mode": "completion" + }, "azure/mistral-large-latest": { "max_tokens": 32000, "max_input_tokens": 32000, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 23389d530..2ec96c7b2 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -744,6 +744,28 @@ "supports_function_calling": true, "supports_parallel_function_calling": true }, + "azure/gpt-35-turbo-0613": { + "max_tokens": 4097, + "max_input_tokens": 4097, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000015, + "output_cost_per_token": 0.000002, + 
"litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true + }, + "azure/gpt-35-turbo-0301": { + "max_tokens": 4097, + "max_input_tokens": 4097, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000015, + "output_cost_per_token": 0.000002, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true + }, "azure/gpt-35-turbo-0125": { "max_tokens": 4096, "max_input_tokens": 16384, @@ -790,6 +812,14 @@ "litellm_provider": "text-completion-openai", "mode": "completion" }, + "azure/gpt-35-turbo-instruct-0914": { + "max_tokens": 4097, + "max_input_tokens": 4097, + "input_cost_per_token": 0.0000015, + "output_cost_per_token": 0.000002, + "litellm_provider": "text-completion-openai", + "mode": "completion" + }, "azure/mistral-large-latest": { "max_tokens": 32000, "max_input_tokens": 32000,