From d533acd24ae88344e38e4cc368dbdb4b6eebda37 Mon Sep 17 00:00:00 2001
From: GTonehour <86954612+GTonehour@users.noreply.github.com>
Date: Sat, 5 Oct 2024 11:50:44 +0200
Subject: [PATCH] openrouter/openai's litellm_provider should be openrouter,
 not openai (#6079)

In model_prices_and_context_window.json, all openrouter/* models have
litellm_provider set to "openrouter", except for four openrouter/openai/*
models, which were set to "openai". These should also be "openrouter",
so that LiteLLM knows to route requests for these models through the
OpenRouter API.

---
 model_prices_and_context_window.json | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index 6332fac25..0fe6f6b87 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -3604,7 +3604,7 @@
     "max_output_tokens": 65536,
     "input_cost_per_token": 0.000003,
     "output_cost_per_token": 0.000012,
-    "litellm_provider": "openai",
+    "litellm_provider": "openrouter",
     "mode": "chat",
     "supports_function_calling": true,
     "supports_parallel_function_calling": true,
@@ -3616,7 +3616,7 @@
     "max_output_tokens": 65536,
     "input_cost_per_token": 0.000003,
     "output_cost_per_token": 0.000012,
-    "litellm_provider": "openai",
+    "litellm_provider": "openrouter",
     "mode": "chat",
     "supports_function_calling": true,
     "supports_parallel_function_calling": true,
@@ -3628,7 +3628,7 @@
     "max_output_tokens": 32768,
     "input_cost_per_token": 0.000015,
     "output_cost_per_token": 0.000060,
-    "litellm_provider": "openai",
+    "litellm_provider": "openrouter",
     "mode": "chat",
     "supports_function_calling": true,
     "supports_parallel_function_calling": true,
@@ -3640,7 +3640,7 @@
     "max_output_tokens": 32768,
     "input_cost_per_token": 0.000015,
     "output_cost_per_token": 0.000060,
-    "litellm_provider": "openai",
+    "litellm_provider": "openrouter",
     "mode": "chat",
     "supports_function_calling": true,
     "supports_parallel_function_calling": true,
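
A minimal sketch of how the corrected field can be checked, using
litellm.get_model_info(), which reads its data from
model_prices_and_context_window.json. The model name
"openrouter/openai/o1-mini" is an assumption: the four affected entry
names fall outside the diff context above, so substitute one of the
actual openrouter/openai/* keys from the file.

    import litellm

    # Assumed model name: one of the four affected openrouter/openai/*
    # entries (the exact keys are not visible in the diff hunks above).
    info = litellm.get_model_info("openrouter/openai/o1-mini")

    # get_model_info() is backed by model_prices_and_context_window.json;
    # after this patch the provider for these models should read
    # "openrouter" rather than "openai".
    print(info["litellm_provider"])  # expected: "openrouter"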