openrouter/openai's litellm_provider should be openrouter, not openai (#6079)

In model_prices_and_context_window.json, openrouter/* models all have litellm_provider set as "openrouter", except for four openrouter/openai/* models, which were set to "openai".
These should also be set to "openrouter", so that it is clear these models are served through the OpenRouter API.
This commit is contained in:
GTonehour 2024-10-05 11:50:44 +02:00 committed by GitHub
parent ab0b536143
commit d533acd24a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -3604,7 +3604,7 @@
     "max_output_tokens": 65536,
     "input_cost_per_token": 0.000003,
     "output_cost_per_token": 0.000012,
-    "litellm_provider": "openai",
+    "litellm_provider": "openrouter",
     "mode": "chat",
     "supports_function_calling": true,
     "supports_parallel_function_calling": true,
@@ -3616,7 +3616,7 @@
     "max_output_tokens": 65536,
     "input_cost_per_token": 0.000003,
     "output_cost_per_token": 0.000012,
-    "litellm_provider": "openai",
+    "litellm_provider": "openrouter",
    "mode": "chat",
     "supports_function_calling": true,
     "supports_parallel_function_calling": true,
@@ -3628,7 +3628,7 @@
     "max_output_tokens": 32768,
     "input_cost_per_token": 0.000015,
     "output_cost_per_token": 0.000060,
-    "litellm_provider": "openai",
+    "litellm_provider": "openrouter",
     "mode": "chat",
     "supports_function_calling": true,
     "supports_parallel_function_calling": true,
@@ -3640,7 +3640,7 @@
     "max_output_tokens": 32768,
     "input_cost_per_token": 0.000015,
     "output_cost_per_token": 0.000060,
-    "litellm_provider": "openai",
+    "litellm_provider": "openrouter",
     "mode": "chat",
     "supports_function_calling": true,
     "supports_parallel_function_calling": true,