forked from phoenix/litellm-mirror
openrouter/openai's litellm_provider should be openrouter, not openai (#6079)
In model_prices_and_context_window.json, all openrouter/* models have litellm_provider set to "openrouter", except for four openrouter/openai/* models, which were set to "openai". They should be set to "openrouter" so that it is clear these models must be accessed through the openrouter API.
This commit is contained in:
parent
ab0b536143
commit
d533acd24a
1 changed file with 4 additions and 4 deletions
|
@ -3604,7 +3604,7 @@
|
||||||
"max_output_tokens": 65536,
|
"max_output_tokens": 65536,
|
||||||
"input_cost_per_token": 0.000003,
|
"input_cost_per_token": 0.000003,
|
||||||
"output_cost_per_token": 0.000012,
|
"output_cost_per_token": 0.000012,
|
||||||
"litellm_provider": "openai",
|
"litellm_provider": "openrouter",
|
||||||
"mode": "chat",
|
"mode": "chat",
|
||||||
"supports_function_calling": true,
|
"supports_function_calling": true,
|
||||||
"supports_parallel_function_calling": true,
|
"supports_parallel_function_calling": true,
|
||||||
|
@ -3616,7 +3616,7 @@
|
||||||
"max_output_tokens": 65536,
|
"max_output_tokens": 65536,
|
||||||
"input_cost_per_token": 0.000003,
|
"input_cost_per_token": 0.000003,
|
||||||
"output_cost_per_token": 0.000012,
|
"output_cost_per_token": 0.000012,
|
||||||
"litellm_provider": "openai",
|
"litellm_provider": "openrouter",
|
||||||
"mode": "chat",
|
"mode": "chat",
|
||||||
"supports_function_calling": true,
|
"supports_function_calling": true,
|
||||||
"supports_parallel_function_calling": true,
|
"supports_parallel_function_calling": true,
|
||||||
|
@ -3628,7 +3628,7 @@
|
||||||
"max_output_tokens": 32768,
|
"max_output_tokens": 32768,
|
||||||
"input_cost_per_token": 0.000015,
|
"input_cost_per_token": 0.000015,
|
||||||
"output_cost_per_token": 0.000060,
|
"output_cost_per_token": 0.000060,
|
||||||
"litellm_provider": "openai",
|
"litellm_provider": "openrouter",
|
||||||
"mode": "chat",
|
"mode": "chat",
|
||||||
"supports_function_calling": true,
|
"supports_function_calling": true,
|
||||||
"supports_parallel_function_calling": true,
|
"supports_parallel_function_calling": true,
|
||||||
|
@ -3640,7 +3640,7 @@
|
||||||
"max_output_tokens": 32768,
|
"max_output_tokens": 32768,
|
||||||
"input_cost_per_token": 0.000015,
|
"input_cost_per_token": 0.000015,
|
||||||
"output_cost_per_token": 0.000060,
|
"output_cost_per_token": 0.000060,
|
||||||
"litellm_provider": "openai",
|
"litellm_provider": "openrouter",
|
||||||
"mode": "chat",
|
"mode": "chat",
|
||||||
"supports_function_calling": true,
|
"supports_function_calling": true,
|
||||||
"supports_parallel_function_calling": true,
|
"supports_parallel_function_calling": true,
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue