forked from phoenix/litellm-mirror
Add gpt-4o-2024-11-20. (#6832)
parent: b0be5bf3a1
commit: a1f06de53d
2 changed files with 108 additions and 0 deletions
@@ -197,6 +197,21 @@
         "supports_vision": true,
         "supports_prompt_caching": true
     },
+    "gpt-4o-2024-11-20": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.0000025,
+        "output_cost_per_token": 0.000010,
+        "cache_read_input_token_cost": 0.00000125,
+        "litellm_provider": "openai",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true,
+        "supports_prompt_caching": true
+    },
     "gpt-4-turbo-preview": {
         "max_tokens": 4096,
         "max_input_tokens": 128000,
@@ -468,6 +483,19 @@
         "supports_response_schema": true,
         "supports_vision": true
     },
+    "ft:gpt-4o-2024-11-20": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.00000375,
+        "output_cost_per_token": 0.000015,
+        "litellm_provider": "openai",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
     "ft:gpt-4o-mini-2024-07-18": {
         "max_tokens": 16384,
         "max_input_tokens": 128000,
@@ -730,6 +758,19 @@
         "supports_response_schema": true,
         "supports_vision": true
     },
+    "azure/gpt-4o-2024-11-20": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.00000275,
+        "output_cost_per_token": 0.000011,
+        "litellm_provider": "azure",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
     "azure/gpt-4o-2024-05-13": {
         "max_tokens": 4096,
         "max_input_tokens": 128000,
@@ -756,6 +797,19 @@
         "supports_response_schema": true,
         "supports_vision": true
     },
+    "azure/global-standard/gpt-4o-2024-11-20": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.0000025,
+        "output_cost_per_token": 0.000010,
+        "litellm_provider": "azure",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
     "azure/global-standard/gpt-4o-mini": {
         "max_tokens": 16384,
         "max_input_tokens": 128000,
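The per-token prices added above are easy to sanity-check by hand: 0.0000025 USD per input token is $2.50 per 1M input tokens, 0.000010 USD per output token is $10.00 per 1M output tokens, and the cache_read_input_token_cost of 0.00000125 bills cached prompt tokens at half the input rate. The sketch below only restates that arithmetic using the same field names the diff adds; it is an illustration, not litellm's own cost-tracking code.

# Illustrative only: recompute request cost from the pricing fields this commit adds.
GPT_4O_2024_11_20 = {  # values copied from the new "gpt-4o-2024-11-20" entry
    "input_cost_per_token": 0.0000025,          # $2.50 per 1M input tokens
    "output_cost_per_token": 0.000010,          # $10.00 per 1M output tokens
    "cache_read_input_token_cost": 0.00000125,  # $1.25 per 1M cached input tokens
}

def request_cost(entry, prompt_tokens, completion_tokens, cached_tokens=0):
    """Cost in USD for one request, billing cached prompt tokens at the
    discounted cache-read rate when the entry defines one."""
    cache_rate = entry.get("cache_read_input_token_cost", entry["input_cost_per_token"])
    uncached = prompt_tokens - cached_tokens
    return (
        uncached * entry["input_cost_per_token"]
        + cached_tokens * cache_rate
        + completion_tokens * entry["output_cost_per_token"]
    )

# 10k prompt tokens (half of them cached) plus 1k completion tokens:
# 5000 * 0.0000025 + 5000 * 0.00000125 + 1000 * 0.000010 = $0.02875
print(request_cost(GPT_4O_2024_11_20, 10_000, 1_000, cached_tokens=5_000))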
The second changed file receives the same hunks:
@@ -197,6 +197,21 @@
         "supports_vision": true,
         "supports_prompt_caching": true
     },
+    "gpt-4o-2024-11-20": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.0000025,
+        "output_cost_per_token": 0.000010,
+        "cache_read_input_token_cost": 0.00000125,
+        "litellm_provider": "openai",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true,
+        "supports_prompt_caching": true
+    },
     "gpt-4-turbo-preview": {
         "max_tokens": 4096,
         "max_input_tokens": 128000,
@@ -468,6 +483,19 @@
         "supports_response_schema": true,
         "supports_vision": true
     },
+    "ft:gpt-4o-2024-11-20": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.00000375,
+        "output_cost_per_token": 0.000015,
+        "litellm_provider": "openai",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
     "ft:gpt-4o-mini-2024-07-18": {
         "max_tokens": 16384,
         "max_input_tokens": 128000,
@@ -730,6 +758,19 @@
         "supports_response_schema": true,
         "supports_vision": true
     },
+    "azure/gpt-4o-2024-11-20": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.00000275,
+        "output_cost_per_token": 0.000011,
+        "litellm_provider": "azure",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
     "azure/gpt-4o-2024-05-13": {
         "max_tokens": 4096,
         "max_input_tokens": 128000,
@@ -756,6 +797,19 @@
         "supports_response_schema": true,
         "supports_vision": true
     },
+    "azure/global-standard/gpt-4o-2024-11-20": {
+        "max_tokens": 16384,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 16384,
+        "input_cost_per_token": 0.0000025,
+        "output_cost_per_token": 0.000010,
+        "litellm_provider": "azure",
+        "mode": "chat",
+        "supports_function_calling": true,
+        "supports_parallel_function_calling": true,
+        "supports_response_schema": true,
+        "supports_vision": true
+    },
     "azure/global-standard/gpt-4o-mini": {
         "max_tokens": 16384,
         "max_input_tokens": 128000,
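Once these entries are in litellm's model map, cost lookups keyed by model name should pick them up, including the gap between the Azure regional entry ($2.75 / $11.00 per 1M tokens) and the global-standard entry ($2.50 / $10.00 per 1M tokens). The sketch below assumes litellm exposes a cost_per_token helper with roughly this signature, as in recent releases; treat both the name and the return shape as assumptions and check your installed version.

# Assumption: litellm.cost_per_token(model=..., prompt_tokens=..., completion_tokens=...)
# returns (prompt_cost_usd, completion_cost_usd); verify against your litellm version.
import litellm

prompt_cost, completion_cost = litellm.cost_per_token(
    model="azure/gpt-4o-2024-11-20",  # new Azure entry: $2.75 / $11.00 per 1M tokens
    prompt_tokens=10_000,
    completion_tokens=1_000,
)
print(prompt_cost + completion_cost)  # expected roughly 0.0275 + 0.011 = 0.0385 USD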