{
  "gpt-3.5-turbo": {
    "max_tokens": 4097,
    "input_cost_per_token": 0.0000015,
    "output_cost_per_token": 0.000002
  },
  "gpt-3.5-turbo-0613": {
    "max_tokens": 4097,
    "input_cost_per_token": 0.0000015,
    "output_cost_per_token": 0.000002
  },
  "gpt-3.5-turbo-0301": {
    "max_tokens": 4097,
    "input_cost_per_token": 0.0000015,
    "output_cost_per_token": 0.000002
  },
  "gpt-3.5-turbo-16k": {
    "max_tokens": 16385,
    "input_cost_per_token": 0.000003,
    "output_cost_per_token": 0.000004
  },
  "gpt-3.5-turbo-16k-0613": {
    "max_tokens": 16385,
    "input_cost_per_token": 0.000003,
    "output_cost_per_token": 0.000004
  },
  "gpt-4": {
    "max_tokens": 8192,
    "input_cost_per_token": 0.000003,
    "output_cost_per_token": 0.00006
  },
  "gpt-4-0613": {
    "max_tokens": 8192,
    "input_cost_per_token": 0.000003,
    "output_cost_per_token": 0.00006
  },
  "gpt-4-32k": {
    "max_tokens": 32768,
    "input_cost_per_token": 0.00006,
    "output_cost_per_token": 0.00012
  },
  "claude-instant-1": {
    "max_tokens": 100000,
    "input_cost_per_token": 0.00000163,
    "output_cost_per_token": 0.00000551
  },
  "claude-instant-1.2": {
    "max_tokens": 100000,
    "input_cost_per_token": 0.00000163,
    "output_cost_per_token": 0.00000551
  },
  "claude-2": {
    "max_tokens": 100000,
    "input_cost_per_token": 0.00001102,
    "output_cost_per_token": 0.00003268
  },
  "text-bison-001": {
    "max_tokens": 8192,
    "input_cost_per_token": 0.000004,
    "output_cost_per_token": 0.000004
  },
  "chat-bison-001": {
    "max_tokens": 4096,
    "input_cost_per_token": 0.000002,
    "output_cost_per_token": 0.000002
  },
  "command-nightly": {
    "max_tokens": 4096,
    "input_cost_per_token": 0.000015,
    "output_cost_per_token": 0.000015
  },
  "command": {
    "max_tokens": 4096,
    "input_cost_per_token": 0.000015,
    "output_cost_per_token": 0.000015
  },
  "command-light": {
    "max_tokens": 4096,
    "input_cost_per_token": 0.000015,
    "output_cost_per_token": 0.000015
  },
  "command-medium-beta": {
    "max_tokens": 4096,
    "input_cost_per_token": 0.000015,
    "output_cost_per_token": 0.000015
  },
  "command-xlarge-beta": {
    "max_tokens": 4096,
    "input_cost_per_token": 0.000015,
    "output_cost_per_token": 0.000015
  },
  "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1": {
    "max_tokens": 4096
  },
  "together-ai-up-to-3b": {
    "input_cost_per_token": 0.0000001,
    "output_cost_per_token": 0.0000001
  },
  "together-ai-3.1b-7b": {
    "input_cost_per_token": 0.0000002,
    "output_cost_per_token": 0.0000002
  },
  "together-ai-7.1b-20b": {
    "max_tokens": 1000,
    "input_cost_per_token": 0.0000004,
    "output_cost_per_token": 0.0000004
  },
  "together-ai-20.1b-40b": {
    "input_cost_per_token": 0.000001,
    "output_cost_per_token": 0.000001
  },
  "together-ai-40.1b-70b": {
    "input_cost_per_token": 0.000003,
    "output_cost_per_token": 0.000003
  }
}