Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 02:34:29 +00:00
added sambanova cloud models (#7187)
All checks were successful
Read Version from pyproject.toml / read-version (push) Successful in 12s
Co-authored-by: Rodrigo Maldonado <rodrigo.maldonado@sambanovasystems.com>
parent 523beedb4c
commit d214d3cc3f
1 changed file with 70 additions and 0 deletions
@@ -15,6 +15,76 @@
         "supports_prompt_caching": true,
         "supports_response_schema": true
     },
+    "sambanova/Meta-Llama-3.1-8B-Instruct": {
+        "max_tokens": 16000,
+        "max_input_tokens": 16000,
+        "max_output_tokens": 16000,
+        "input_cost_per_token": 0.0000001,
+        "output_cost_per_token": 0.0000002,
+        "litellm_provider": "sambanova",
+        "supports_function_calling": true,
+        "mode": "chat"
+    },
+    "sambanova/Meta-Llama-3.1-70B-Instruct": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.0000006,
+        "output_cost_per_token": 0.0000012,
+        "litellm_provider": "sambanova",
+        "supports_function_calling": true,
+        "mode": "chat"
+    },
+    "sambanova/Meta-Llama-3.1-405B-Instruct": {
+        "max_tokens": 16000,
+        "max_input_tokens": 16000,
+        "max_output_tokens": 16000,
+        "input_cost_per_token": 0.000005,
+        "output_cost_per_token": 0.000010,
+        "litellm_provider": "sambanova",
+        "supports_function_calling": true,
+        "mode": "chat"
+    },
+    "sambanova/Meta-Llama-3.2-1B-Instruct": {
+        "max_tokens": 16000,
+        "max_input_tokens": 16000,
+        "max_output_tokens": 16000,
+        "input_cost_per_token": 0.0000004,
+        "output_cost_per_token": 0.0000008,
+        "litellm_provider": "sambanova",
+        "supports_function_calling": true,
+        "mode": "chat"
+    },
+    "sambanova/Meta-Llama-3.2-3B-Instruct": {
+        "max_tokens": 4000,
+        "max_input_tokens": 4000,
+        "max_output_tokens": 4000,
+        "input_cost_per_token": 0.0000008,
+        "output_cost_per_token": 0.0000016,
+        "litellm_provider": "sambanova",
+        "supports_function_calling": true,
+        "mode": "chat"
+    },
+    "sambanova/Qwen2.5-Coder-32B-Instruct": {
+        "max_tokens": 8000,
+        "max_input_tokens": 8000,
+        "max_output_tokens": 8000,
+        "input_cost_per_token": 0.0000015,
+        "output_cost_per_token": 0.000003,
+        "litellm_provider": "sambanova",
+        "supports_function_calling": true,
+        "mode": "chat"
+    },
+    "sambanova/Qwen2.5-72B-Instruct": {
+        "max_tokens": 8000,
+        "max_input_tokens": 8000,
+        "max_output_tokens": 8000,
+        "input_cost_per_token": 0.000002,
+        "output_cost_per_token": 0.000004,
+        "litellm_provider": "sambanova",
+        "supports_function_calling": true,
+        "mode": "chat"
+    },
     "gpt-4": {
         "max_tokens": 4096,
         "max_input_tokens": 8192,
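The new entries follow litellm's standard model-map schema: token limits, per-token pricing, provider, capability flags, and mode. As a rough usage sketch, not part of this commit, the snippet below calls one of the newly added models through litellm's completion API; the placeholder key is hypothetical, and the SAMBANOVA_API_KEY environment variable is assumed to be how the SambaNova provider is authenticated.

import os

from litellm import completion, completion_cost

# Hypothetical key for illustration only; a real SambaNova key is assumed to be
# exported as SAMBANOVA_API_KEY before running this.
os.environ.setdefault("SAMBANOVA_API_KEY", "sk-...")

response = completion(
    model="sambanova/Meta-Llama-3.1-8B-Instruct",  # provider prefix matches the new map keys
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
    max_tokens=64,
)
print(response.choices[0].message.content)

# With the pricing fields above registered, litellm can estimate the spend for the call:
# input_cost_per_token 1e-7 / output_cost_per_token 2e-7 equal $0.10 / $0.20 per 1M tokens.
print(completion_cost(completion_response=response))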
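Because litellm loads this pricing JSON into its model-cost map at import time, the per-token figures can also be read back and converted to per-million-token prices. A minimal sketch, assuming a litellm build that already includes this commit:

import litellm

# Per-token prices are easier to compare as per-million-token figures.
info = litellm.model_cost.get("sambanova/Meta-Llama-3.1-70B-Instruct", {})
per_million_in = info.get("input_cost_per_token", 0.0) * 1_000_000
per_million_out = info.get("output_cost_per_token", 0.0) * 1_000_000
print(f"${per_million_in:.2f} in / ${per_million_out:.2f} out per 1M tokens")
# Expected from the entry above: $0.60 in / $1.20 out for the 70B model.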