forked from phoenix/litellm-mirror
feat: add azure_ai llama v3.1 8B, 70B, and 405B
This commit is contained in:
parent
f64a3309d1
commit
e14ef3eeda
1 changed file with 27 additions and 0 deletions
|
@ -760,6 +760,33 @@
|
|||
"litellm_provider": "azure_ai",
|
||||
"mode": "chat"
|
||||
},
|
||||
"azure_ai/Meta-Llama-31-8B-Instruct": {
|
||||
"max_tokens": 128000,
|
||||
"max_input_tokens": 128000,
|
||||
"max_output_tokens": 128000,
|
||||
"input_cost_per_token": 0.0000003,
|
||||
"output_cost_per_token": 0.00000061,
|
||||
"litellm_provider": "azure_ai",
|
||||
"mode": "chat"
|
||||
},
|
||||
"azure_ai/Meta-Llama-31-70B-Instruct": {
|
||||
"max_tokens": 128000,
|
||||
"max_input_tokens": 128000,
|
||||
"max_output_tokens": 128000,
|
||||
"input_cost_per_token": 0.00000268,
|
||||
"output_cost_per_token": 0.00000354,
|
||||
"litellm_provider": "azure_ai",
|
||||
"mode": "chat"
|
||||
},
|
||||
"azure_ai/Meta-Llama-31-405B-Instruct": {
|
||||
"max_tokens": 128000,
|
||||
"max_input_tokens": 128000,
|
||||
"max_output_tokens": 128000,
|
||||
"input_cost_per_token": 0.00000533,
|
||||
"output_cost_per_token": 0.000016,
|
||||
"litellm_provider": "azure_ai",
|
||||
"mode": "chat"
|
||||
},
|
||||
"babbage-002": {
|
||||
"max_tokens": 16384,
|
||||
"max_input_tokens": 16384,
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue