Use specific llama2 and llama3 model names in Ollama
parent 8765e120f0
commit e3cd8e169b
1 changed file with 18 additions and 0 deletions
@@ -3354,6 +3354,15 @@
         "litellm_provider": "ollama",
         "mode": "completion"
     },
+    "ollama/llama2:7b": {
+        "max_tokens": 4096,
+        "max_input_tokens": 4096,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.0,
+        "output_cost_per_token": 0.0,
+        "litellm_provider": "ollama",
+        "mode": "completion"
+    },
     "ollama/llama2:13b": {
         "max_tokens": 4096,
         "max_input_tokens": 4096,
@@ -3390,6 +3399,15 @@
         "litellm_provider": "ollama",
         "mode": "chat"
     },
+    "ollama/llama3:8b": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.0,
+        "output_cost_per_token": 0.0,
+        "litellm_provider": "ollama",
+        "mode": "chat"
+    },
     "ollama/llama3:70b": {
         "max_tokens": 8192,
         "max_input_tokens": 8192,
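
For context, a minimal sketch of how the size-specific entries added here would be exercised through litellm. It assumes a local Ollama server on the default http://localhost:11434 with the tagged models already pulled; the prompt text is purely illustrative.

import litellm

# Call one of the newly mapped size-specific tags through litellm.
# Assumes an Ollama server is running locally with llama3:8b pulled.
response = litellm.completion(
    model="ollama/llama3:8b",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    api_base="http://localhost:11434",  # default Ollama endpoint (assumption)
)
print(response.choices[0].message.content)

# The metadata added in this commit is loaded into litellm's model map,
# so the context-window values can be looked up by the same key:
print(litellm.model_cost["ollama/llama3:8b"]["max_tokens"])  # 8192 per this diff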