Use specific llama2 and llama3 model names in Ollama

This commit is contained in:
Kyrylo Yefimenko 2024-08-15 12:27:51 +01:00
parent 8765e120f0
commit e3cd8e169b

View file

@ -3354,6 +3354,15 @@
"litellm_provider": "ollama",
"mode": "completion"
},
"ollama/llama2:7b": {
"max_tokens": 4096,
"max_input_tokens": 4096,
"max_output_tokens": 4096,
"input_cost_per_token": 0.0,
"output_cost_per_token": 0.0,
"litellm_provider": "ollama",
"mode": "completion"
},
"ollama/llama2:13b": {
"max_tokens": 4096,
"max_input_tokens": 4096,
@ -3390,6 +3399,15 @@
"litellm_provider": "ollama",
"mode": "chat"
},
"ollama/llama3:8b": {
"max_tokens": 8192,
"max_input_tokens": 8192,
"max_output_tokens": 8192,
"input_cost_per_token": 0.0,
"output_cost_per_token": 0.0,
"litellm_provider": "ollama",
"mode": "chat"
},
"ollama/llama3:70b": {
"max_tokens": 8192,
"max_input_tokens": 8192,