forked from phoenix/litellm-mirror
Added Ollama LLMs for LLaMa and Mistral
parent dde2e3249a
commit 256107c999
2 changed files with 19 additions and 1 deletion
@@ -6620,7 +6620,7 @@ def get_max_tokens(model: str):
             raise Exception()
     except:
         raise Exception(
-            "This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json"
+            f"Model {model} from provider {custom_llm_provider} isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json"
        )
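The hunk above comes from litellm's get_max_tokens helper (the changed file isn't named in this view) and only swaps the generic error string for an f-string that names the unmapped model and its provider. A minimal sketch of the effect, assuming litellm is installed; the model name below is hypothetical, and the message wording is transcribed from the diff rather than verified against a running install:

```python
import litellm

try:
    # "ollama/unmapped-model" is a made-up name chosen to miss the cost map
    litellm.get_max_tokens("ollama/unmapped-model")
except Exception as err:
    # With this change the message should read roughly:
    #   Model unmapped-model from provider ollama isn't mapped yet. Add it here -
    #   https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json
    print(err)
```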
@@ -2749,6 +2749,24 @@
         "litellm_provider": "ollama",
         "mode": "completion"
     },
+    "ollama/llama3": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.00000010,
+        "output_cost_per_token": 0.00000010,
+        "litellm_provider": "ollama",
+        "mode": "chat"
+    },
+    "ollama/llama3:70b": {
+        "max_tokens": 8192,
+        "max_input_tokens": 8192,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0.0,
+        "output_cost_per_token": 0.0,
+        "litellm_provider": "ollama",
+        "mode": "chat"
+    },
     "ollama/mistral": {
         "max_tokens": 8192,
         "max_input_tokens": 8192,
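The added entries register ollama/llama3 and ollama/llama3:70b in what is presumably litellm's model_prices_and_context_window.json (the second changed file isn't named in this view, but the neighbouring ollama/mistral entry and the URL in the error message point to it). A minimal usage sketch, assuming a local Ollama server on the default port with the llama3 model pulled; the api_base value and the prompt are illustrative, not from the commit:

```python
import litellm

# Context-window metadata now resolves from the new entry (expected: 8192)
print(litellm.get_max_tokens("ollama/llama3"))

# The "ollama/" prefix routes the call to a local Ollama server
response = litellm.completion(
    model="ollama/llama3",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    api_base="http://localhost:11434",  # assumption: default Ollama endpoint
)
print(response.choices[0].message.content)
```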