Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-03 09:21:45 +00:00
Add 3.3 70B to Ollama inference provider
parent bae197c37e
commit d159fbf46f
1 changed file with 4 additions and 0 deletions
@@ -100,6 +100,10 @@ model_aliases = [
         "llama3.2-vision:90b",
         CoreModelId.llama3_2_90b_vision_instruct.value,
     ),
+    build_model_alias_with_just_provider_model_id(
+        "llama3.3:70b",
+        CoreModelId.llama3_3_70b_instruct.value,
+    ),
     # The Llama Guard models don't have their full fp16 versions
     # so we are going to alias their default version to the canonical SKU
     build_model_alias(
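For context, here is a minimal sketch of what this kind of alias helper conceptually produces: a record tying the Ollama model tag "llama3.3:70b" to a canonical model id so the provider can resolve either name. This is not the actual llama-stack implementation; the ModelAlias dataclass, its fields, and the literal string used in place of CoreModelId.llama3_3_70b_instruct.value are illustrative assumptions.

from dataclasses import dataclass, field
from typing import List


@dataclass
class ModelAlias:
    # The id the provider (Ollama) uses, e.g. the tag "llama3.3:70b".
    provider_model_id: str
    # Extra names that should resolve to the same model; empty in this sketch.
    aliases: List[str] = field(default_factory=list)
    # Canonical model descriptor, standing in for a CoreModelId value.
    llama_model: str = ""


def build_model_alias_with_just_provider_model_id(
    provider_model_id: str, model_descriptor: str
) -> ModelAlias:
    # Register only the provider's own tag; no additional alias names.
    return ModelAlias(
        provider_model_id=provider_model_id,
        aliases=[],
        llama_model=model_descriptor,
    )


if __name__ == "__main__":
    alias = build_model_alias_with_just_provider_model_id(
        "llama3.3:70b",
        "Llama3.3-70B-Instruct",  # stand-in for CoreModelId.llama3_3_70b_instruct.value
    )
    print(alias)

On the Ollama side, the matching weights would typically be fetched with "ollama pull llama3.3:70b" before the provider can serve inference requests against this alias.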