Add Llama 3.3 70B to Ollama inference provider

Aidan Do 2024-12-22 20:03:11 +11:00
parent bae197c37e
commit d159fbf46f


@@ -100,6 +100,10 @@ model_aliases = [
"llama3.2-vision:90b", "llama3.2-vision:90b",
CoreModelId.llama3_2_90b_vision_instruct.value, CoreModelId.llama3_2_90b_vision_instruct.value,
), ),
build_model_alias_with_just_provider_model_id(
"llama3.3:70b",
CoreModelId.llama3_3_70b_instruct.value,
),
# The Llama Guard models don't have their full fp16 versions
# so we are going to alias their default version to the canonical SKU
build_model_alias(
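
For context, the new entry maps the tag Ollama exposes (`llama3.3:70b`) to the canonical `CoreModelId.llama3_3_70b_instruct` identifier used elsewhere in Llama Stack. Below is a minimal, hypothetical sketch of that alias-resolution idea; the ModelAlias dataclass, resolve_canonical_id helper, and the literal canonical-id strings are assumptions for illustration, not the provider's actual implementation.

# Illustrative sketch only -- not the actual llama-stack code.
# It mimics how a provider-specific Ollama tag such as "llama3.3:70b"
# could be resolved to a canonical model identifier before dispatch.
from dataclasses import dataclass
from typing import Optional


@dataclass
class ModelAlias:
    provider_model_id: str   # the tag Ollama serves, e.g. "llama3.3:70b"
    canonical_model_id: str  # e.g. CoreModelId.llama3_3_70b_instruct.value


# Hypothetical alias table mirroring the entries visible in the diff above.
MODEL_ALIASES = [
    ModelAlias("llama3.2-vision:90b", "Llama3.2-90B-Vision-Instruct"),
    ModelAlias("llama3.3:70b", "Llama3.3-70B-Instruct"),
]


def resolve_canonical_id(provider_model_id: str) -> Optional[str]:
    """Return the canonical model id registered for an Ollama tag, if any."""
    for alias in MODEL_ALIASES:
        if alias.provider_model_id == provider_model_id:
            return alias.canonical_model_id
    return None


if __name__ == "__main__":
    # With the new alias entry in place, the 70B tag now resolves.
    print(resolve_canonical_id("llama3.3:70b"))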