feat: add api.llama provider, llama-guard-4 model (#2058)
This PR adds a llama-stack inference provider for `api.llama.com`, as well as entries for Llama-Guard-4 and the updated Prompt-Guard models.
parent: 934446ddb4
commit: 4d0bfbf984
21 changed files with 1526 additions and 47 deletions
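The new provider described above targets Meta's hosted Llama API. Purely as an illustration of the service being wired in (not code from this PR), a request against its OpenAI-compatible surface might look like the sketch below; the base URL, model identifier, and `LLAMA_API_KEY` variable are assumptions, not details taken from this commit.

```python
# Illustrative only: exercises api.llama.com through an OpenAI-compatible
# client. The base_url, model name, and env var are assumptions and may
# not match what the new llama-stack provider actually uses.
import os

from openai import OpenAI

client = OpenAI(
    base_url="https://api.llama.com/compat/v1/",  # assumed compat endpoint
    api_key=os.environ["LLAMA_API_KEY"],          # assumed credential variable
)

response = client.chat.completions.create(
    model="Llama-4-Maverick-17B-128E-Instruct-FP8",  # example model id
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.choices[0].message.content)
```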
Excerpt from one of the changed files:

```diff
@@ -84,7 +84,7 @@ class ModelList(Subcommand):
         )
 
     def _run_model_list_cmd(self, args: argparse.Namespace) -> None:
-        from .safety_models import prompt_guard_model_sku
+        from .safety_models import prompt_guard_model_skus
 
         if args.downloaded:
             return _run_model_list_downloaded_cmd()
@@ -96,7 +96,7 @@ class ModelList(Subcommand):
         ]
 
         rows = []
-        for model in all_registered_models() + [prompt_guard_model_sku()]:
+        for model in all_registered_models() + prompt_guard_model_skus():
             if not args.show_all and not model.is_featured:
                 continue
 
```
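The hunks above replace the single-SKU helper `prompt_guard_model_sku()` with a pluralized `prompt_guard_model_skus()`, so the `llama model list` command can surface both the original and the updated Prompt-Guard models mentioned in the description. A minimal sketch of what such a helper could look like follows; the `PromptGuardModel` dataclass, its fields, and the model descriptors are illustrative assumptions, not the actual `safety_models` module.

```python
# Hypothetical sketch only: the real safety_models module in llama-stack
# may define its SKUs differently. Field names and descriptors here are
# illustrative assumptions.
from dataclasses import dataclass


@dataclass
class PromptGuardModel:
    model_id: str          # descriptor shown by `llama model list`
    huggingface_repo: str  # where the weights are hosted
    is_featured: bool = True


def prompt_guard_model_skus() -> list[PromptGuardModel]:
    """Return every Prompt-Guard SKU as a list.

    Returning a list (rather than a single model) lets callers concatenate
    it directly with all_registered_models().
    """
    return [
        PromptGuardModel(
            model_id="Prompt-Guard-86M",
            huggingface_repo="meta-llama/Prompt-Guard-86M",
        ),
        PromptGuardModel(
            model_id="Llama-Prompt-Guard-2-86M",
            huggingface_repo="meta-llama/Llama-Prompt-Guard-2-86M",
        ),
    ]
```

With a list-returning helper, the old `[prompt_guard_model_sku()]` wrapping in the loop is no longer needed, which is exactly the change made in the second hunk.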