Update provider_type -> inline::llama-guard in templates, update run.yaml

Ashwin Bharambe 2024-11-11 09:12:17 -08:00
parent 15ffceb533
commit 4971113f92
24 changed files with 121 additions and 98 deletions


@@ -19,16 +19,16 @@ providers:
         url: https://api.fireworks.ai/inference
         # api_key: <ENTER_YOUR_API_KEY>
   safety:
     - provider_id: meta0
-      provider_type: meta-reference
+      provider_type: inline::llama-guard
       config:
-        llama_guard_shield:
-          model: Llama-Guard-3-1B
-          excluded_categories: []
-          disable_input_check: false
-          disable_output_check: false
-        prompt_guard_shield:
-          model: Prompt-Guard-86M
+        model: Llama-Guard-3-1B
+        excluded_categories: []
+    - provider_id: meta1
+      provider_type: inline::prompt-guard
+      config:
+        model: Prompt-Guard-86M
   memory:
     - provider_id: meta0
       provider_type: meta-reference
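Taken together, the hunk replaces the single meta-reference safety provider, which nested both a llama_guard_shield and a prompt_guard_shield in its config, with two separate inline providers. A rough sketch of how the safety section of an updated run.yaml would read after this commit is shown below; it is assembled from the added lines above, the disable_input_check / disable_output_check flags from the old block do not carry over, and the comments are explanatory rather than part of the template:

safety:
  # Llama Guard checks, formerly the nested llama_guard_shield block
  - provider_id: meta0
    provider_type: inline::llama-guard
    config:
      model: Llama-Guard-3-1B
      excluded_categories: []
  # Prompt Guard checks, formerly the prompt_guard_shield block,
  # now registered as a provider of its own
  - provider_id: meta1
    provider_type: inline::prompt-guard
    config:
      model: Prompt-Guard-86M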