Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-27 18:50:41 +00:00
fix safety using inference (#99)
This commit is contained in:
parent 5c4f73d52f
commit 82f420c4f0
2 changed files with 4 additions and 0 deletions
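Summary of the change: the shield implementation gains an import of Message and Role from llama_models.llama3.api.datatypes, and the meta-reference safety provider spec declares Api.inference under api_dependencies, so the safety provider is resolved with an inference implementation available.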
@@ -18,6 +18,7 @@ from transformers import (
 )

 from .base import CANNED_RESPONSE_TEXT, OnViolationAction, ShieldBase, ShieldResponse
+from llama_models.llama3.api.datatypes import Message, Role


 SAFE_RESPONSE = "safe"
@@ -28,6 +28,9 @@ def available_providers() -> List[ProviderSpec]:
             ],
             module="llama_stack.providers.impls.meta_reference.safety",
             config_class="llama_stack.providers.impls.meta_reference.safety.SafetyConfig",
+            api_dependencies=[
+                Api.inference,
+            ],
         ),
         remote_provider_spec(
             api=Api.safety,
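For context, the second hunk lands inside the meta-reference entry of available_providers(). Below is a minimal sketch of what the full entry plausibly looks like after the change; InlineProviderSpec, provider_id, pip_packages, and the import path are assumptions based on llama-stack registry conventions of the time, while module, config_class, and the api_dependencies block come from the diff itself.

# Sketch only: InlineProviderSpec, provider_id, pip_packages, and the
# import path are assumptions; module, config_class, and
# api_dependencies are taken from the diff above.
from typing import List

from llama_stack.distribution.datatypes import (  # assumed import path
    Api,
    InlineProviderSpec,
    ProviderSpec,
)


def available_providers() -> List[ProviderSpec]:
    return [
        InlineProviderSpec(
            api=Api.safety,
            provider_id="meta-reference",  # assumed value
            pip_packages=["transformers"],  # assumed value
            module="llama_stack.providers.impls.meta_reference.safety",
            config_class="llama_stack.providers.impls.meta_reference.safety.SafetyConfig",
            # Added by this commit: declares the inference API as a
            # dependency, so the safety provider is instantiated with
            # access to an inference implementation.
            api_dependencies=[
                Api.inference,
            ],
        ),
        # remote_provider_spec(api=Api.safety, ...) entry from the diff
        # context elided here.
    ]

This is presumably the fix the commit title refers to: the safety provider now runs its checks through the inference API it declares as a dependency, rather than standing entirely on its own.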
|
Loading…
Add table
Add a link
Reference in a new issue