chore: support default model in moderations API

# What does this PR do?
Makes the `model` argument to `run_moderation` optional (`model: str | None = None`) across the safety providers so the moderations API can support a default model. Providers that still require an explicit model (code scanner, Llama Guard) now raise a `ValueError` when none is supplied; Prompt Guard and the NVIDIA adapter continue to raise `NotImplementedError`.

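The hunks below cover only the provider-side signature change; where a default model actually gets resolved is not shown here. As a minimal sketch under stated assumptions, a routing layer could fall back to a configured default when the caller omits `model` (the `SafetyRouter` class and `default_moderation_model` field are illustrative names, not code from this PR):

```python
# Illustrative sketch only: SafetyRouter and default_moderation_model are
# assumed names, not code from this PR.
class SafetyRouter:
    def __init__(self, providers: dict[str, "Safety"], default_moderation_model: str | None = None):
        self.providers = providers
        self.default_moderation_model = default_moderation_model

    async def run_moderation(self, input: str | list[str], model: str | None = None) -> "ModerationObject":
        # Fall back to the configured default when the caller omits a model.
        model = model or self.default_moderation_model
        if model is None:
            raise ValueError("No moderation model specified and no default is configured.")
        provider = self.providers[model]
        return await provider.run_moderation(input=input, model=model)
```
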
## Test Plan
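As a rough stand-in, a sketch of a unit test exercising the guard clause added to the code scanner provider below (the `code_scanner_impl` fixture and the use of pytest-asyncio are assumptions, not part of this PR):

```python
import pytest

# Sketch only: `code_scanner_impl` is an assumed fixture yielding a
# MetaReferenceCodeScannerSafetyImpl instance; pytest-asyncio is assumed.
@pytest.mark.asyncio
async def test_run_moderation_without_model_raises(code_scanner_impl):
    # The provider cannot pick a default model itself, so omitting one
    # should raise the ValueError added in this PR.
    with pytest.raises(ValueError, match="requires a model identifier"):
        await code_scanner_impl.run_moderation(input="some text", model=None)
```
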
Commit f6098fa73a (parent 7b90e0e9c8) by Eric Huang, 2025-10-22 15:01:43 -07:00
23 changed files with 212 additions and 36 deletions

```diff
@@ -101,7 +101,10 @@ class MetaReferenceCodeScannerSafetyImpl(Safety):
                 metadata=metadata,
             )

-    async def run_moderation(self, input: str | list[str], model: str) -> ModerationObject:
+    async def run_moderation(self, input: str | list[str], model: str | None = None) -> ModerationObject:
+        if model is None:
+            raise ValueError("Code scanner moderation requires a model identifier.")
         inputs = input if isinstance(input, list) else [input]
         results = []
```

```diff
@@ -200,7 +200,10 @@ class LlamaGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
         return await impl.run(messages)

-    async def run_moderation(self, input: str | list[str], model: str) -> ModerationObject:
+    async def run_moderation(self, input: str | list[str], model: str | None = None) -> ModerationObject:
+        if model is None:
+            raise ValueError("Llama Guard moderation requires a model identifier.")
         if isinstance(input, list):
             messages = input.copy()
         else:
```

```diff
@@ -63,7 +63,7 @@ class PromptGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
         return await self.shield.run(messages)

-    async def run_moderation(self, input: str | list[str], model: str) -> ModerationObject:
+    async def run_moderation(self, input: str | list[str], model: str | None = None) -> ModerationObject:
         raise NotImplementedError("run_moderation is not implemented for Prompt Guard")
```

```diff
@@ -66,7 +66,7 @@ class NVIDIASafetyAdapter(Safety, ShieldsProtocolPrivate):
         self.shield = NeMoGuardrails(self.config, shield.shield_id)
         return await self.shield.run(messages)

-    async def run_moderation(self, input: str | list[str], model: str) -> ModerationObject:
+    async def run_moderation(self, input: str | list[str], model: str | None = None) -> ModerationObject:
         raise NotImplementedError("NVIDIA safety provider currently does not implement run_moderation")
```