mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-15 14:08:00 +00:00)
update to openai_chat_completion

parent 13fb8126a1
commit 3f25eb4fb1

1 changed file with 3 additions and 5 deletions
@@ -231,8 +231,6 @@ class LlamaGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
         # convert to user messages format with role
         messages = [UserMessage(content=m) for m in messages]
 
-        # Use the inference API's model resolution instead of hardcoded mappings
-        # This allows the shield to work with any registered model
         # Determine safety categories based on the model type
         # For known Llama Guard models, use specific categories
         if model in LLAMA_GUARD_MODEL_IDS:
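As an aside, the `messages = [UserMessage(content=m) for m in messages]` context line in the hunk above wraps raw strings into role-tagged messages before the shield runs. A minimal, self-contained sketch of that pattern follows; the `UserMessage` dataclass here is an illustrative stand-in, not the real llama-stack type:

from dataclasses import dataclass


# Illustrative stand-in for llama-stack's UserMessage; only the fields
# exercised by the context line above are modeled.
@dataclass
class UserMessage:
    content: str
    role: str = "user"


raw_inputs = ["hello there", "summarize this document"]
# Wrap each plain string in a role-tagged message, as in the hunk above.
messages = [UserMessage(content=m) for m in raw_inputs]
assert all(m.role == "user" for m in messages)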
@@ -406,12 +404,12 @@ class LlamaGuardShield:
         # TODO: Add Image based support for OpenAI Moderations
         shield_input_message = self.build_text_shield_input(messages)
 
-        response = await self.inference_api.chat_completion(
-            model_id=self.model,
+        response = await self.inference_api.openai_chat_completion(
+            model=self.model,
             messages=[shield_input_message],
             stream=False,
         )
-        content = response.completion_message.content
+        content = response.choices[0].message.content
         content = content.strip()
         return self.get_moderation_object(content)
 
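Taken together, the second hunk migrates the shield from the legacy `chat_completion` call to the OpenAI-compatible `openai_chat_completion`: the model parameter is renamed from `model_id` to `model`, and the generated text moves from `response.completion_message.content` to the OpenAI-style `response.choices[0].message.content`. A minimal, self-contained sketch of that response-shape difference follows; the dataclasses are illustrative stand-ins, not llama-stack or OpenAI SDK types:

from dataclasses import dataclass, field


# Illustrative stand-ins mirroring the OpenAI-style response shape that
# openai_chat_completion returns; these are not the real llama-stack types.
@dataclass
class _Message:
    content: str


@dataclass
class _Choice:
    message: _Message


@dataclass
class _ChatCompletion:
    choices: list[_Choice] = field(default_factory=list)


def extract_shield_verdict(response: _ChatCompletion) -> str:
    # Legacy API:            response.completion_message.content
    # OpenAI-compatible API: response.choices[0].message.content
    return response.choices[0].message.content.strip()


resp = _ChatCompletion(choices=[_Choice(message=_Message(content=" safe\n"))])
assert extract_shield_verdict(resp) == "safe"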