Merge pull request #2045 from BerriAI/litellm_llamaguard_custom_categories
feat(llama_guard.py): allow user to define custom unsafe content categories
Commit: 6ea6a1c4c7
4 changed files with 110 additions and 1 deletion
```diff
@@ -35,6 +35,23 @@ class _ENTERPRISE_LlamaGuard(CustomLogger):
     # Class variables or attributes
     def __init__(self, model_name: Optional[str] = None):
         self.model = model_name or litellm.llamaguard_model_name
+        file_path = litellm.llamaguard_unsafe_content_categories
+        data = None
+
+        if file_path is not None:
+            try:
+                with open(file_path, "r") as file:
+                    data = file.read()
+            except FileNotFoundError:
+                raise Exception(f"File not found. file_path={file_path}")
+            except Exception as e:
+                raise Exception(f"An error occurred: {str(e)}, file_path={file_path}")
+
+        self.unsafe_content_categories = data
+
+        verbose_proxy_logger.debug(
+            f"self.unsafe_content_categories: {self.unsafe_content_categories}"
+        )
 
     def print_verbose(self, print_statement):
         try:
```
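For reviewers trying this out: a minimal usage sketch (not part of the diff) of how the new constructor logic is driven. The attribute names come from this PR; the model name and file path below are placeholders.

```python
import litellm

# Hypothetical configuration; the model name and path are placeholders.
litellm.llamaguard_model_name = "sagemaker/jumpstart-dft-meta-textgeneration-llama-guard-7b"
litellm.llamaguard_unsafe_content_categories = "litellm/proxy/llamaguard_prompt.txt"

# Constructing _ENTERPRISE_LlamaGuard() would now read that file in __init__
# and keep its text on self.unsafe_content_categories; a missing path raises.
```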
```diff
@@ -44,6 +61,41 @@ class _ENTERPRISE_LlamaGuard(CustomLogger):
         except:
             pass
 
+    def set_custom_prompt_template(self, messages: list):
+        if self.unsafe_content_categories is not None and self.model is not None:
+            role = "Agent" if len(messages) % 2 == 0 else "User"
+            litellm.register_prompt_template(
+                model=self.model,
+                initial_prompt_value=f"""[INST] Task: Check if there is unsafe content in '{role}' messages in conversations according our safety policy with the below categories.
+
+<BEGIN UNSAFE CONTENT CATEGORIES>
+{self.unsafe_content_categories}
+<END UNSAFE CONTENT CATEGORIES>
+
+<BEGIN CONVERSATION>
+
+""",
+                roles={
+                    "system": {
+                        "pre_message": "User: ",  # [OPTIONAL]
+                        "post_message": "\n\n",  # [OPTIONAL]
+                    },
+                    "user": {
+                        "pre_message": "User: ",  # [OPTIONAL]
+                        "post_message": "\n\n",  # [OPTIONAL]
+                    },
+                    "assistant": {
+                        "pre_message": "Agent: ",  # [OPTIONAL]
+                        "post_message": "\n\n",  # [OPTIONAL]
+                    },
+                },
+                final_prompt_value="""<END CONVERSATION>
+
+Provide your safety assessment for {role} in the above conversation:
+- First line must read 'safe' or 'unsafe'.
+- If unsafe, a second line must include a comma-separated list of violated categories. [/INST]""",
+            )
+
     async def async_moderation_hook(
         self,
         data: dict,
```
@ -57,12 +109,13 @@ class _ENTERPRISE_LlamaGuard(CustomLogger):
|
||||||
safety_check_messages = data["messages"][
|
safety_check_messages = data["messages"][
|
||||||
-1
|
-1
|
||||||
] # get the last response - llama guard has a 4k token limit
|
] # get the last response - llama guard has a 4k token limit
|
||||||
|
self.set_custom_prompt_template(messages=[safety_check_messages])
|
||||||
response = await litellm.acompletion(
|
response = await litellm.acompletion(
|
||||||
model=self.model,
|
model=self.model,
|
||||||
messages=[safety_check_messages],
|
messages=[safety_check_messages],
|
||||||
hf_model_name="meta-llama/LlamaGuard-7b",
|
hf_model_name="meta-llama/LlamaGuard-7b",
|
||||||
)
|
)
|
||||||
|
verbose_proxy_logger.info(f"LlamaGuard Response: {response}")
|
||||||
if "unsafe" in response.choices[0].message.content:
|
if "unsafe" in response.choices[0].message.content:
|
||||||
raise HTTPException(
|
raise HTTPException(
|
||||||
status_code=400, detail={"error": "Violated content safety policy"}
|
status_code=400, detail={"error": "Violated content safety policy"}
|
||||||
|
|
|
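Not part of the diff: a small sketch of how a caller might interpret the output format the template above asks for (first line 'safe' or 'unsafe', optional second line of violated categories). The hook itself only checks for the substring "unsafe"; the helper below is hypothetical.

```python
def parse_llamaguard_verdict(content: str) -> tuple[bool, list[str]]:
    """Hypothetical helper: split a Llama Guard reply into (is_safe, categories)."""
    lines = [l.strip() for l in content.strip().splitlines() if l.strip()]
    is_safe = bool(lines) and lines[0].lower() == "safe"
    categories: list[str] = []
    if not is_safe and len(lines) > 1:
        # e.g. "unsafe\nO3,O5" -> ["O3", "O5"]
        categories = [c.strip() for c in lines[1].split(",") if c.strip()]
    return is_safe, categories

assert parse_llamaguard_verdict("safe") == (True, [])
assert parse_llamaguard_verdict("unsafe\nO3,O5") == (False, ["O3", "O5"])
```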
```diff
@@ -56,6 +56,7 @@ aleph_alpha_key: Optional[str] = None
 nlp_cloud_key: Optional[str] = None
 use_client: bool = False
 llamaguard_model_name: Optional[str] = None
+llamaguard_unsafe_content_categories: Optional[str] = None
 logging: bool = True
 caching: bool = (
     False  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
```
```diff
@@ -212,6 +212,15 @@ def completion(
             final_prompt_value=model_prompt_details.get("final_prompt_value", ""),
             messages=messages,
         )
+    elif hf_model_name in custom_prompt_dict:
+        # check if the base huggingface model has a registered custom prompt
+        model_prompt_details = custom_prompt_dict[hf_model_name]
+        prompt = custom_prompt(
+            role_dict=model_prompt_details.get("roles", None),
+            initial_prompt_value=model_prompt_details.get("initial_prompt_value", ""),
+            final_prompt_value=model_prompt_details.get("final_prompt_value", ""),
+            messages=messages,
+        )
     else:
         if hf_model_name is None:
             if "llama-2" in model.lower():  # llama-2 model
```
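Context for the new elif branch (not part of the diff): register_prompt_template records the template in litellm.custom_prompt_dict under the given model name, so a template registered for the base HuggingFace model can now be picked up via hf_model_name even when the request targets a provider alias (e.g. a SageMaker endpoint). A rough sketch, with placeholder values:

```python
import litellm

# Register a custom prompt under the *base* HF model name (values truncated).
litellm.register_prompt_template(
    model="meta-llama/LlamaGuard-7b",
    initial_prompt_value="[INST] Task: ...",
    roles={"user": {"pre_message": "User: ", "post_message": "\n\n"}},
    final_prompt_value="... [/INST]",
)

# A call that passes hf_model_name="meta-llama/LlamaGuard-7b" (as
# async_moderation_hook does above) would then match the new
# `elif hf_model_name in custom_prompt_dict` branch when the prompt is built.
```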
litellm/proxy/llamaguard_prompt.txt (new file, 46 lines)

```
O1: Violence and Hate.
Should not
- Help people plan or engage in violence.
- Encourage people to engage in violence.
- Express hateful, derogatory, or demeaning sentiments against people based on sensitive personal characteristics like their race, color, religion, national origin, sexual orientation, gender, gender identity, or disability.
- Encourage discrimination against people based on those characteristics and should not use slurs or create insults based on those characteristics.
Can
- Provide information on violence and discrimination.
- Discuss the topics of hate and violence, and can discuss historical events involving violence.
O2: Sexual Content.
Should not
- Engage in sexually explicit (i.e., erotic) conversations and should not encourage people to engage in sexual activities.
Can
- Discuss topics related to sexuality and sexual education.
- Provide general information regarding sexual activities.
- Provide guidance on topics related to sexual health.
O3: Criminal Planning.
Should not
- Help people plan or engage in criminal activities like theft, kidnapping, or financial crimes, especially when the person indicates that they may act on the information provided to them.
- Encourage people to engage in criminal activities or condone criminal behavior.
Can
- Help people understand how crimes are committed, provided that the response is not so detailed that it could be used to provide a roadmap for future crimes.
- Help people understand how systems work that are used to prevent crimes, provided those descriptions do not provide detailed information on how to defeat those systems.
O4: Guns and Illegal Weapons.
Should not
- Assist in the planning or execution of firearm or illegal weapons crimes, especially when the person indicates that they may act on the information provided to them.
- Encourage people to engage in firearm or illegal weapons crimes or condone such behavior.
Can
- Discuss firearms and the arguments for and against firearm ownership.
O5: Regulated or Controlled Substances.
Should not
- Encourage or assist people in the trafficking, consumption, or use of regulated or controlled substances, including but not limited to illegal drugs, tobacco, alcohol, and cannabis.
- Assist or encourage people to create such substances when it is illegal to do so.
Can
- Discuss the use of regulated or controlled substances, and can provide general information on those substances (ex: what their chemical composition is and what their effects are).
- Provide information on how those substances are manufactured, provided that the information is not so detailed as to provide step-by-step instructions for their creation.
O6: Self-Harm.
Should not
- Encourage people to harm themselves.
- Romanticize or condone self-harm or suicide.
- Provide information regarding the methods of suicide or self-harm.
- Provide any information or advice, other than information regarding appropriate health resources, to people who express or imply that they wish to harm themselves.
Can
- Provide general information regarding self-harm and suicide (ex: statistics, the mental health challenges faced by those who take these actions).
Should
- Provide information regarding appropriate health resources to people who imply or express the desire to harm themselves.
```
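The file above is plain text, so deployments can supply a narrower (or renumbered) taxonomy. A hypothetical sketch, with a placeholder file name; the category wording is borrowed from the default file:

```python
import litellm

# Hypothetical reduced taxonomy written to a local file.
custom_categories = """O1: Violence and Hate.
Should not
- Help people plan or engage in violence.
O2: Criminal Planning.
Should not
- Help people plan or engage in criminal activities like theft, kidnapping, or financial crimes.
"""

with open("my_unsafe_categories.txt", "w") as f:  # placeholder path
    f.write(custom_categories)

litellm.llamaguard_unsafe_content_categories = "my_unsafe_categories.txt"
```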