@inproceedings{roy-etal-2023-probing,
title = "Probing {LLM}s for hate speech detection: strengths and vulnerabilities",
author = "Roy, Sarthak and
Harshvardhan, Ashish and
Mukherjee, Animesh and
Saha, Punyajoy",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-emnlp.407",
doi = "10.18653/v1/2023.findings-emnlp.407",
pages = "6116--6128",
abstract = "Recently efforts have been made by social media platforms as well as researchers to detect hateful or toxic language using large language models. However, none of these works aim to use explanation, additional context and victim community information in the detection process. We utilise different prompt variation, input information and evaluate large language models in zero shot setting (without adding any in-context examples). We select two large language models (GPT-3.5 and text-davinci) and three datasets - HateXplain, implicit hate and ToxicSpans. We find that on average including the target information in the pipeline improves the model performance substantially ($\sim20-30\%$) over the baseline across the datasets. There is also a considerable effect of adding the rationales/explanations into the pipeline ($\sim10-20\%$) over the baseline across the datasets. In addition, we further provide a typology of the error cases where these large language models fail to (i) classify and (ii) explain the reason for the decisions they take. Such vulnerable points automatically constitute {`}jailbreak{'} prompts for these models and industry scale safeguard techniques need to be developed to make the models robust against such prompts.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="roy-etal-2023-probing">
<titleInfo>
<title>Probing LLMs for hate speech detection: strengths and vulnerabilities</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sarthak</namePart>
<namePart type="family">Roy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ashish</namePart>
<namePart type="family">Harshvardhan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Animesh</namePart>
<namePart type="family">Mukherjee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Punyajoy</namePart>
<namePart type="family">Saha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2023</title>
</titleInfo>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Pino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kalika</namePart>
<namePart type="family">Bali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recently efforts have been made by social media platforms as well as researchers to detect hateful or toxic language using large language models. However, none of these works aim to use explanation, additional context and victim community information in the detection process. We utilise different prompt variation, input information and evaluate large language models in zero shot setting (without adding any in-context examples). We select two large language models (GPT-3.5 and text-davinci) and three datasets - HateXplain, implicit hate and ToxicSpans. We find that on average including the target information in the pipeline improves the model performance substantially (~20-30%) over the baseline across the datasets. There is also a considerable effect of adding the rationales/explanations into the pipeline (~10-20%) over the baseline across the datasets. In addition, we further provide a typology of the error cases where these large language models fail to (i) classify and (ii) explain the reason for the decisions they take. Such vulnerable points automatically constitute ‘jailbreak’ prompts for these models and industry scale safeguard techniques need to be developed to make the models robust against such prompts.</abstract>
<identifier type="citekey">roy-etal-2023-probing</identifier>
<identifier type="doi">10.18653/v1/2023.findings-emnlp.407</identifier>
<location>
<url>https://aclanthology.org/2023.findings-emnlp.407</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>6116</start>
<end>6128</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Probing LLMs for hate speech detection: strengths and vulnerabilities
%A Roy, Sarthak
%A Harshvardhan, Ashish
%A Mukherjee, Animesh
%A Saha, Punyajoy
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Findings of the Association for Computational Linguistics: EMNLP 2023
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F roy-etal-2023-probing
%X Recently efforts have been made by social media platforms as well as researchers to detect hateful or toxic language using large language models. However, none of these works aim to use explanation, additional context and victim community information in the detection process. We utilise different prompt variation, input information and evaluate large language models in zero shot setting (without adding any in-context examples). We select two large language models (GPT-3.5 and text-davinci) and three datasets - HateXplain, implicit hate and ToxicSpans. We find that on average including the target information in the pipeline improves the model performance substantially (~20-30%) over the baseline across the datasets. There is also a considerable effect of adding the rationales/explanations into the pipeline (~10-20%) over the baseline across the datasets. In addition, we further provide a typology of the error cases where these large language models fail to (i) classify and (ii) explain the reason for the decisions they take. Such vulnerable points automatically constitute ‘jailbreak’ prompts for these models and industry scale safeguard techniques need to be developed to make the models robust against such prompts.
%R 10.18653/v1/2023.findings-emnlp.407
%U https://aclanthology.org/2023.findings-emnlp.407
%U https://doi.org/10.18653/v1/2023.findings-emnlp.407
%P 6116-6128
Markdown (Informal)
[Probing LLMs for hate speech detection: strengths and vulnerabilities](https://aclanthology.org/2023.findings-emnlp.407) (Roy et al., Findings 2023)
ACL