BibTeX
@inproceedings{fisher-etal-2020-debiasing,
title = "Debiasing knowledge graph embeddings",
author = "Fisher, Joseph and
Mittal, Arpit and
Palfrey, Dave and
Christodoulopoulos, Christos",
editor = "Webber, Bonnie and
Cohn, Trevor and
He, Yulan and
Liu, Yang",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.emnlp-main.595",
doi = "10.18653/v1/2020.emnlp-main.595",
pages = "7332--7345",
abstract = "It has been shown that knowledge graph embeddings encode potentially harmful social biases, such as the information that women are more likely to be nurses, and men more likely to be bankers. As graph embeddings begin to be used more widely in NLP pipelines, there is a need to develop training methods which remove such biases. Previous approaches to this problem both significantly increase the training time, by a factor of eight or more, and decrease the accuracy of the model substantially. We present a novel approach, in which all embeddings are trained to be neutral to sensitive attributes such as gender by default using an adversarial loss. We then add sensitive attributes back on in whitelisted cases. Training time only marginally increases over a baseline model, and the debiased embeddings perform almost as accurately in the triple prediction task as their non-debiased counterparts.",
}

MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="fisher-etal-2020-debiasing">
    <titleInfo>
      <title>Debiasing knowledge graph embeddings</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Joseph</namePart>
      <namePart type="family">Fisher</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Arpit</namePart>
      <namePart type="family">Mittal</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dave</namePart>
      <namePart type="family">Palfrey</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Christos</namePart>
      <namePart type="family">Christodoulopoulos</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Bonnie</namePart>
        <namePart type="family">Webber</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Trevor</namePart>
        <namePart type="family">Cohn</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yulan</namePart>
        <namePart type="family">He</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yang</namePart>
        <namePart type="family">Liu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>It has been shown that knowledge graph embeddings encode potentially harmful social biases, such as the information that women are more likely to be nurses, and men more likely to be bankers. As graph embeddings begin to be used more widely in NLP pipelines, there is a need to develop training methods which remove such biases. Previous approaches to this problem both significantly increase the training time, by a factor of eight or more, and decrease the accuracy of the model substantially. We present a novel approach, in which all embeddings are trained to be neutral to sensitive attributes such as gender by default using an adversarial loss. We then add sensitive attributes back on in whitelisted cases. Training time only marginally increases over a baseline model, and the debiased embeddings perform almost as accurately in the triple prediction task as their non-debiased counterparts.</abstract>
    <identifier type="citekey">fisher-etal-2020-debiasing</identifier>
    <identifier type="doi">10.18653/v1/2020.emnlp-main.595</identifier>
    <location>
      <url>https://aclanthology.org/2020.emnlp-main.595</url>
    </location>
    <part>
      <date>2020-11</date>
      <extent unit="page">
        <start>7332</start>
        <end>7345</end>
      </extent>
    </part>
  </mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T Debiasing knowledge graph embeddings
%A Fisher, Joseph
%A Mittal, Arpit
%A Palfrey, Dave
%A Christodoulopoulos, Christos
%Y Webber, Bonnie
%Y Cohn, Trevor
%Y He, Yulan
%Y Liu, Yang
%S Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F fisher-etal-2020-debiasing
%X It has been shown that knowledge graph embeddings encode potentially harmful social biases, such as the information that women are more likely to be nurses, and men more likely to be bankers. As graph embeddings begin to be used more widely in NLP pipelines, there is a need to develop training methods which remove such biases. Previous approaches to this problem both significantly increase the training time, by a factor of eight or more, and decrease the accuracy of the model substantially. We present a novel approach, in which all embeddings are trained to be neutral to sensitive attributes such as gender by default using an adversarial loss. We then add sensitive attributes back on in whitelisted cases. Training time only marginally increases over a baseline model, and the debiased embeddings perform almost as accurately in the triple prediction task as their non-debiased counterparts.
%R 10.18653/v1/2020.emnlp-main.595
%U https://aclanthology.org/2020.emnlp-main.595
%U https://doi.org/10.18653/v1/2020.emnlp-main.595
%P 7332-7345

Markdown (Informal)
[Debiasing knowledge graph embeddings](https://aclanthology.org/2020.emnlp-main.595) (Fisher et al., EMNLP 2020)

ACL
Joseph Fisher, Arpit Mittal, Dave Palfrey, and Christos Christodoulopoulos. 2020. Debiasing knowledge graph embeddings. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7332–7345, Online. Association for Computational Linguistics.
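
Sketch (Python, illustrative)
The abstract describes training all embeddings to be neutral to sensitive attributes such as gender by default, via an adversarial loss, with attributes re-added only in whitelisted cases. The sketch below illustrates one common realization of that idea, gradient reversal: an adversary tries to predict a sensitive attribute from an entity embedding, and the reversed gradient trains the embedding to defeat it. Everything here (the TransE-style scorer, the GradReverse helper, dimensions, toy tensors, equal loss weighting) is an assumption for illustration, not the authors' implementation, and the whitelisting step is omitted.

import torch
import torch.nn as nn
import torch.nn.functional as F

class GradReverse(torch.autograd.Function):
    # Identity on the forward pass; negates the gradient on the backward pass,
    # so the embeddings are trained to fool the sensitive-attribute adversary.
    @staticmethod
    def forward(ctx, x):
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return -grad_output

class DebiasedKGE(nn.Module):
    def __init__(self, n_entities, n_relations, dim=200, n_sensitive=2):
        super().__init__()
        self.ent = nn.Embedding(n_entities, dim)
        self.rel = nn.Embedding(n_relations, dim)
        # The adversary tries to recover a sensitive attribute (e.g. gender)
        # from an entity embedding; gradient reversal pushes the embedding to
        # carry as little of that signal as possible by default.
        self.adversary = nn.Linear(dim, n_sensitive)

    def score(self, h, r, t):
        # TransE-style plausibility score, chosen here only for brevity.
        return -(self.ent(h) + self.rel(r) - self.ent(t)).norm(p=1, dim=-1)

    def adversary_logits(self, e):
        return self.adversary(GradReverse.apply(self.ent(e)))

# One illustrative optimization step on a single toy triple.
model = DebiasedKGE(n_entities=1000, n_relations=50)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
h, r, t = torch.tensor([0]), torch.tensor([1]), torch.tensor([2])
attr = torch.tensor([1])  # toy sensitive-attribute label for entity h

link_loss = -model.score(h, r, t).mean()  # simplified; real training adds negative samples
adv_loss = F.cross_entropy(model.adversary_logits(h), attr)
opt.zero_grad()
(link_loss + adv_loss).backward()
opt.step()

Because the adversary here is a single extra linear layer trained jointly with the embeddings, the per-step overhead of this scheme is small, which is at least consistent with the abstract's claim that training time increases only marginally over the baseline.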