@inproceedings{lim-liew-2022-english-malay,
  author    = {Lim, Ying Hao and
               Liew, Jasy Suet Yan},
  title     = {{E}nglish-{M}alay Word Embeddings Alignment for Cross-lingual Emotion Classification with Hierarchical Attention Network},
  editor    = {Barnes, Jeremy and
               De Clercq, Orph{\'e}e and
               Barriere, Valentin and
               Tafreshi, Shabnam and
               Alqahtani, Sawsan and
               Sedoc, Jo{\~a}o and
               Klinger, Roman and
               Balahur, Alexandra},
  booktitle = {Proceedings of the 12th Workshop on Computational Approaches to Subjectivity, Sentiment {\&} Social Media Analysis},
  month     = may,
  year      = {2022},
  address   = {Dublin, Ireland},
  publisher = {Association for Computational Linguistics},
  pages     = {113--124},
  doi       = {10.18653/v1/2022.wassa-1.12},
  url       = {https://aclanthology.org/2022.wassa-1.12},
  abstract  = {The main challenge in English-Malay cross-lingual emotion classification is that there are no Malay training emotion corpora. Given that machine translation could fall short in contextually complex tweets, we only limited machine translation to the word level. In this paper, we bridge the language gap between English and Malay through cross-lingual word embeddings constructed using singular value decomposition. We pre-trained our hierarchical attention model using English tweets and fine-tuned it using a set of gold standard Malay tweets. Our model uses significantly less computational resources compared to the language models. Experimental results show that the performance of our model is better than mBERT in zero-shot learning by 2.4{\%} and Malay BERT by 0.8{\%} when a limited number of Malay tweets is available. In exchange for 6 {--} 7 times less in computational time, our model only lags behind mBERT and XLM-RoBERTa by a margin of 0.9 {--} 4.3 {\%} in few-shot learning. Also, the word-level attention could be transferred to the Malay tweets accurately using the cross-lingual word embeddings.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lim-liew-2022-english-malay">
<titleInfo>
<title>English-Malay Word Embeddings Alignment for Cross-lingual Emotion Classification with Hierarchical Attention Network</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ying</namePart>
<namePart type="given">Hao</namePart>
<namePart type="family">Lim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jasy</namePart>
<namePart type="given">Suet</namePart>
<namePart type="given">Yan</namePart>
<namePart type="family">Liew</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 12th Workshop on Computational Approaches to Subjectivity, Sentiment &amp; Social Media Analysis</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jeremy</namePart>
<namePart type="family">Barnes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Orphée</namePart>
<namePart type="family">De Clercq</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Valentin</namePart>
<namePart type="family">Barriere</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shabnam</namePart>
<namePart type="family">Tafreshi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sawsan</namePart>
<namePart type="family">Alqahtani</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">João</namePart>
<namePart type="family">Sedoc</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roman</namePart>
<namePart type="family">Klinger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexandra</namePart>
<namePart type="family">Balahur</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The main challenge in English-Malay cross-lingual emotion classification is that there are no Malay training emotion corpora. Given that machine translation could fall short in contextually complex tweets, we only limited machine translation to the word level. In this paper, we bridge the language gap between English and Malay through cross-lingual word embeddings constructed using singular value decomposition. We pre-trained our hierarchical attention model using English tweets and fine-tuned it using a set of gold standard Malay tweets. Our model uses significantly less computational resources compared to the language models. Experimental results show that the performance of our model is better than mBERT in zero-shot learning by 2.4% and Malay BERT by 0.8% when a limited number of Malay tweets is available. In exchange for 6 – 7 times less in computational time, our model only lags behind mBERT and XLM-RoBERTa by a margin of 0.9 – 4.3 % in few-shot learning. Also, the word-level attention could be transferred to the Malay tweets accurately using the cross-lingual word embeddings.</abstract>
<identifier type="citekey">lim-liew-2022-english-malay</identifier>
<identifier type="doi">10.18653/v1/2022.wassa-1.12</identifier>
<location>
<url>https://aclanthology.org/2022.wassa-1.12</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>113</start>
<end>124</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T English-Malay Word Embeddings Alignment for Cross-lingual Emotion Classification with Hierarchical Attention Network
%A Lim, Ying Hao
%A Liew, Jasy Suet Yan
%Y Barnes, Jeremy
%Y De Clercq, Orphée
%Y Barriere, Valentin
%Y Tafreshi, Shabnam
%Y Alqahtani, Sawsan
%Y Sedoc, João
%Y Klinger, Roman
%Y Balahur, Alexandra
%S Proceedings of the 12th Workshop on Computational Approaches to Subjectivity, Sentiment & Social Media Analysis
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F lim-liew-2022-english-malay
%X The main challenge in English-Malay cross-lingual emotion classification is that there are no Malay training emotion corpora. Given that machine translation could fall short in contextually complex tweets, we only limited machine translation to the word level. In this paper, we bridge the language gap between English and Malay through cross-lingual word embeddings constructed using singular value decomposition. We pre-trained our hierarchical attention model using English tweets and fine-tuned it using a set of gold standard Malay tweets. Our model uses significantly less computational resources compared to the language models. Experimental results show that the performance of our model is better than mBERT in zero-shot learning by 2.4% and Malay BERT by 0.8% when a limited number of Malay tweets is available. In exchange for 6 – 7 times less in computational time, our model only lags behind mBERT and XLM-RoBERTa by a margin of 0.9 – 4.3 % in few-shot learning. Also, the word-level attention could be transferred to the Malay tweets accurately using the cross-lingual word embeddings.
%R 10.18653/v1/2022.wassa-1.12
%U https://aclanthology.org/2022.wassa-1.12
%U https://doi.org/10.18653/v1/2022.wassa-1.12
%P 113-124
Markdown (Informal)
[English-Malay Word Embeddings Alignment for Cross-lingual Emotion Classification with Hierarchical Attention Network](https://aclanthology.org/2022.wassa-1.12) (Lim & Liew, WASSA 2022)
ACL