2019
Maharjan, Suraj; Mave, Deepthi; Shrestha, Prasha; Montes, Manuel; Gonzalez, Fabio A; Solorio, Thamar
Jointly Learning Author and Annotated Character N-gram Embeddings: A Case Study in Literary Text Conference
In Proceedings of the 2019 Conference on Recent Advances in Natural Language Processing (RANLP), ACL, Varna, Bulgaria, 2019.
Abstract | Links | BibTeX | Tags: Authorship Attribution, Book Likability Prediction, Multitask, Neural Language Model, Transfer learning
@conference{Maharjan2019,
  title     = {Jointly Learning Author and Annotated Character {N-gram} Embeddings: A Case Study in Literary Text},
  author    = {Maharjan, Suraj and Mave, Deepthi and Shrestha, Prasha and Montes, Manuel and Gonz{\'a}lez, Fabio A. and Solorio, Thamar},
  url       = {https://www.aclweb.org/anthology/R19-1080/},
  year      = {2019},
  date      = {2019-09-02},
  booktitle = {Proceedings of the 2019 Conference on Recent Advances in Natural Language Processing ({RANLP})},
  pages     = {684--692},
  publisher = {ACL},
  address   = {Varna, Bulgaria},
  abstract  = {An author's way of presenting a story through his/her writing style has a great impact on whether the story will be liked by readers or not. In this paper, we learn representations for authors of literary texts together with representations for character n-grams annotated with their functional roles. We train a neural character n-gram based language model using an external corpus of literary texts and transfer learned representations for use in downstream tasks. We show that augmenting the knowledge from external works of authors produces results competitive with other style-based methods for book likability prediction, genre classification, and authorship attribution.},
  keywords  = {Authorship Attribution, Book Likability Prediction, Multitask, Neural Language Model, Transfer learning},
  pubstate  = {published},
  tppubtype = {conference}
}
An author’s way of presenting a story through his/her writing style has a great impact on whether the story will be liked by readers or not. In this paper, we learn representations for authors of literary texts together with representations for character n-grams annotated with their functional roles. We train a neural character n-gram based language model using an external corpus of literary texts and transfer learned representations for use in downstream tasks. We show that augmenting the knowledge from external works of authors produces results competitive with other style-based methods for book likability prediction, genre classification, and authorship attribution.