We describe the Sentiment Analysis in Twitter task, run as part of SemEval-2014. It is a continuation of the last year's task that ran successfully as part of SemEval-2013. As in 2013, this was the most popular SemEval task; a total of 46 teams contributed 27 submissions for subtask A (21 teams) and 50 submissions for subtask B (44 teams). This year, we introduced three new test sets: (i) regular tweets, (ii) sarcastic tweets, and (iii) LiveJournal sentences. We further tested on (iv) 2013 tweets, and (v) 2013 SMS messages. The highest F1-score on (i) was achieved by NRC-Canada at 86.63 for subtask A and by TeamX at 70.96 for subtask B.
Description
This paper outlines the SemEval-2014 task on Sentiment Analysis in Twitter, highlighting the methodologies and challenges in sentiment analysis of Twitter messages.
%0 Conference Proceedings
%1 Preslav2014
%A Nakov, Preslav
%A Ritter, Alan
%A Rosenthal, Sara
%A Sebastiani, Fabrizio
%A Stoyanov, Veselin
%D 2014
%K sentiment-analysis Twitter SemEval related_works machine-learning related_works_benchmark posted_with_chatgpt
%P 73-80
%R 10.3115/v1/S14-2009
%T SemEval-2014 Task 9: Sentiment Analysis in Twitter
%U https://www.semanticscholar.org/paper/2b74f47b1336c948d2371a30209ec56c936763a9
%X We describe the Sentiment Analysis in Twitter task, run as part of SemEval-2014. It is a continuation of the last year's task that ran successfully as part of SemEval-2013. As in 2013, this was the most popular SemEval task; a total of 46 teams contributed 27 submissions for subtask A (21 teams) and 50 submissions for subtask B (44 teams). This year, we introduced three new test sets: (i) regular tweets, (ii) sarcastic tweets, and (iii) LiveJournal sentences. We further tested on (iv) 2013 tweets, and (v) 2013 SMS messages. The highest F1-score on (i) was achieved by NRC-Canada at 86.63 for subtask A and by TeamX at 70.96 for subtask B.
@comment{Fixed: @JournalArticle is not a standard entry type; this is a workshop
paper (SemEval-2014 proceedings, ACL Anthology S14-2009), hence @inproceedings
with a booktitle. Also: pages single hyphen -> double hyphen; month = {8} ->
aug macro; abbreviated author name expanded; title acronyms brace-protected;
abstract typos and non-ASCII apostrophes repaired.}
@inproceedings{Preslav2014,
  abstract    = {We describe the Sentiment Analysis in Twitter task, run as part of SemEval-2014. It is a continuation of the last year's task that ran successfully as part of SemEval-2013. As in 2013, this was the most popular SemEval task; a total of 46 teams contributed 27 submissions for subtask A (21 teams) and 50 submissions for subtask B (44 teams). This year, we introduced three new test sets: (i) regular tweets, (ii) sarcastic tweets, and (iii) LiveJournal sentences. We further tested on (iv) 2013 tweets, and (v) 2013 SMS messages. The highest F1-score on (i) was achieved by NRC-Canada at 86.63 for subtask A and by TeamX at 70.96 for subtask B.},
  added-at    = {2023-09-22T12:19:36.000+0200},
  author      = {Nakov, Preslav and Ritter, Alan and Rosenthal, Sara and Sebastiani, Fabrizio and Stoyanov, Veselin},
  biburl      = {https://www.bibsonomy.org/bibtex/2554f11c9350be6235d3194b682383e2e/tomvoelker},
  booktitle   = {Proceedings of the 8th International Workshop on Semantic Evaluation ({SemEval} 2014)},
  day         = 1,
  description = {This paper outlines the SemEval-2014 task on Sentiment Analysis in Twitter, highlighting the methodologies and challenges in sentiment analysis of Twitter messages.},
  doi         = {10.3115/v1/S14-2009},
  interhash   = {274e6d0a609a6beb3a128e5bb54a9b74},
  intrahash   = {554f11c9350be6235d3194b682383e2e},
  keywords    = {sentiment-analysis Twitter SemEval related_works machine-learning related_works_benchmark posted_with_chatgpt},
  month       = aug,
  pages       = {73--80},
  timestamp   = {2023-09-22T12:19:36.000+0200},
  title       = {{SemEval}-2014 Task 9: Sentiment Analysis in {Twitter}},
  url         = {https://www.semanticscholar.org/paper/2b74f47b1336c948d2371a30209ec56c936763a9},
  year        = {2014},
}