% nlp_a2019_tp3/NLP-TP3.bib

@inproceedings{schultes_leave_2013,
title = {Leave a {Comment}! {An} {In}-{Depth} {Analysis} of {User} {Comments} on {YouTube}},
abstract = {User comments are the most popular but also extremely controversial form of communication on YouTube. Their public image is very poor; users generally expect that most comments will be of little value or even in thoroughly bad taste. Nevertheless, heaps of comments continue to be posted every day. We propose an explanation for this contradiction in user attitudes and behaviour based on a new comment classification approach which captures salient aspects of YouTube comments. We show that, based on our new classification, we are able to perform very fast lightweight semantic video analysis. In addition, our results indicate that users' video perceptions (Likes and Dislikes) are indeed influenced by the dispersion of valuable and inferior comments.},
booktitle = {Wirtschaftsinformatik},
author = {Schultes, Peter and Dorner, Verena and Lehner, Franz},
year = {2013},
keywords = {Video content analysis}
}
@book{halte_les_2018,
address = {Limoges},
title = {Les émoticônes et des interjections dans le tchat},
isbn = {9782359352399},
url = {http://ariane.ulaval.ca/cgi-bin/recherche.cgi?qu=a2767912},
language = {French},
publisher = {Lambert-Lucas},
author = {Halté, Pierre},
year = {2018},
keywords = {Emoticons (Computer science), Online chat, French (Language) Discourse analysis, Interjections, Semiotics and social media, Sound symbolism},
annote = {Bibliography: pages 209-216.}
}
@book{georgalou_discourse_2017,
address = {London},
title = {Discourse and identity on {Facebook}},
isbn = {9781474289122},
url = {http://ariane.ulaval.ca/cgi-bin/recherche.cgi?qu=a2650955},
language = {English},
publisher = {Bloomsbury Academic, an imprint of Bloomsbury Publishing Plc},
author = {Georgalou, Mariza},
year = {2017},
keywords = {Discourse analysis Social aspects, Discourse analysis Technology, Facebook (Website) Social aspects, Social networks (Internet) Social aspects},
annote = {Bibliography: pages 281-305.}
}
@inproceedings{liebeskind_comment_2018,
address = {Cham},
title = {Comment {Relevance} {Classification} in {Facebook}},
isbn = {978-3-319-77116-8},
abstract = {Social posts and their comments are rich and interesting social data. In this study, we aim to classify comments as relevant or irrelevant to the content of their posts. Since the comments in social media are usually short, their bag-of-words (BoW) representations are highly sparse. We investigate four semantic vector representations for the relevance classification task. We investigate different types of large unlabeled data for learning the distributional representations. We also empirically demonstrate that expanding the input of the task to include the post text does not improve the classification performance over using only the comment text. We show that representing the comment in the post space is a cheap and good representation for comment relevance classification.},
booktitle = {Computational {Linguistics} and {Intelligent} {Text} {Processing}},
publisher = {Springer International Publishing},
author = {Liebeskind, Chaya and Liebeskind, Shmuel and HaCohen-Kerner, Yaakov},
editor = {Gelbukh, Alexander},
year = {2018},
pages = {241--254}
}
@misc{noauthor_exportcomments.com_2019,
title = {exportcomments.com},
url = {https://exportcomments.com/},
month = nov,
year = {2019}
}
@misc{ou-yang_newspaper3k:_2019,
title = {Newspaper3k: {Article} scraping \& curation},
url = {https://github.com/codelucas/newspaper/},
author = {Ou-Yang, Lucas},
year = {2019}
}
@inproceedings{mckinney_data_2010,
title = {Data {Structures} for {Statistical} {Computing} in {Python}},
booktitle = {Proceedings of the 9th {Python} in {Science} {Conference}},
author = {McKinney, Wes},
editor = {van der Walt, Stéfan and Millman, Jarrod},
year = {2010},
pages = {51--56}
}
@incollection{baxter_discourse-analytic_2010,
title = {Discourse-analytic approaches to text and talk},
isbn = {978-0-8264-8993-7},
abstract = {This chapter explores the different ways in which discourse-analytic approaches reveal the meaningfulness of text and talk. It reviews four diverse approaches to discourse analysis of particular value for current research in linguistics: Conversation Analysis (CA), Discourse Analysis (DA), Critical Discourse Analysis (CDA) and Feminist Post-structuralist Discourse Analysis (FPDA). Each approach is examined in terms of its background, motivation, key features, and possible strengths and limitations in relation to the field of linguistics. A key way to schematize discourse-analytic methodology is in terms of its relationship between microanalytical approaches, which examine the finer detail of linguistic interactions in transcripts, and macroanalytical approaches, which consider how broader social processes work through language (Heller, 2001). This chapter assesses whether there is a strength in a discourse-analytic approach that aligns itself exclusively with either a micro- or macrostrategy, or whether, as Heller suggests, the field needs to find a way of undoing the micro-macro dichotomy in order to produce richer, more complex insights within linguistic research.},
language = {English},
booktitle = {Research {Methods} in {Linguistics}},
publisher = {Continuum},
author = {Baxter, Judith A.},
editor = {Litosseliti, Lia},
year = {2010},
pages = {117--137}
}
@article{robb_how_2014,
title = {How {Capital} {Letters} {Became} {Internet} {Code} for {Yelling}},
url = {https://newrepublic.com/article/117390/netiquette-capitalization-how-caps-became-code-yelling},
urldate = {2019-12-21},
journal = {The New Republic},
author = {Robb, Alice},
month = apr,
year = {2014}
}
@inproceedings{bird_nltk:_2002,
title = {{NLTK}: {The} {Natural} {Language} {Toolkit}},
booktitle = {Proceedings of the {ACL} {Workshop} on {Effective} {Tools} and {Methodologies} for {Teaching} {Natural} {Language} {Processing} and {Computational} {Linguistics}},
address = {Philadelphia},
publisher = {Association for Computational Linguistics},
author = {Loper, Edward and Bird, Steven},
year = {2002}
}
@misc{coulombe_french_2019,
title = {French {LEFFF} {Lemmatizer}},
url = {https://github.com/ClaudeCoulombe/FrenchLefffLemmatizer},
abstract = {A French lemmatizer in Python based on the LEFFF (Lexique des Formes Fléchies du Français / Lexicon of French Inflected Forms), a large-scale morphological and syntactic lexicon for French. A lemmatizer returns the lemma, that is, the dictionary entry of a word. In French, the lemmatization of a verb returns the infinitive; for other words, lemmatization returns the masculine singular form.},
urldate = {2019-12-26},
author = {Coulombe, Claude},
month = jul,
year = {2019}
}
@book{noauthor_universal_2019,
title = {Universal {Dependencies} 2.5},
copyright = {Licence Universal Dependencies v2.5},
url = {http://hdl.handle.net/11234/1-3105},
year = {2019},
annote = {LINDAT/CLARIN digital library at the Institute of Formal and Applied Linguistics (ÚFAL), Faculty of Mathematics and Physics, Charles University}
}
@inproceedings{manning_stanford_2014,
title = {The {Stanford} {CoreNLP} {Natural} {Language} {Processing} {Toolkit}},
url = {http://www.aclweb.org/anthology/P/P14/P14-5010},
booktitle = {Association for {Computational} {Linguistics} ({ACL}) {System} {Demonstrations}},
author = {Manning, Christopher D. and Surdeanu, Mihai and Bauer, John and Finkel, Jenny and Bethard, Steven J. and McClosky, David},
year = {2014},
pages = {55--60}
}
@misc{taehoon_kim_emoji_2019,
title = {emoji},
url = {https://github.com/carpedm20/emoji/},
abstract = {emoji terminal output for Python},
author = {Kim, Taehoon and Wurster, Kevin},
month = sep,
year = {2019}
}