Here is a list of BibTeX entries for my publications. See also my Google Scholar profile page.
Bibliography
-
Federico Cabitza, Andrea Campagner and Valerio Basile:
Toward a Perspectivist Turn in Ground Truthing for Predictive Computing
@article{Cabitza_Campagner_Basile_2023,
  title = {Toward a Perspectivist Turn in Ground Truthing for Predictive Computing},
  volume = {37},
  url = {https://ojs.aaai.org/index.php/AAAI/article/view/25840},
  doi = {10.1609/aaai.v37i6.25840},
  number = {6},
  journal = {Proceedings of the AAAI Conference on Artificial Intelligence},
  author = {Cabitza, Federico and Campagner, Andrea and Basile, Valerio},
  year = {2023},
  month = {Jun.},
  pages = {6860--6868}
}
-
Valerio Basile, Tommaso Caselli, Anna Koufakou and Viviana Patti:
Automatically Computing Connotative Shifts of Lexical Items
@inproceedings{10.1007/978-3-031-08473-7_39,
  author = "Basile, Valerio and Caselli, Tommaso and Koufakou, Anna and Patti, Viviana",
  editor = "Rosso, Paolo and Basile, Valerio and Mart{\'i}nez, Raquel and M{\'e}tais, Elisabeth and Meziane, Farid",
  title = "Automatically Computing Connotative Shifts of Lexical Items",
  booktitle = "Natural Language Processing and Information Systems",
  year = "2022",
  publisher = "Springer International Publishing",
  address = "Cham",
  pages = "425--436",
  abstract = "Connotation is a dimension of lexical meaning at the semantic-pragmatic interface. Connotations can be used to express point of views, perspectives, and implied emotional associations. Variations in connotations of the same lexical item can occur at different level of analysis: from individuals, to community of speech, specific domains, and even time. In this paper, we present a simple yet effective method to assign connotative values to selected target items and to quantify connotation shifts. We test our method via a set of experiments using different social media data (Reddit and Twitter) and languages (English and Italian). While we kept the connotative axis (i.e., the polarity associated to a lexical item) fixed, we investigated connotation shifts along two dimensions: the first target shifts across communities of speech and domain while the second targets shifts in time. Our results indicate the validity of the proposed method and its potential application for the identification of connotation shifts and application to automatically induce specific connotation lexica.",
  isbn = "978-3-031-08473-7"
}
-
Endang Wahyu Pamungkas, Valerio Basile and Viviana Patti:
A joint learning approach with knowledge injection for zero-shot cross-lingual hate speech detection
@article{PAMUNGKAS2021102544,
  title = {A joint learning approach with knowledge injection for zero-shot cross-lingual hate speech detection},
  journal = {Information Processing \& Management},
  volume = {58},
  number = {4},
  pages = {102544},
  year = {2021},
  issn = {0306-4573},
  doi = {10.1016/j.ipm.2021.102544},
  url = {https://www.sciencedirect.com/science/article/pii/S0306457321000510},
  author = {Endang Wahyu Pamungkas and Valerio Basile and Viviana Patti},
  keywords = {Hate speech detection, Cross-lingual classification, Social media, Transfer learning, Zero-shot learning},
  abstract = {Hate speech is an increasingly important societal issue in the era of digital communication. Hateful expressions often make use of figurative language and, although they represent, in some sense, the dark side of language, they are also often prime examples of creative use of language. While hate speech is a global phenomenon, current studies on automatic hate speech detection are typically framed in a monolingual setting. In this work, we explore hate speech detection in low-resource languages by transferring knowledge from a resource-rich language, English, in a zero-shot learning fashion. We experiment with traditional and recent neural architectures, and propose two joint-learning models, using different multilingual language representations to transfer knowledge between pairs of languages. We also evaluate the impact of additional knowledge in our experiment, by incorporating information from a multilingual lexicon of abusive words. The results show that our joint-learning models achieve the best performance on most languages. However, a simple approach that uses machine translation and a pre-trained English language model achieves a robust performance. In contrast, Multilingual BERT fails to obtain a good performance in cross-lingual hate speech detection. We also experimentally found that the external knowledge from a multilingual abusive lexicon is able to improve the models’ performance, specifically in detecting the positive class. The results of our experimental evaluation highlight a number of challenges and issues in this particular task. One of the main challenges is related to the issue of current benchmarks for hate speech detection, in particular how bias related to the topical focus in the datasets influences the classification performance. The insufficient ability of current multilingual language models to transfer knowledge between languages in the specific hate speech detection task also remain an open problem. However, our experimental evaluation and our qualitative analysis show how the explicit integration of linguistic knowledge from a structured abusive language lexicon helps to alleviate this issue.}
}
-
Valerio Basile, Michael Fell, Tommaso Fornaciari, Dirk Hovy, Silviu Paun, Barbara Plank, Massimo Poesio, Alexandra Uma:
We Need to Consider Disagreement in Evaluation
@inproceedings{basile-etal-2021-need,
  title = "We Need to Consider Disagreement in Evaluation",
  author = "Basile, Valerio and Fell, Michael and Fornaciari, Tommaso and Hovy, Dirk and Paun, Silviu and Plank, Barbara and Poesio, Massimo and Uma, Alexandra",
  booktitle = "Proceedings of the 1st Workshop on Benchmarking: Past, Present and Future",
  month = aug,
  year = "2021",
  address = "Online",
  publisher = "Association for Computational Linguistics",
  url = "https://aclanthology.org/2021.bppf-1.3",
  doi = "10.18653/v1/2021.bppf-1.3",
  pages = "15--21",
  abstract = "Evaluation is of paramount importance in data-driven research fields such as Natural Language Processing (NLP) and Computer Vision (CV). Current evaluation practice largely hinges on the existence of a single {``}ground truth{''} against which we can meaningfully compare the prediction of a model. However, this comparison is flawed for two reasons. 1) In many cases, more than one answer is correct. 2) Even where there is a single answer, disagreement among annotators is ubiquitous, making it difficult to decide on a gold standard. We argue that the current methods of adjudication, agreement, and evaluation need serious reconsideration. Some researchers now propose to minimize disagreement and to fix datasets. We argue that this is a gross oversimplification, and likely to conceal the underlying complexity. Instead, we suggest that we need to better capture the sources of disagreement to improve today{'}s evaluation practice. We discuss three sources of disagreement: from the annotator, the data, and the context, and show how this affects even seemingly objective tasks. Datasets with multiple annotations are becoming more common, as are methods to integrate disagreement into modeling. The logical next step is to extend this to evaluation."
}
-
Valerio Basile:
It’s the End of the Gold Standard as We Know It. Leveraging Non-aggregated Data for Better Evaluation and Explanation of Subjective Tasks
@inproceedings{10.1007/978-3-030-77091-4_26,
  author = "Basile, Valerio",
  editor = "Baldoni, Matteo and Bandini, Stefania",
  title = "It's the End of the Gold Standard as We Know It. Leveraging Non-aggregated Data for Better Evaluation and Explanation of Subjective Tasks",
  booktitle = "AIxIA 2020 -- Advances in Artificial Intelligence",
  year = "2021",
  publisher = "Springer International Publishing",
  address = "Cham",
  pages = "441--453",
  abstract = "Supervised machine learning, in particular in Natural Language Processing, is based on the creation of high-quality gold standard datasets for training and benchmarking. The de-facto standard annotation methodologies work well for traditionally relevant tasks in Computational Linguistics. However, critical issues are surfacing when applying old techniques to the study of highly subjective phenomena such as irony and sarcasm, or abusive and offensive language. This paper calls for a paradigm shift, away from monolithic, majority-aggregated gold standards, and towards an inclusive framework that preserves the personal opinions and culturally-driven perspectives of the annotators. New training sets and supervised machine learning techniques will have to be adapted in order to create fair, inclusive, and ultimately more informed models of subjective semantic and pragmatic phenomena. The arguments are backed by a synthetic experiment showing the lack of correlation between the difficulty of an annotation task, its degree of subjectivity, and the quality of the predictions of a supervised classifier trained on the resulting data. A further experiment on real data highlights the beneficial impact of the proposed methodologies in terms of explainability of perspective-aware hate speech detection.",
  isbn = "978-3-030-77091-4"
}
-
Anna Koufakou, Endang Wahyu Pamungkas, Valerio Basile, Viviana Patti:
HurtBERT: Incorporating Lexical Features with BERT for the Detection of Abusive Language
@inproceedings{koufakou-etal-2020-hurtbert,
  title = "{H}urt{BERT}: Incorporating Lexical Features with {BERT} for the Detection of Abusive Language",
  author = "Koufakou, Anna and Pamungkas, Endang Wahyu and Basile, Valerio and Patti, Viviana",
  booktitle = "Proceedings of the Fourth Workshop on Online Abuse and Harms",
  month = nov,
  year = "2020",
  address = "Online",
  publisher = "Association for Computational Linguistics",
  url = "https://aclanthology.org/2020.alw-1.5",
  doi = "10.18653/v1/2020.alw-1.5",
  pages = "34--43",
  abstract = "The detection of abusive or offensive remarks in social texts has received significant attention in research. In several related shared tasks, BERT has been shown to be the state-of-the-art. In this paper, we propose to utilize lexical features derived from a hate lexicon towards improving the performance of BERT in such tasks. We explore different ways to utilize the lexical features in the form of lexicon-based encodings at the sentence level or embeddings at the word level. We provide an extensive dataset evaluation that addresses in-domain as well as cross-domain detection of abusive content to render a complete picture. Our results indicate that our proposed models combining BERT with lexical features help improve over a baseline BERT model in many of our in-domain and cross-domain experiments."
}
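As an aside, the sentence-level strategy described in the abstract (a BERT sentence representation combined with lexicon-based encodings) can be sketched roughly as follows. This is an illustrative reconstruction, not the authors' released code: the base checkpoint, the number of lexicon features, and the small feed-forward encoder are placeholder assumptions.

# Rough sketch of a BERT + lexicon-feature classifier (illustrative only,
# not the authors' implementation). Requires torch and transformers.
import torch
import torch.nn as nn
from transformers import AutoModel

class LexiconAugmentedClassifier(nn.Module):
    def __init__(self, model_name="bert-base-uncased", n_lexicon_feats=17, n_labels=2):
        super().__init__()
        self.bert = AutoModel.from_pretrained(model_name)  # assumed base checkpoint
        # Dense encoding of sentence-level lexicon counts (sizes are placeholders)
        self.lexicon_encoder = nn.Sequential(nn.Linear(n_lexicon_feats, 32), nn.ReLU())
        self.classifier = nn.Linear(self.bert.config.hidden_size + 32, n_labels)

    def forward(self, input_ids, attention_mask, lexicon_feats):
        # [CLS] token representation as the sentence encoding
        cls = self.bert(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state[:, 0]
        lex = self.lexicon_encoder(lexicon_feats)
        # Concatenate the two views and classify
        return self.classifier(torch.cat([cls, lex], dim=-1))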
-
Fabio Poletto, Valerio Basile, Manuela Sanguinetti, Cristina Bosco and Viviana Patti:
Resources and benchmark corpora for hate speech detection: a systematic review
@article{10.1007/s10579-020-09502-8,
  author = {Poletto, Fabio and Basile, Valerio and Sanguinetti, Manuela and Bosco, Cristina and Patti, Viviana},
  title = {Resources and Benchmark Corpora for Hate Speech Detection: A Systematic Review},
  year = {2021},
  issue_date = {Jun 2021},
  publisher = {Springer-Verlag},
  address = {Berlin, Heidelberg},
  volume = {55},
  number = {2},
  issn = {1574-020X},
  url = {https://doi.org/10.1007/s10579-020-09502-8},
  doi = {10.1007/s10579-020-09502-8},
  abstract = {Hate Speech in social media is a complex phenomenon, whose detection has recently gained significant traction in the Natural Language Processing community, as attested by several recent review works. Annotated corpora and benchmarks are key resources, considering the vast number of supervised approaches that have been proposed. Lexica play an important role as well for the development of hate speech detection systems. In this review, we systematically analyze the resources made available by the community at large, including their development methodology, topical focus, language coverage, and other factors. The results of our analysis highlight a heterogeneous, growing landscape, marked by several issues and venues for improvement.},
  journal = {Language Resources and Evaluation},
  month = {jun},
  pages = {477--523},
  numpages = {47},
  keywords = {Systematic review, Benchmark corpora, Natural Language Processing shared tasks, Hate speech detection}
}
-
Valerio Basile, Christian Cagnazzo:
Litescale: A Lightweight Tool for Best-worst Scaling Annotation
@inproceedings{basile-cagnazzo-2021-litescale,
  title = "Litescale: A Lightweight Tool for Best-worst Scaling Annotation",
  author = "Basile, Valerio and Cagnazzo, Christian",
  booktitle = "Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2021)",
  month = sep,
  year = "2021",
  address = "Held Online",
  publisher = "INCOMA Ltd.",
  url = "https://aclanthology.org/2021.ranlp-1.15",
  pages = "121--127",
  abstract = "Best-worst Scaling (BWS) is a methodology for annotation based on comparing and ranking instances, rather than classifying or scoring individual instances. Studies have shown the efficacy of this methodology applied to NLP tasks in terms of a higher quality of the datasets produced by following it. In this system demonstration paper, we present Litescale, a free software library to create and manage BWS annotation tasks. Litescale computes the tuples to annotate, manages the users and the annotation process, and creates the final gold standard. The functionalities of Litescale can be accessed programmatically through a Python module, or via two alternative user interfaces, a textual console-based one and a graphical Web-based one. We further developed and deployed a fully online version of Litescale complete with multi-user support."
}
-
Sohail Akhtar, Valerio Basile and Viviana Patti:
Modeling Annotator Perspective and Polarized Opinions to Improve Hate Speech Detection
@article{Akhtar_Basile_Patti_2020,
  title = {Modeling Annotator Perspective and Polarized Opinions to Improve Hate Speech Detection},
  volume = {8},
  url = {https://ojs.aaai.org/index.php/HCOMP/article/view/7473},
  doi = {10.1609/hcomp.v8i1.7473},
  abstract = {In this paper we propose an approach to exploit the fine-grained knowledge expressed by individual human annotators during a hate speech (HS) detection task, before the aggregation of single judgments in a gold standard dataset eliminates non-majority perspectives. We automatically divide the annotators into groups, aiming at grouping them by similar personal characteristics (ethnicity, social background, culture etc.). To serve a multi-lingual perspective, we performed classification experiments on three different Twitter datasets in English and Italian languages. We created different gold standards, one for each group, and trained a state-of-the-art deep learning model on them, showing that supervised models informed by different perspectives on the target phenomena outperform a baseline represented by models trained on fully aggregated data. Finally, we implemented an ensemble approach that combines the single perspective-aware classifiers into an inclusive model. The results show that this strategy further improves the classification performance, especially with a significant boost in the recall of HS prediction.},
  number = {1},
  journal = {Proceedings of the AAAI Conference on Human Computation and Crowdsourcing},
  author = {Akhtar, Sohail and Basile, Valerio and Patti, Viviana},
  year = {2020},
  month = {Oct.},
  pages = {151--154}
}
-
Valerio Basile, Danilo Croce, Maria Di Maro, Lucia C. Passaro:
EVALITA 2020: Overview of the 7th Evaluation Campaign of Natural Language Processing and Speech Tools for Italian
@inproceedings{Basile2020EVALITA2O,
  title = {EVALITA 2020: Overview of the 7th Evaluation Campaign of Natural Language Processing and Speech Tools for Italian},
  author = {Valerio Basile and Danilo Croce and Maria Di Maro and Lucia C. Passaro},
  booktitle = {EVALITA Evaluation of NLP and Speech Tools for Italian - December 17th, 2020},
  year = {2020}
}
-
Endang Wahyu Pamungkas, Valerio Basile and Viviana Patti:
Misogyny Detection in Twitter: a Multilingual and Cross-Domain Study
@article{PAMUNGKAS2020102360,
  title = {Misogyny Detection in Twitter: a Multilingual and Cross-Domain Study},
  journal = {Information Processing \& Management},
  volume = {57},
  number = {6},
  pages = {102360},
  year = {2020},
  issn = {0306-4573},
  doi = {10.1016/j.ipm.2020.102360},
  url = {https://www.sciencedirect.com/science/article/pii/S0306457320308554},
  author = {Endang Wahyu Pamungkas and Valerio Basile and Viviana Patti},
  keywords = {Automatic misogyny identification, Abusive language online, Cross-domain classification, Cross-lingual classification, Social media},
  abstract = {The freedom of expression given by social media has a dark side: the growing proliferation of abusive contents on these platforms. Misogynistic speech is a kind of abusive language, which can be simplified as hate speech targeting women, and it is becoming a more and more relevant issue in recent years. AMI IberEval 2018 and AMI EVALITA 2018 were two shared tasks which mainly focused on tackling the problem of misogyny in Twitter, in three different languages, namely English, Italian, and Spanish. In this paper, we present an in-depth study on the phenomena of misogyny in those three languages, by focusing on three main objectives. Firstly, we investigate the most important features to detect misogyny and the issues which contribute to the difficulty of misogyny detection, by proposing a novel system and conducting a broad evaluation on this task. Secondly, we study the relationship between misogyny and other abusive language phenomena, by conducting a series of cross-domain classification experiments. Finally, we explore the feasibility of detecting misogyny in a multilingual environment, by carrying out cross-lingual classification experiments. Our system succeeded to outperform all state of the art systems in all benchmark AMI datasets both subtask A and subtask B. Moreover, intriguing insights emerged from error analysis, in particular about the interaction between different but related abusive phenomena. Based on our cross-domain experiment, we conclude that misogyny is quite a specific kind of abusive language, while we experimentally found that it is different from sexism. Lastly, our cross-lingual experiments show promising results. Our proposed joint-learning architecture obtained a robust performance across languages, worth to be explored in further investigation.}
}
-
Tommaso Caselli, Valerio Basile, Jelena Mitrović, Michael Granitzer:
HateBERT: Retraining BERT for Abusive Language Detection in English
@inproceedings{caselli-etal-2021-hatebert,
  title = "{H}ate{BERT}: Retraining {BERT} for Abusive Language Detection in {E}nglish",
  author = "Caselli, Tommaso and Basile, Valerio and Mitrovi{\'c}, Jelena and Granitzer, Michael",
  booktitle = "Proceedings of the 5th Workshop on Online Abuse and Harms (WOAH 2021)",
  month = aug,
  year = "2021",
  address = "Online",
  publisher = "Association for Computational Linguistics",
  url = "https://aclanthology.org/2021.woah-1.3",
  doi = "10.18653/v1/2021.woah-1.3",
  pages = "17--25",
  abstract = "We introduce HateBERT, a re-trained BERT model for abusive language detection in English. The model was trained on RAL-E, a large-scale dataset of Reddit comments in English from communities banned for being offensive, abusive, or hateful that we have curated and made available to the public. We present the results of a detailed comparison between a general pre-trained language model and the retrained version on three English datasets for offensive, abusive language and hate speech detection tasks. In all datasets, HateBERT outperforms the corresponding general BERT model. We also discuss a battery of experiments comparing the portability of the fine-tuned models across the datasets, suggesting that portability is affected by compatibility of the annotated phenomena."
}
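The abstract notes that the retrained model has been made publicly available. A minimal loading sketch with the Hugging Face transformers library is shown below; the Hub identifier "GroNLP/hateBERT" and the two-label setup are assumed here rather than taken from the entry, and the classification head is freshly initialized until fine-tuned on an abusive-language dataset.

# Minimal sketch: load the released HateBERT checkpoint for fine-tuning.
# The model identifier below is an assumption, not stated in the entry above.
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_ID = "GroNLP/hateBERT"  # assumed Hugging Face Hub identifier

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# The encoder weights come from HateBERT; the 2-label head is randomly
# initialized and only becomes meaningful after fine-tuning.
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID, num_labels=2)

inputs = tokenizer("an example message to classify", return_tensors="pt", truncation=True)
logits = model(**inputs).logits  # scores are untrained until the model is fine-tuned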
-
Tommaso Caselli, Valerio Basile, Jelena Mitrović, Inga Kartoziya, Michael Granitzer:
I Feel Offended, Don’t Be Abusive! Implicit/Explicit Messages in Offensive and Abusive Language
@inproceedings{caselli-EtAl:2020:LREC,
  author = {Caselli, Tommaso and Basile, Valerio and Mitrović, Jelena and Kartoziya, Inga and Granitzer, Michael},
  title = {I Feel Offended, Don’t Be Abusive! Implicit/Explicit Messages in Offensive and Abusive Language},
  booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
  month = {May},
  year = {2020},
  address = {Marseille, France},
  publisher = {European Language Resources Association},
  pages = {6193--6202},
  abstract = {Abusive language detection is an unsolved and challenging problem for the NLP community. Recent literature suggests various approaches to distinguish between different language phenomena (e.g., hate speech vs. cyberbullying vs. offensive language) and factors (degree of explicitness and target) that may help to classify different abusive language phenomena. There are data sets that annotate the target of abusive messages (i.e. OLID/OffensEval (Zampieri et al., 2019a)). However, there is a lack of data sets that take into account the degree of explicitness. In this paper, we propose annotation guidelines to distinguish between explicit and implicit abuse in English and apply them to OLID/OffensEval. The outcome is a newly created resource, AbuseEval v1.0, which aims to address some of the existing issues in the annotation of offensive and abusive language (e.g., explicitness of the message, presence of a target, need of context, and interaction across different phenomena).},
  url = {https://www.aclweb.org/anthology/2020.lrec-1.760}
}
-
Marco Polignano, Pierpaolo Basile, Marco de Gemmis, Giovanni Semeraro and Valerio Basile:
AlBERTo: Italian BERT Language Understanding Model for NLP Challenging Tasks Based on Tweets
@inproceedings{PolignanoEtAlCLIC2019,
  author = {Marco Polignano and Pierpaolo Basile and Marco de Gemmis and Giovanni Semeraro and Valerio Basile},
  title = {{AlBERTo: Italian BERT Language Understanding Model for NLP Challenging Tasks Based on Tweets}},
  booktitle = {Proceedings of the Sixth Italian Conference on Computational Linguistics (CLiC-it 2019)},
  year = {2019},
  publisher = {CEUR},
  series = {CEUR Workshop Proceedings},
  volume = {2481},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85074851349&partnerID=40&md5=7abed946e06f76b3825ae5e294ffac14}
}
-
Valerio Basile, Cristina Bosco, Elisabetta Fersini, Debora Nozza, Viviana Patti, Francisco Manuel Rangel Pardo, Paolo Rosso, Manuela Sanguinetti:
SemEval-2019 Task 5: Multilingual Detection of Hate Speech Against Immigrants and Women in Twitter
@inproceedings{basile-etal-2019-semeval,
  title = "{S}em{E}val-2019 Task 5: Multilingual Detection of Hate Speech Against Immigrants and Women in Twitter",
  author = "Basile, Valerio and Bosco, Cristina and Fersini, Elisabetta and Nozza, Debora and Patti, Viviana and Rangel Pardo, Francisco Manuel and Rosso, Paolo and Sanguinetti, Manuela",
  booktitle = "Proceedings of the 13th International Workshop on Semantic Evaluation",
  month = jun,
  year = "2019",
  address = "Minneapolis, Minnesota, USA",
  publisher = "Association for Computational Linguistics",
  url = "https://www.aclweb.org/anthology/S19-2007",
  pages = "54--63",
  abstract = "The paper describes the organization of the SemEval 2019 Task 5 about the detection of hate speech against immigrants and women in Spanish and English messages extracted from Twitter. The task is organized in two related classification subtasks: a main binary subtask for detecting the presence of hate speech, and a finer-grained one devoted to identifying further features in hateful contents such as the aggressive attitude and the target harassed, to distinguish if the incitement is against an individual rather than a group. HatEval has been one of the most popular tasks in SemEval-2019 with a total of 108 submitted runs for Subtask A and 70 runs for Subtask B, from a total of 74 different teams. Data provided for the task are described by showing how they have been collected and annotated. Moreover, the paper provides an analysis and discussion about the participant systems and the results they achieved in both subtasks."
}
-
Soufian Jebbara, Valerio Basile, Elena Cabrio, Philipp Cimiano:
Extracting common sense knowledge via triple ranking using supervised and unsupervised distributional models
@article{jebbara2018extracting,
  title = {Extracting common sense knowledge via triple ranking using supervised and unsupervised distributional models},
  author = {Jebbara, Soufian and Basile, Valerio and Cabrio, Elena and Cimiano, Philipp},
  journal = {Semantic Web},
  year = {2018}
}
-
Valerio Basile, Mirko Lai, Manuela Sanguinetti:
Long-term Social Media Data Collection at the University of Turin
@inproceedings{DBLP:conf/clic-it/BasileLS18,
  author = {Valerio Basile and Mirko Lai and Manuela Sanguinetti},
  title = {Long-term Social Media Data Collection at the University of Turin},
  booktitle = {Proceedings of the Fifth Italian Conference on Computational Linguistics (CLiC-it 2018), Torino, Italy, December 10-12, 2018},
  year = {2018},
  url = {http://ceur-ws.org/Vol-2253/paper48.pdf}
}
-
Luigi Asprino, Valerio Basile, Paolo Ciancarini, Valentina Presutti:
Empirical Analysis of Foundational Distinctions in Linked Open Data
@inproceedings{luigi2018empirical,
  title = {Empirical Analysis of Foundational Distinctions in Linked Open Data},
  author = {Asprino, Luigi and Basile, Valerio and Ciancarini, Paolo and Presutti, Valentina},
  booktitle = {27th International Joint Conference on Artificial Intelligence and the 23rd European Conference on Artificial Intelligence},
  pages = {3962--3969},
  year = {2018},
  organization = {International Joint Conferences on Artificial Intelligence}
}
-
Elisa Bassignana, Valerio Basile, Viviana Patti:
Hurtlex: A multilingual lexicon of words to hurt
@inproceedings{bassignana2018hurtlex,
  title = {Hurtlex: A multilingual lexicon of words to hurt},
  author = {Bassignana, Elisa and Basile, Valerio and Patti, Viviana},
  booktitle = {5th Italian Conference on Computational Linguistics, CLiC-it 2018},
  volume = {2253},
  pages = {1--6},
  year = {2018},
  organization = {CEUR-WS}
}
-
Pierpaolo Basile, Valerio Basile, Malvina Nissim, Nicole Novielli, Viviana Patti:
Sentiment Analysis of Microblogging Data
@article{basile2017sentiment,
  title = {Sentiment Analysis of Microblogging Data},
  author = {Basile, Pierpaolo and Basile, Valerio and Nissim, Malvina and Novielli, Nicole and Patti, Viviana and others},
  year = {2017},
  publisher = {Springer Science+Business Media}
}
-
Johan Bos, Valerio Basile, Kilian Evang, Noortje Venhuizen, Johannes Bjerva:
The Groningen Meaning Bank
@incollection{bos2017groningen,
  title = {The {Groningen Meaning Bank}},
  author = {Bos, Johan and Basile, Valerio and Evang, Kilian and Venhuizen, Noortje J and Bjerva, Johannes},
  booktitle = {Handbook of Linguistic Annotation},
  pages = {463--496},
  year = {2017},
  publisher = {Springer Netherlands}
}
-
Valerio Basile, Francesco Barbieri, Danilo Croce, Malvina Nissim, Nicole Novielli, Viviana Patti:
Overview of the EVALITA 2016 SENTiment POLarity Classification Task
@article{basile_evalita_2016,
  title = {Overview of the {EVALITA} 2016 {SENTiment POLarity Classification} Task},
  author = {Basile, Valerio and Barbieri, Francesco and Croce, Danilo and Nissim, Malvina and Novielli, Nicole and Patti, Viviana},
  year = {2016}
}
-
Jay Young, Valerio Basile, Lars Kunze, Elena Cabrio, Nick Hawes:
Towards Lifelong Object Learning by Integrating Situated Robot Perception and Semantic Web Mining
@inproceedings{jay_towards_2016,
  title = {Towards {Lifelong} {Object} {Learning} by {Integrating} {Situated} {Robot} {Perception} and {Semantic} {Web} {Mining}},
  booktitle = {Proceedings of the {European} {Conference} on {Artificial} {Intelligence} ({ECAI})},
  author = {Young, Jay and Basile, Valerio and Kunze, Lars and Cabrio, Elena and Hawes, Nick},
  year = {2016}
}
-
Valerio Basile, Elena Cabrio, Claudia Schon:
KNEWS: Using Logical and Lexical Semantics to Extract Knowledge from Natural Language
@inproceedings{basile_knews:_2016,
  title = {{KNEWS}: {Using} {Logical} and {Lexical} {Semantics} to {Extract} {Knowledge} from {Natural} {Language}},
  booktitle = {Proceedings of the {European} {Conference} on {Artificial} {Intelligence} ({ECAI} 2016)},
  author = {Basile, Valerio and Cabrio, Elena and Schon, Claudia},
  year = {2016}
}
-
Valerio Basile:
From Logic to Language: Natural Language Generation from Logical Forms
@phdthesis{basile_logic_2015,
  address = {Groningen},
  title = {From logic to language: natural language generation from logical forms},
  shorttitle = {From logic to language},
  language = {English},
  school = {University of Groningen},
  author = {Basile, Valerio},
  year = {2015},
  note = {OCLC: 930023296}
}
-
Kilian Evang, Valerio Basile, Grzegorz Chrupała, Johan Bos:
Elephant: Sequence Labeling for Word and Sentence Segmentation.
@inproceedings{evang_elephant:_2013,
  title = {Elephant: {Sequence} {Labeling} for {Word} and {Sentence} {Segmentation}},
  booktitle = {{EMNLP}},
  author = {Evang, Kilian and Basile, Valerio and Chrupa{\l}a, Grzegorz and Bos, Johan},
  year = {2013},
  pages = {1422--1426}
}
-
Valerio Basile, Johan Bos:
Aligning Formal Meaning Representations with Surface Strings for Wide-coverage Text Generation
@inproceedings{basile_aligning_2013,
  title = {Aligning {Formal} {Meaning} {Representations} with {Surface} {Strings} for {Wide}-coverage {Text} {Generation}},
  booktitle = {{ENLG} 2013},
  author = {Basile, Valerio and Bos, Johan},
  year = {2013},
  pages = {1}
}
-
Valerio Basile, Malvina Nissim:
Sentiment analysis on Italian tweets
@inproceedings{basile_sentiment_2013,
  title = {Sentiment analysis on {Italian} tweets},
  url = {https://www.aclweb.org/anthology/W13-1614},
  urldate = {2015-08-21},
  booktitle = {Proceedings of the 4th {Workshop} on {Computational} {Approaches} to {Subjectivity}, {Sentiment} and {Social} {Media} {Analysis}},
  author = {Basile, Valerio and Nissim, Malvina},
  year = {2013},
  pages = {100--107}
}