@inproceedings{SergioEscalera2009,
  author    = {Escalera, Sergio and Puertas, Eloi and Radeva, Petia and Pujol, Oriol},
  title     = {Multimodal Laughter Recognition in Video Conversations},
  booktitle = {2nd {IEEE} Workshop on {CVPR} for Human Communicative Behavior Analysis},
  year      = {2009},
  pages     = {110--115},
  abstract  = {Laughter detection is an important area of interest in the Affective Computing and Human-computer Interaction fields. In this paper, we propose a multi-modal methodology based on the fusion of audio and visual cues to deal with the laughter recognition problem in face-to-face conversations. The audio features are extracted from the spectrogram and the video features are obtained estimating the mouth movement degree and using a smile and laughter classifier. Finally, the multi-modal cues are included in a sequential classifier. Results over videos from the public discussion blog of the New York Times show that both types of features perform better when considered together by the classifier. Moreover, the sequential methodology shows to significantly outperform the results obtained by an Adaboost classifier.},
  optnote   = {MILAB;HuPBA; exported from refbase (http://refbase.cvc.uab.es/show.php?record=1188), last updated on Tue, 17 Dec 2013 15:41:36 +0100},
  isbn      = {978-1-4244-3994-2},
  issn      = {2160-7508},
  doi       = {10.1109/CVPRW.2009.5204268},
}