@Article{DavidSanchez-Mendoza2015,
  author      = "David Sanchez-Mendoza and David Masip and Agata Lapedriza",
  title       = "Emotion recognition from mid-level features",
  journal     = "Pattern Recognition Letters",
  year        = "2015",
  publisher   = "Elsevier B.V.",
  volume      = "67",
  number      = "Part 1",
  pages       = "66--74",
  optkeywords = "Facial expression; Emotion recognition; Action units; Computer vision",
  abstract    = "In this paper we present a study on the use of Action Units as mid-level features for automatically recognizing basic and subtle emotions. We propose a representation model based on mid-level facial muscular movement features. We encode these movements dynamically using the Facial Action Coding System, and propose to use these intermediate features based on Action Units (AUs) to classify emotions. AUs activations are detected fusing a set of spatiotemporal geometric and appearance features. The algorithm is validated in two applications: (i) the recognition of 7 basic emotions using the publicly available Cohn-Kanade database, and (ii) the inference of subtle emotional cues in the Newscast database. In this second scenario, we consider emotions that are perceived cumulatively in longer periods of time. In particular, we automatically classify whether video shoots from public News TV channels refer to Good or Bad news. To deal with the different video lengths we propose a Histogram of Action Units and compute it using a sliding window strategy on the frame sequences. Our approach achieves accuracies close to human perception.",
  optnote     = "OR;MV",
  issn        = "0167-8655",
  doi         = "10.1016/j.patrec.2015.06.007",
  url         = "http://refbase.cvc.uab.es/files/SML2015.htm"
}
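
The abstract above mentions a sliding-window Histogram of Action Units for handling videos of different lengths. The Python sketch below is an illustration only, not the authors' implementation: it assumes a hypothetical per-frame binary AU-activation matrix (the AU detector itself is out of scope), and the window and step sizes are arbitrary placeholders.

# Illustrative sketch (not from the paper): sliding-window Histogram of
# Action Units over a hypothetical (n_frames x n_aus) binary activation matrix.
import numpy as np

def histogram_of_action_units(au_activations, window=30, step=15):
    """Aggregate per-frame AU activations into per-window histograms.

    au_activations : (n_frames, n_aus) array, 1 where an AU is active in a frame.
    window, step   : sliding-window length and stride in frames (placeholders).
    Returns a (n_windows, n_aus) array; each row counts AU activations in one
    window, normalized by the window length so clips of different duration
    yield descriptors of the same dimensionality per window.
    """
    au_activations = np.asarray(au_activations, dtype=float)
    n_frames, _ = au_activations.shape
    histograms = []
    for start in range(0, max(n_frames - window + 1, 1), step):
        chunk = au_activations[start:start + window]
        histograms.append(chunk.sum(axis=0) / len(chunk))
    return np.vstack(histograms)

# Usage example: 300 frames, 17 AUs, random binary activations.
demo = (np.random.rand(300, 17) > 0.8).astype(int)
print(histogram_of_action_units(demo).shape)  # (19, 17)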