@InProceedings{SergioEscalera2014,
  author      = "Sergio Escalera and Xavier Baro and Jordi Gonzalez and Miguel Angel Bautista and Meysam Madadi and Miguel Reyes and Victor Ponce and Hugo Jair Escalante and Jaime Shotton and Isabelle Guyon",
  title       = "ChaLearn Looking at People Challenge 2014: Dataset and Results",
  booktitle   = "ECCV Workshop on ChaLearn Looking at People",
  year        = "2014",
  volume      = "8925",
  pages       = "459--473",
  optkeywords = "Human Pose Recovery; Behavior Analysis; Action and interactions; Multi-modal gestures; recognition",
  abstract    = "This paper summarizes the ChaLearn Looking at People 2014 challenge data and the results obtained by the participants. The competition was split into three independent tracks: human pose recovery from RGB data, action and interaction recognition from RGB data sequences, and multi-modal gesture recognition from RGB-Depth sequences. For all the tracks, the goal was to perform user-independent recognition in sequences of continuous images using the overlapping Jaccard index as the evaluation measure. In this edition of the ChaLearn challenge, two large novel data sets were made publicly available and the Microsoft Codalab platform was used to manage the competition. Outstanding results were achieved in the three challenge tracks, with accuracy results of 0.20, 0.50, and 0.85 for pose recovery, action/interaction recognition, and multi-modal gesture recognition, respectively.",
  optnote     = "HuPBA; ISE; 600.063; MV",
  doi         = "10.1007/978-3-319-16178-5_32",
  file        = ":http://refbase.cvc.uab.es/files/EBG2014.pdf:PDF"
}
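
The abstract above names the overlapping Jaccard index as the evaluation measure for all three tracks. The following is a minimal illustrative sketch (not taken from the paper) of how such an overlap score could be computed for a single predicted temporal segment against a ground-truth segment, assuming both are given as frame intervals; the function and variable names are hypothetical, and the challenge's exact protocol (per-sequence averaging, multi-class handling) is defined by the organizers.

  def jaccard_index(pred_start, pred_end, gt_start, gt_end):
      # Intersection over union of two frame intervals [start, end).
      # Illustrative only; assumes half-open intervals on the frame axis.
      intersection = max(0, min(pred_end, gt_end) - max(pred_start, gt_start))
      union = (pred_end - pred_start) + (gt_end - gt_start) - intersection
      return intersection / union if union > 0 else 0.0

  # Example: prediction covers frames [10, 50), ground truth covers [20, 60)
  print(jaccard_index(10, 50, 20, 60))  # 30 / 70 overlap -> 0.4285...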