% Journal article; cleaned from a refbase auto-export:
% - merged the two duplicate `optnote` fields (BibTeX keeps only the first, so the
%   second was silently lost)
% - names converted to unambiguous "Last, First" form; the particle in
%   "van de Weijer" is lowercase so BibTeX parses it as a von-part
% - brace delimiters instead of quotes; one field per line
@article{FahadShahbazKhan2014,
  author   = {Khan, Fahad Shahbaz and van de Weijer, Joost and Rao, Muhammad Anwer and Felsberg, Michael and Gatta, Carlo},
  title    = {Semantic Pyramids for Gender and Action Recognition},
  journal  = {IEEE Transactions on Image Processing},
  year     = {2014},
  volume   = {23},
  number   = {8},
  pages    = {3633--3645},
  abstract = {Person description is a challenging problem in computer vision. We investigated two major aspects of person description: 1) gender and 2) action recognition in still images. Most state-of-the-art approaches for gender and action recognition rely on the description of a single body part, such as face or full-body. However, relying on a single body part is suboptimal due to significant variations in scale, viewpoint, and pose in real-world images. This paper proposes a semantic pyramid approach for pose normalization. Our approach is fully automatic and based on combining information from full-body, upper-body, and face regions for gender and action recognition in still images. The proposed approach does not require any annotations for upper-body and face of a person. Instead, we rely on pretrained state-of-the-art upper-body and face detectors to automatically extract semantic information of a person. Given multiple bounding boxes from each body part detector, we then propose a simple method to select the best candidate bounding box, which is used for feature extraction. Finally, the extracted features from the full-body, upper-body, and face regions are combined into a single representation for classification. To validate the proposed approach for gender recognition, experiments are performed on three large data sets namely: 1) human attribute; 2) head-shoulder; and 3) proxemics. For action recognition, we perform experiments on four data sets most used for benchmarking action recognition in still images: 1) Sports; 2) Willow; 3) PASCAL VOC 2010; and 4) Stanford-40. Our experiments clearly demonstrate that the proposed approach, despite its simplicity, outperforms state-of-the-art methods for gender and action recognition.},
  optnote  = {CIC; LAMP; 601.160; 600.074; 600.079; MILAB; exported from refbase (http://refbase.cvc.uab.es/show.php?record=2507), last updated on Fri, 04 Feb 2022 13:11:56 +0100},
  issn     = {1057-7149},
  doi      = {10.1109/TIP.2014.2331759},
  file     = {:http://refbase.cvc.uab.es/files/KWR2014.pdf:PDF},
}