@Article{MeysamMadadi2020,
  author      = "Meysam Madadi and Hugo Bertiche and Sergio Escalera",
  title       = "SMPLR: Deep learning based SMPL reverse for 3D human pose and shape recovery",
  journal     = "Pattern Recognition",
  year        = "2020",
  volume      = "106",
  pages       = "107472",
  optkeywords = "Deep learning; 3D human pose; Body shape; SMPL; Denoising autoencoder; Volumetric stack hourglass",
  abstract    = "In this paper we propose to embed SMPL within a deep-based model to accurately estimate 3D pose and shape from a still RGB image. We use CNN-based 3D joint predictions as an intermediate representation to regress SMPL pose and shape parameters. Later, 3D joints are reconstructed again in the SMPL output. This module can be seen as an autoencoder where the encoder is a deep neural network and the decoder is the SMPL model. We refer to this as SMPL reverse (SMPLR). By implementing SMPLR as an encoder-decoder we avoid the need for complex constraints on pose and shape. Furthermore, given that in-the-wild datasets usually lack accurate 3D annotations, it is desirable to lift 2D joints to 3D without pairing 3D annotations with RGB images. Therefore, we also propose a denoising autoencoder (DAE) module between the CNN and SMPLR, able to lift 2D joints to 3D and partially recover from structured error. We evaluate our method on the SURREAL and Human3.6M datasets, showing improvement over SMPL-based state-of-the-art alternatives by about 4 and 12 mm, respectively.",
  optnote     = "HuPBA; no proj; exported from refbase (http://refbase.cvc.uab.es/show.php?record=3439), last updated on Fri, 19 Nov 2021 10:14:24 +0100",
  opturl      = "https://doi.org/10.1016/j.patcog.2020.107472",
  file        = ":http://refbase.cvc.uab.es/files/MBE2020.pdf:PDF"
}