@inproceedings{M.Campos-Taberner2015,
  author    = {Campos-Taberner, M. and Romero, Adriana and Gatta, Carlo and Camps-Valls, Gustavo},
  title     = {Shared feature representations of {LiDAR} and optical images: Trading sparsity for semantic discrimination},
  booktitle = {{IEEE} International Geoscience and Remote Sensing Symposium ({IGARSS} 2015)},
  year      = {2015},
  pages     = {4169--4172},
  doi       = {10.1109/IGARSS.2015.7326744},
  abstract  = {This paper studies the level of complementary information conveyed by extremely high resolution LiDAR and optical images. We pursue this goal following an indirect approach via unsupervised spatial-spectral feature extraction. We used a recently presented unsupervised convolutional neural network trained to enforce both population and lifetime sparsity in the feature representation. We derived independent and joint feature representations, and analyzed the sparsity scores and the discriminative power. Interestingly, the obtained results revealed that the RGB+LiDAR representation is no longer sparse, and the derived basis functions merge color and elevation yielding a set of more expressive colored edge filters. The joint feature representation is also more discriminative when used for clustering and topological data visualization.},
  optnote   = {LAMP; 600.079; MILAB; exported from refbase (http://refbase.cvc.uab.es/show.php?record=2724), last updated on Thu, 12 May 2016 15:46:05 +0200},
  opturl    = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=7303999},
}