@InProceedings{LluisGomez2017,
  author    = "Lluis Gomez and Y. Patel and Mar{\c{c}}al Rusi{\~n}ol and C.V. Jawahar and Dimosthenis Karatzas",
  title     = "Self-supervised learning of visual features through embedding images into text topic spaces",
  booktitle = "30th IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
  year      = "2017",
  abstract  = "End-to-end training from scratch of current deep architectures for new computer vision problems would require ImageNet-scale datasets, and this is not always possible. In this paper we present a method that is able to take advantage of freely available multi-modal content to train computer vision algorithms without human supervision. We put forward the idea of performing self-supervised learning of visual features by mining a large-scale corpus of multi-modal (text and image) documents. We show that discriminative visual features can be learnt efficiently by training a CNN to predict the semantic context in which a particular image is most likely to appear as an illustration. For this we leverage the hidden semantic structures discovered in the text corpus with a well-known topic modeling technique. Our experiments demonstrate state-of-the-art performance in image classification, object detection, and multi-modal retrieval compared to recent self-supervised or naturally supervised approaches.",
  optnote   = "DAG; 600.084; 600.121",
  doi       = "10.1109/CVPR.2017.218",
  opturl    = "https://arxiv.org/abs/1705.08631",
  file      = ":http://refbase.cvc.uab.es/files/GPR2017.pdf:PDF"
}