% NOTE(review): original entry had two fields both named "optnote"; BibTeX/Biber
% treat a repeated field as an error and drop one value. The second occurrence
% (refbase export provenance) is preserved below under "optnote2" — both are
% non-standard opt-fields and are ignored by styles, so rendering is unchanged.
@inproceedings{SounakDey2018,
  author    = {Dey, Sounak and Dutta, Anjan and Ghosh, Suman and Valveny, Ernest and Llados, Josep},
  title     = {Aligning Salient Objects to Queries: A Multi-modal and Multi-object Image Retrieval Framework},
  booktitle = {14th Asian Conference on Computer Vision},
  year      = {2018},
  abstract  = {In this paper we propose an approach for multi-modal image retrieval in multi-labelled images. A multi-modal deep network architecture is formulated to jointly model sketches and text as input query modalities into a common embedding space, which is then further aligned with the image feature space. Our architecture also relies on a salient object detection through a supervised LSTM-based visual attention model learned from convolutional features. Both the alignment between the queries and the image and the supervision of the attention on the images are obtained by generalizing the Hungarian Algorithm using different loss functions. This permits encoding the object-based features and its alignment with the query irrespective of the availability of the co-occurrence of different objects in the training set. We validate the performance of our approach on standard single/multi-object datasets, showing state-of-the art performance in every dataset.},
  optnote   = {DAG; 600.097; 600.121; 600.129},
  optnote2  = {exported from refbase (http://refbase.cvc.uab.es/show.php?record=3151), last updated on Fri, 26 Feb 2021 14:09:50 +0100},
  file      = {:http://refbase.cvc.uab.es/files/DDG2018a.pdf:PDF},
}