@InProceedings{LluisGomez2018,
  author      = "Lluis Gomez and Andres Mafla and Mar{\c{c}}al Rusi{\~n}ol and Dimosthenis Karatzas",
  title       = "Single Shot Scene Text Retrieval",
  booktitle   = "15th European Conference on Computer Vision",
  year        = "2018",
  volume      = "11218",
  pages       = "728--744",
  optkeywords = "Image retrieval, Scene text, Word spotting, Convolutional Neural Networks, Region Proposal Networks, PHOC",
  abstract    = "Textual information found in scene images provides high-level semantic information about the image and its context, and it can be leveraged for better scene understanding. In this paper we address the problem of scene text retrieval: given a text query, the system must return all images containing the queried text. The novelty of the proposed model consists in the use of a single-shot CNN architecture that simultaneously predicts bounding boxes and a compact text representation of the words inside them. In this way, the text-based image retrieval task can be cast as a simple nearest-neighbor search of the query text representation over the outputs of the CNN for the entire image database. Our experiments demonstrate that the proposed architecture outperforms the previous state of the art while offering a significant increase in processing speed.",
  optnote     = "DAG; 600.084; 601.338; 600.121; 600.129",
  opturl      = "https://doi.org/10.1007/978-3-030-01264-9_43",
  file        = ":http://refbase.cvc.uab.es/files/GMR2018.pdf:PDF"
}