@inproceedings{MohamedAliSouibgui2023,
  author      = {Souibgui, Mohamed Ali and Biswas, Sanket and Mafla, Andres and Biten, Ali Furkan and Fornes, Alicia and Kessentini, Yousri and Llados, Josep and Gomez, Lluis and Karatzas, Dimosthenis},
  title       = {{Text-DIAE}: A Self-Supervised Degradation Invariant Autoencoder for Text Recognition and Document Enhancement},
  booktitle   = {Proceedings of the 37th {AAAI} Conference on Artificial Intelligence},
  year        = {2023},
  volume      = {37},
  number      = {2},
  doi         = {10.1609/aaai.v37i2.25328},
  abstract    = {In this paper, we propose a Text-Degradation Invariant Auto Encoder (Text-DIAE), a self-supervised model designed to tackle two tasks, text recognition (handwritten or scene-text) and document image enhancement. We start by employing a transformer-based architecture that incorporates three pretext tasks as learning objectives to be optimized during pre-training without the usage of labelled data. Each of the pretext objectives is specifically tailored for the final downstream tasks. We conduct several ablation experiments that confirm the design choice of the selected pretext tasks. Importantly, the proposed model does not exhibit limitations of previous state-of-the-art methods based on contrastive losses, while at the same time requiring substantially fewer data samples to converge. Finally, we demonstrate that our method surpasses the state-of-the-art in existing supervised and self-supervised settings in handwritten and scene text recognition and document image enhancement. Our code and trained models will be made publicly available at https://github.com/dali92002/SSL-OCR},
  optkeywords = {Representation Learning for Vision; CV Applications; CV Language and Vision; ML Unsupervised; Self-Supervised Learning},
  optnote     = {DAG; exported from refbase (http://refbase.cvc.uab.es/show.php?record=3848), last updated on Wed, 17 Jan 2024 11:15:34 +0100},
}