@InProceedings{AdarshTiwari2023,
  author    = "Adarsh Tiwari and Sanket Biswas and Josep Llad{\'o}s",
  title     = "Can Pre-trained Language Models Help in Understanding Handwritten Symbols?",
  booktitle = "17th International Conference on Document Analysis and Recognition",
  year      = "2023",
  volume    = "14193",
  pages     = "199--211",
  abstract  = "The emergence of transformer models like BERT, GPT-2, GPT-3, RoBERTa, T5 for natural language understanding tasks has opened the floodgates towards solving a wide array of machine learning tasks in other modalities like images, audio, music, sketches and so on. These language models are domain-agnostic and as a result could be applied to 1-D sequences of any kind. However, the key challenge lies in bridging the modality gap so that they could generate strong features beneficial for out-of-domain tasks. This work focuses on leveraging the power of such pre-trained language models and discusses the challenges in predicting handwritten symbols and alphabets.",
  optnote   = "DAG",
  opturl    = "https://link.springer.com/chapter/10.1007/978-3-031-41498-5_15"
}