@inproceedings{WenwenYu2023,
  author    = {Yu, Wenwen and Zhang, Chengquan and Cao, Haoyu and Hua, Wei and Li, Bohan and Chen, Huang and Liu, Mingyu and Chen, Mingrui and Kuang, Jianfeng and Cheng, Mengjun and Du, Yuning and Feng, Shikun and Hu, Xiaoguang and Lyu, Pengyuan and Yao, Kun and Yu, Yuechen and Liu, Yuliang and Che, Wanxiang and Ding, Errui and Liu, Cheng-Lin and Luo, Jiebo and Yan, Shuicheng and Zhang, Min and Karatzas, Dimosthenis and Sun, Xing and Wang, Jingdong and Bai, Xiang},
  title     = {{ICDAR} 2023 Competition on Structured Text Extraction from Visually-Rich Document Images},
  booktitle = {17th International Conference on Document Analysis and Recognition},
  series    = {Lecture Notes in Computer Science},
  volume    = {14188},
  pages     = {536--552},
  year      = {2023},
  doi       = {10.1007/978-3-031-41679-8_32},
  abstract  = {Structured text extraction is one of the most valuable and challenging application directions in the field of Document AI. However, the scenarios of past benchmarks are limited, and the corresponding evaluation protocols usually focus on the submodules of the structured text extraction scheme. In order to eliminate these problems, we organized the ICDAR 2023 competition on Structured text extraction from Visually-Rich Document images (SVRD). We set up two tracks for SVRD including Track 1: HUST-CELL and Track 2: Baidu-FEST, where HUST-CELL aims to evaluate the end-to-end performance of Complex Entity Linking and Labeling, and Baidu-FEST focuses on evaluating the performance and generalization of Zero-shot/Few-shot Structured Text extraction from an end-to-end perspective. Compared to the current document benchmarks, our two tracks of competition benchmark enriches the scenarios greatly and contains more than 50 types of visually-rich document images (mainly from the actual enterprise applications). The competition opened on 30th December, 2022 and closed on 24th March, 2023. There are 35 participants and 91 valid submissions received for Track 1, and 15 participants and 26 valid submissions received for Track 2. In this report we will presents the motivation, competition datasets, task definition, evaluation protocol, and submission summaries. According to the performance of the submissions, we believe there is still a large gap on the expected information extraction performance for complex and zero-shot scenarios. It is hoped that this competition will attract many researchers in the field of CV and NLP, and bring some new thoughts to the field of Document AI.},
  optnote   = {DAG; exported from refbase (http://refbase.cvc.uab.es/show.php?record=3896), last updated on Wed, 17 Jan 2024 11:17:00 +0100},
  opturl    = {https://link.springer.com/chapter/10.1007/978-3-031-41679-8_32},
}