@inproceedings{CiprianCorneanu2020,
  author    = {Corneanu, Ciprian and Madadi, Meysam and Escalera, Sergio and Martinez, Aleix},
  title     = {Explainable Early Stopping for Action Unit Recognition},
  booktitle = {Faces and Gestures in E-health and Welfare Workshop},
  year      = {2020},
  pages     = {693--699},
  abstract  = {A common technique to avoid overfitting when training deep neural networks (DNN) is to monitor the performance in a dedicated validation data partition and to stop training as soon as it saturates. This only focuses on what the model does, while completely ignoring what happens inside it. In this work, we open the ``black-box'' of DNN in order to perform early stopping. We propose to use a novel theoretical framework that analyses meso-scale patterns in the topology of the functional graph of a network while it trains. Based on it, we decide when it transitions from learning towards overfitting in a more explainable way. We exemplify the benefits of this approach on a state-of-the-art custom DNN that jointly learns local representations and label structure employing an ensemble of dedicated subnetworks. We show that it is practically equivalent in performance to early stopping with patience, the standard early stopping algorithm in the literature. This proves beneficial for AU recognition performance and provides new insights into how learning of AUs occurs in DNNs.},
  optnote   = {HUPBA; exported from refbase (http://refbase.cvc.uab.es/show.php?record=3514), last updated on Thu, 03 Feb 2022 09:04:40 +0100},
  file      = {:http://refbase.cvc.uab.es/files/CME2020.pdf:PDF},
}