@Article{ParichehrBehjatiArdakani2022,
  author      = "Parichehr Behjati Ardakani and Pau Rodriguez and Carles Fernandez and Armin Mehri and Xavier Roca and Seiichi Ozawa and Jordi Gonzalez",
  title       = "Frequency-based Enhancement Network for Efficient Super-Resolution",
  journal     = "IEEE Access",
  year        = "2022",
  publisher   = "IEEE",
  volume      = "10",
  pages       = "57383--57397",
  doi         = "10.1109/ACCESS.2022.3176441",
  optkeywords = "Deep learning; Frequency-based methods; Lightweight architectures; Single image super-resolution",
  abstract    = "Recently, deep convolutional neural networks (CNNs) have provided outstanding performance in single image super-resolution (SISR). Despite their remarkable performance, the lack of high-frequency information in the recovered images remains a core problem. Moreover, as the networks increase in depth and width, deep CNN-based SR methods are faced with the challenge of computational complexity in practice. A promising and under-explored solution is to adapt the amount of compute based on the different frequency bands of the input. To this end, we present a novel Frequency-based Enhancement Block (FEB) which explicitly enhances the information of high frequencies while forwarding low-frequencies to the output. In particular, this block efficiently decomposes features into low- and high-frequency and assigns more computation to high-frequency ones. Thus, it can help the network generate more discriminative representations by explicitly recovering finer details. Our FEB design is simple and generic and can be used as a direct replacement of commonly used SR blocks with no need to change network architectures. We experimentally show that when replacing SR blocks with FEB we consistently improve the reconstruction error, while reducing the number of parameters in the model. Moreover, we propose a lightweight SR model --- Frequency-based Enhancement Network (FENet) --- based on FEB that matches the performance of larger models. Extensive experiments demonstrate that our proposal performs favorably against the state-of-the-art SR algorithms in terms of visual quality, memory footprint, and inference time. The code is available at https://github.com/pbehjatii/FENet",
  optnote     = "ISE; exported from refbase (http://refbase.cvc.uab.es/show.php?record=3747), last updated on Tue, 25 Apr 2023 15:34:29 +0200"
}