@Article{ZhenXu2022,
  author    = "Zhen Xu and Sergio Escalera and Adrien Pavao and Magali Richard and Wei-Wei Tu and Quanming Yao and Huan Zhao and Isabelle Guyon",
  title     = "{Codabench}: Flexible, easy-to-use, and reproducible meta-benchmark platform",
  journal   = "Patterns",
  year      = "2022",
  publisher = "Elsevier",
  volume    = "3",
  number    = "7",
  pages     = "100543",
  keywords  = "Machine learning, data science, benchmark platform, reproducibility, competitions",
  abstract  = "Obtaining a standardized benchmark of computational methods is a major issue in data-science communities. Dedicated frameworks enabling fair benchmarking in a unified environment are yet to be developed. Here, we introduce Codabench, a meta-benchmark platform that is open sourced and community driven for benchmarking algorithms or software agents versus datasets or tasks. A public instance of Codabench is open to everyone free of charge and allows benchmark organizers to fairly compare submissions under the same setting (software, hardware, data, algorithms), with custom protocols and data formats. Codabench has unique features facilitating easy organization of flexible and reproducible benchmarks, such as the possibility of reusing templates of benchmarks and supplying compute resources on demand. Codabench has been used internally and externally on various applications, receiving more than 130 users and 2,500 submissions. As illustrative use cases, we introduce four diverse benchmarks covering graph machine learning, cancer heterogeneity, clinical diagnosis, and reinforcement learning.",
  optnote   = "HuPBA",
  doi       = "10.1016/j.patter.2022.100543"
}