@Article{GabrielVillalonga2020,
  author   = "Gabriel Villalonga and Antonio Lopez",
  title    = "Co-Training for On-Board Deep Object Detection",
  journal  = "IEEE Access",
  year     = "2020",
  volume   = "8",
  pages    = "194441--194456",
  abstract = "Providing ground truth supervision to train visual models has been a bottleneck over the years, exacerbated by domain shifts which degrade the performance of such models. This was the case when visual tasks relied on handcrafted features and shallow machine learning and, despite its unprecedented performance gains, the problem remains open within the deep learning paradigm due to its data-hungry nature. The best-performing deep vision-based object detectors are trained in a supervised manner by relying on human-labeled bounding boxes which localize class instances (i.e., objects) within the training images. Thus, object detection is one such task for which human labeling is a major bottleneck. In this article, we assess co-training as a semi-supervised learning method for self-labeling objects in unlabeled images, thus reducing the human-labeling effort involved in developing deep object detectors. Our study pays special attention to a scenario involving domain shift; in particular, when we have automatically generated virtual-world images with object bounding boxes and we have real-world images which are unlabeled. Moreover, we are particularly interested in using co-training for deep object detection in the context of driver assistance systems and/or self-driving vehicles. Thus, using well-established datasets and protocols for object detection in these application contexts, we show that co-training is a paradigm worth pursuing to alleviate object labeling, working both alone and together with task-agnostic domain adaptation.",
  optnote  = "ADAS; 600.118",
  doi      = "10.1109/ACCESS.2020.3032024"
}