@InProceedings{JiaolongXu2013,
  author      = "Jiaolong Xu and David Vazquez and Sebastian Ramos and Antonio Lopez and Daniel Ponsa",
  title       = "Adapting a Pedestrian Detector by Boosting LDA Exemplar Classifiers",
  booktitle   = "CVPR Workshop on Ground Truth -- What is a good dataset?",
  year        = "2013",
  pages       = "688--693",
  optkeywords = "Pedestrian Detection; Domain Adaptation",
  abstract    = "Training vision-based pedestrian detectors on synthetic datasets (virtual worlds) is a useful technique for automatically collecting training examples together with their pixel-wise ground truth. However, these detectors must usually operate on real-world images, where they suffer a significant drop in performance. In fact, this effect also occurs among different real-world datasets, i.e., detectors{\textquoteright} accuracy drops when the training data (source domain) and the application scenario (target domain) have inherent differences. Therefore, to avoid this problem, the detector trained with synthetic data must be adapted to the real-world scenario. In this paper, we propose a domain adaptation approach based on boosting LDA exemplar classifiers from both the virtual and the real world. We evaluate our proposal on multiple real-world pedestrian detection datasets. The results show that our method can efficiently adapt the exemplar classifiers from the virtual to the real world, avoiding drops in average precision of over 15\%.",
  optnote     = "ADAS; 600.054; 600.057; 601.217",
  doi         = "10.1109/CVPRW.2013.104",
  file        = ":http://refbase.cvc.uab.es/files/xvr2013a.pdf:PDF",
  language    = "English"
}