@InProceedings{OzanCaglayan2016,
  author    = "Ozan Caglayan and Walid Aransa and Yaxing Wang and Marc Masana and Mercedes Garc{\'i}a-Mart{\'i}nez and Fethi Bougares and Lo{\"i}c Barrault and Joost van de Weijer",
  title     = "Does Multimodality Help Human and Machine for Translation and Image Captioning?",
  booktitle = "Proceedings of the First Conference on Machine Translation (WMT16)",
  year      = "2016",
  abstract  = "This paper presents the systems developed by LIUM and CVC for the WMT16 Multimodal Machine Translation challenge. We explored various comparative methods, namely phrase-based systems and attentional recurrent neural network models trained using monomodal or multimodal data. We also performed a human evaluation in order to estimate the usefulness of multimodal data for human machine translation and image description generation. Our systems obtained the best results for both tasks according to the automatic evaluation metrics BLEU and METEOR.",
  file      = ":http://refbase.cvc.uab.es/files/CAW2016.pdf:PDF"
}