@InProceedings{AlvaroPeris2016,
  author      = "Peris, {\'A}lvaro and Bola{\~n}os, Marc and Radeva, Petia and Casacuberta, Francisco",
  title       = "Video Description Using Bidirectional Recurrent Neural Networks",
  booktitle   = "25th International Conference on Artificial Neural Networks (ICANN)",
  year        = "2016",
  volume      = "2",
  pages       = "3--11",
  optkeywords = "Video description; Neural Machine Translation; Bidirectional Recurrent Neural Networks; LSTM; Convolutional Neural Networks",
  abstract    = "Although traditionally used in the machine translation field, the encoder-decoder framework has been recently applied for the generation of video and image descriptions. The combination of Convolutional and Recurrent Neural Networks in these models has proven to outperform the previous state of the art, obtaining more accurate video descriptions. In this work we propose pushing further this model by introducing two contributions into the encoding stage. First, producing richer image representations by combining object and location information from Convolutional Neural Networks and second, introducing Bidirectional Recurrent Neural Networks for capturing both forward and backward temporal relationships in the input frames.",
  optnote     = "MILAB",
  file        = ":http://refbase.cvc.uab.es/files/PBR2016.pdf:PDF"
}