dc.contributor.author | Barea Navarro, Rafael | |
dc.contributor.author | Bergasa Pascual, Luis Miguel | |
dc.contributor.author | Romera Carmena, Eduardo | |
dc.contributor.author | López Guillén, María Elena | |
dc.contributor.author | Pérez Gil, Óscar | |
dc.contributor.author | Tradacete Ágreda, Miguel | |
dc.contributor.author | López, Joaquín | |
dc.date.accessioned | 2020-11-16T16:58:46Z | |
dc.date.available | 2020-11-16T16:58:46Z | |
dc.date.issued | 2019-10 | |
dc.identifier.bibliographicCitation | Barea, R., Bergasa, L. M., Romera, E., López Guillén, E., Pérez, O., Tradacete, M. & López, J. 2019, "Integrating state-of-the-art CNNs for multi-sensor 3D vehicle detection in real autonomous driving environments", en 2019 IEEE Intelligent Transportation Systems Conference (ITSC), Auckland, New Zealand, 2019, pp. 1425-1431 | |
dc.identifier.isbn | 978-1-5386-7025-5 | |
dc.identifier.uri | http://hdl.handle.net/10017/45108 | |
dc.description | 2019 IEEE Intelligent Transportation Systems Conference (ITSC), Auckland, New Zealand, 27-30 Oct. 2019. | en |
dc.description.abstract | This paper presents two new approaches to detect
surrounding vehicles in 3D urban driving scenes and their corresponding Bird's Eye View (BEV). The proposals integrate two
state-of-the-art Convolutional Neural Networks (CNNs), such as
YOLOv3 and Mask-RCNN, in a framework presented by the
authors in [1] for 3D vehicles detection fusing semantic image
segmentation and LIDAR point cloud. Our proposals take
advantage of multimodal fusion, geometrical constraints, and
pre-trained modules inside our framework. The methods have
been tested using the KITTI object detection benchmark and
comparison is presented. Experiments show new approaches
improve results with respect to the baseline and are on par
with other competitive state-of-the-art proposals, being the only
ones that do not apply an end-to-end learning process. In this
way, they remove the need to train on a specific dataset and
show a good capability of generalization to any domain, a
key point for self-driving systems. Finally, we have tested our
best proposal in KITTI in our driving environment, without
any adaptation, obtaining results suitable for our autonomous
driving application. | en |
dc.description.sponsorship | Ministerio de Economía y Competitividad | es_ES |
dc.description.sponsorship | Comunidad de Madrid | es_ES |
dc.format.mimetype | application/pdf | en |
dc.language.iso | eng | en |
dc.publisher | IEEE | |
dc.rights | Attribution-NonCommercial-NoDerivatives 4.0 Internacional | * |
dc.rights | © 2019 IEEE | |
dc.rights.uri | http://creativecommons.org/licenses/by-nc-nd/4.0/ | * |
dc.title | Integrating state-of-the-art CNNs for multi-sensor 3D vehicle detection in real autonomous driving environments | en |
dc.type | info:eu-repo/semantics/conferenceObject | en |
dc.subject.eciencia | Electrónica | es_ES |
dc.subject.eciencia | Electronics | en |
dc.contributor.affiliation | Universidad de Alcalá. Departamento de Electrónica | es_ES |
dc.relation.publisherversion | https://doi.org/10.1109/ITSC.2019.8916973 | |
dc.type.version | info:eu-repo/semantics/acceptedVersion | en |
dc.identifier.doi | 10.1109/ITSC.2019.8916973 | |
dc.relation.projectID | info:eu-repo/grantAgreement/MINECO//TRA2015-70501-C2-1-R/ES/VEHICULO INTELIGENTE PARA PERSONAS MAYORES/ | en |
dc.relation.projectID | info:eu-repo/grantAgreement/MINECO//TRA2015-70501-C2-2-R/ES/SMARTELDERLYCAR. CONTROL Y PLANIFICACION DE RUTAS/ | en |
dc.relation.projectID | info:eu-repo/grantAgreement/CAM//P2018%2FNMT-4331/ES/Madrid Robotics Digital Innovation Hub/RoboCity2030-DIH-CM | en |
dc.rights.accessRights | info:eu-repo/semantics/openAccess | en |
dc.identifier.publicationtitle | 2019 IEEE Intelligent Transportation Systems Conference (ITSC) | |
dc.identifier.publicationlastpage | 1431 | |
dc.identifier.publicationfirstpage | 1425 | |