@inproceedings{fdi:010087181,
  title    = {Reproducing {D}eep {L}earning experiments: common challenges and recommendations for improvement [poster]},
  author   = {Machicao, J. and Ben Abbes, A. and Meneguzzi, L. and Pizzigatti Corr{\^e}a, P. and Specht, A. and David, R. and Subsol, G. and Vellenich, D. F. and Devillers, Rodolphe and Stall, S. and Mouquet, N. and Chaumont, M. and Berti-Equille, Laure and Mouillot, D.},
  language = {English},
  abstract = {In computer science, there is a growing effort to improve reproducibility. However, it is still difficult to reproduce the experiments of other scientists, and even more so when it comes to Deep Learning (DL). Making a DL research experiment reproducible requires considerable work to document, verify, and make the system usable. These challenges are compounded by the inherent complexity of DL, such as the number of (hyper)parameters, the huge volumes of data, and the versioning of the learning model, among others. Based on the reproduction of three DL case studies on real-world tasks, such as poverty estimation from remote sensing imagery, we identified common reproduction problems. We therefore propose a set of recommendations (``fixes'') to overcome the issues a researcher may encounter, in order to improve reproducibility and replicability and reduce the likelihood of wasted effort. These strategies can serve as a ``Swiss Army knife'' for moving from DL to more general areas, as they are organized around (i) the quality of the dataset (and associated metadata), (ii) the Deep Learning method, (iii) the implementation, and (iv) the infrastructure used.},
  pages    = {1 multigr.},
  year     = {2022},
  DOI      = {10.5281/zenodo.6587694},
  URL      = {https://www.documentation.ird.fr/hor/fdi:010087181},
}