2021
Velasco-Mata, J.; Chaves, D.; de Mata, V.; Al-Nabki, M. W.; Fidalgo, Eduardo; Alegre, Enrique; Azzopardi, George
Development of a Hardware Benchmark for Forensic Face Detection Applications Inproceedings
Abstract | Links | BibTeX | Tags: face analysis, forensic image analysis
@inproceedings{velasco2021development,
  title        = {Development of a Hardware Benchmark for Forensic Face Detection Applications},
  author       = {J. Velasco-Mata and D. Chaves and V. de Mata and M. W. Al-Nabki and Eduardo Fidalgo and Enrique Alegre and George Azzopardi},
  url          = {https://ruidera.uclm.es/xmlui/handle/10578/28635},
  year         = {2021},
  date         = {2021-01-01},
  urldate      = {2021-01-01},
  booktitle    = {Cybersecurity Research National Conferences - INCIBE, Leon, Spain},
  organization = {JNIC},
  abstract     = {Face detection techniques are valuable in forensic investigation since they help criminal investigators to identify victims/offenders in child sexual exploitation material. Deep learning approaches proved successful in these tasks, but their high computational requirements make them unsuitable if there are time constraints. To cope with this problem, we use a resizing strategy over three face detection techniques \textemdash MTCNN, PyramidBox and DSFD\textemdash to improve their speed over samples selected from the WIDER Face and UFDD datasets across several CPUs and GPUs. The best speed-detection trade-off was achieved by reducing the images to 50\% of their original size and then applying DSFD. The fastest hardware for this purpose was a Nvidia GPU based on the Turing architecture.},
  keywords     = {face analysis, forensic image analysis},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
Biswas, R.; Chaves, D.; Jáñez-Martino, F.; Blanco-Medina, P.; Fidalgo, E.; García-Olalla, C.; Azzopardi, G.
Reinforcement of age estimation in forensic tools to detect Child Sexual Exploitation Material Inproceedings
Abstract | Links | BibTeX | Tags: face analysis, forensic image analysis
@inproceedings{biswas2021reinforcement,
  title         = {Reinforcement of age estimation in forensic tools to detect Child Sexual Exploitation Material},
  author        = {R. Biswas and D. Chaves and F. J{\'a}{\~n}ez-Martino and P. Blanco-Medina and E. Fidalgo and C. Garc{\'\i}a-Olalla and G. Azzopardi},
  url           = {https://ruidera.uclm.es/xmlui/handle/10578/28631},
  year          = {2021},
  date          = {2021-01-01},
  urldate       = {2021-01-01},
  booktitle     = {Cybersecurity Research National Conferences - INCIBE, Leon, Spain},
  organization  = {JNIC},
  internal-note = {booktitle/organization aligned with the velasco2021development entry (same venue and year); previous data had the venue name in organization and no booktitle -- confirm},
  abstract      = {Several image-based approaches for estimating the age of a person are available in computer vision literature. However, most of them perform poorly on minors and young adults, especially when the eyes are occluded. This type of occlusion is common in Child Sexual Exploitation Materials (CSEM), in order to hide the identity of victims. We introduce an approach that builds Soft Stagewise Regression Network (SSR-Net) models with natural and eye-occluded facial images, to estimate the age of minors and young adults. Our proposal reduces the Mean Absolute Error from 7.26 to 6.5, and 6.81 to 4.07 for SSR-Net pre-trained models on the IMDB and MORPH datasets, respectively.},
  keywords      = {face analysis, forensic image analysis},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
2020
Bennabhaktula, Guru; Alegre, Enrique; Karastoyanova, Dimka; Azzopardi, George
Device-based Image Matching with Similarity Learning by Convolutional Neural Networks that Exploit the Underlying Camera Sensor Pattern Noise Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: camera identification, convnets, deep learning, forensic image analysis
@inproceedings{bennabhaktula2020device,
  title     = {Device-based Image Matching with Similarity Learning by Convolutional Neural Networks that Exploit the Underlying Camera Sensor Pattern Noise},
  author    = {Guru Bennabhaktula and Enrique Alegre and Dimka Karastoyanova and George Azzopardi},
  doi       = {10.5220/0009155505780584},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {Proceedings of the 9th International Conference on Pattern Recognition Applications and Methods - ICPRAM},
  pages     = {578--584},
  publisher = {SciTePress},
  abstract  = {One of the challenging problems in digital image forensics is the capability to identify images that are captured by the same camera device. This knowledge can help forensic experts in gathering intelligence about suspects by analyzing digital images. In this paper, we propose a two-part network to quantify the likelihood that a given pair of images have the same source camera, and we evaluated it on the benchmark Dresden data set containing 1851 images from 31 different cameras. To the best of our knowledge, we are the first ones addressing the challenge of device-based image matching. Though the proposed approach is not yet forensics ready, our experiments show that this direction is worth pursuing, achieving at this moment 85 percent accuracy. This ongoing work is part of the EU-funded project 4NSEEK concerned with forensics against child sexual abuse.},
  keywords  = {camera identification, convnets, deep learning, forensic image analysis},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Timmerman, Derrick; Bennabhaktula, Swaroop; Alegre, Enrique; Azzopardi, George
Video Camera Identification from Sensor Pattern Noise with a Constrained ConvNet Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: camera identification, constrained networks, convnets, deep learning, forensic image analysis
@inproceedings{timmerman2020video,
  title      = {Video Camera Identification from Sensor Pattern Noise with a Constrained ConvNet},
  author     = {Derrick Timmerman and Swaroop Bennabhaktula and Enrique Alegre and George Azzopardi},
  editor     = {De Marsico, Maria and Sanniti di Baja, Gabriella and Fred, Ana},
  doi        = {10.48550/arXiv.2012.06277},
  eprint     = {2012.06277},
  eprinttype = {arXiv},
  year       = {2020},
  date       = {2020-01-01},
  urldate    = {2020-01-01},
  booktitle  = {Proceedings of the 10th International Conference on Pattern Recognition Applications and Methods - ICPRAM},
  pages      = {417--425},
  abstract   = {The identification of source cameras from videos, though it is a highly relevant forensic analysis topic, has been studied much less than its counterpart that uses images. In this work we propose a method to identify the source camera of a video based on camera specific noise patterns that we extract from video frames. For the extraction of noise pattern features, we propose an extended version of a constrained convolutional layer capable of processing color inputs. Our system is designed to classify individual video frames which are in turn combined by a majority vote to identify the source camera. We evaluated this approach on the benchmark VISION data set consisting of 1539 videos from 28 different cameras. To the best of our knowledge, this is the first work that addresses the challenge of video camera identification on a device level. The experiments show that our approach is very promising, achieving up to 93.1\% accuracy while being robust to the WhatsApp and YouTube compression techniques. This work is part of the EU-funded project 4NSEEK focused on forensics against child sexual abuse.},
  keywords   = {camera identification, constrained networks, convnets, deep learning, forensic image analysis},
  pubstate   = {published},
  tppubtype  = {inproceedings}
}