2020
1.
Bennabhaktula, Guru; Alegre, Enrique; Karastoyanova, Dimka; Azzopardi, George
Device-based Image Matching with Similarity Learning by Convolutional Neural Networks that Exploit the Underlying Camera Sensor Pattern Noise Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: camera identification, convnets, deep learning, forensic image analysis
@inproceedings{bennabhaktula2020device,
  title        = {Device-based Image Matching with Similarity Learning by Convolutional Neural Networks that Exploit the Underlying Camera Sensor Pattern Noise},
  author       = {Bennabhaktula, Guru and Alegre, Enrique and Karastoyanova, Dimka and Azzopardi, George},
  doi          = {10.5220/0009155505780584},
  year         = {2020},
  date         = {2020-01-01},
  urldate      = {2020-01-01},
  booktitle    = {Proceedings of the 9th International Conference on Pattern Recognition Applications and Methods - {ICPRAM}},
  pages        = {578--584},
  organization = {SciTePress},
  abstract     = {One of the challenging problems in digital image forensics is the capability to identify images that are captured by the same camera device. This knowledge can help forensic experts in gathering intelligence about suspects by analyzing digital images. In this paper, we propose a two-part network to quantify the likelihood that a given pair of images have the same source camera, and we evaluated it on the benchmark Dresden data set containing 1851 images from 31 different cameras. To the best of our knowledge, we are the first ones addressing the challenge of device-based image matching. Though the proposed approach is not yet forensics ready, our experiments show that this direction is worth pursuing, achieving at this moment 85 percent accuracy. This ongoing work is part of the EU-funded project 4NSEEK concerned with forensics against child sexual abuse.},
  keywords     = {camera identification, convnets, deep learning, forensic image analysis},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
One of the challenging problems in digital image forensics is the capability to identify images that are captured by the same camera device. This knowledge can help forensic experts in gathering intelligence about suspects by analyzing digital images. In this paper, we propose a two-part network to quantify the likelihood that a given pair of images have the same source camera, and we evaluated it on the benchmark Dresden data set containing 1851 images from 31 different cameras. To the best of our knowledge, we are the first ones addressing the challenge of device-based image matching. Though the proposed approach is not yet forensics ready, our experiments show that this direction is worth pursuing, achieving at this moment 85 percent accuracy. This ongoing work is part of the EU-funded project 4NSEEK concerned with forensics against child sexual abuse.
2.
Timmerman, Derrick; Bennabhaktula, Swaroop; Alegre, Enrique; Azzopardi, George
Video Camera Identification from Sensor Pattern Noise with a Constrained ConvNet Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: camera identification, constrained networks, convnets, deep learning, forensic image analysis
@inproceedings{timmerman2020video,
  title      = {Video Camera Identification from Sensor Pattern Noise with a Constrained {ConvNet}},
  author     = {Timmerman, Derrick and Bennabhaktula, Swaroop and Alegre, Enrique and Azzopardi, George},
  editor     = {De Marsico, Maria and Sanniti di Baja, Gabriella and Fred, Ana},
  doi        = {10.48550/arXiv.2012.06277},
  year       = {2020},
  date       = {2020-01-01},
  urldate    = {2020-01-01},
  booktitle  = {Proceedings of the 10th International Conference on Pattern Recognition Applications and Methods - {ICPRAM}},
  eprint     = {2012.06277},
  eprinttype = {arXiv},
  pages      = {417--425},
  abstract   = {The identification of source cameras from videos, though it is a highly relevant forensic analysis topic, has been studied much less than its counterpart that uses images. In this work we propose a method to identify the source camera of a video based on camera specific noise patterns that we extract from video frames. For the extraction of noise pattern features, we propose an extended version of a constrained convolutional layer capable of processing color inputs. Our system is designed to classify individual video frames which are in turn combined by a majority vote to identify the source camera. We evaluated this approach on the benchmark VISION data set consisting of 1539 videos from 28 different cameras. To the best of our knowledge, this is the first work that addresses the challenge of video camera identification on a device level. The experiments show that our approach is very promising, achieving up to 93.1% accuracy while being robust to the WhatsApp and YouTube compression techniques. This work is part of the EU-funded project 4NSEEK focused on forensics against child sexual abuse.},
  keywords   = {camera identification, constrained networks, convnets, deep learning, forensic image analysis},
  pubstate   = {published},
  tppubtype  = {inproceedings}
}
The identification of source cameras from videos, though it is a highly relevant forensic analysis topic, has been studied much less than its counterpart that uses images. In this work we propose a method to identify the source camera of a video based on camera specific noise patterns that we extract from video frames. For the extraction of noise pattern features, we propose an extended version of a constrained convolutional layer capable of processing color inputs. Our system is designed to classify individual video frames which are in turn combined by a majority vote to identify the source camera. We evaluated this approach on the benchmark VISION data set consisting of 1539 videos from 28 different cameras. To the best of our knowledge, this is the first work that addresses the challenge of video camera identification on a device level. The experiments show that our approach is very promising, achieving up to 93.1% accuracy while being robust to the WhatsApp and YouTube compression techniques. This work is part of the EU-funded project 4NSEEK focused on forensics against child sexual abuse.