2023
Wang, Xueyi; Talavera, Estefania; Karastoyanova, Dimka; Azzopardi, George
Fall detection with a non-intrusive and first-person vision approach Journal Article
Links | BibTeX | Altmetric | Tags: deep learning, egocentric vision, wearables
@article{wang2023fall,
  title     = {Fall detection with a non-intrusive and first-person vision approach},
  author    = {Wang, Xueyi and Talavera, Estefania and Karastoyanova, Dimka and Azzopardi, George},
  doi       = {10.1109/JSEN.2023.3314828},
  year      = {2023},
  date      = {2023-09-19},
  urldate   = {2023-09-04},
  journal   = {IEEE Sensors Journal},
  keywords  = {deep learning, egocentric vision, wearables},
  pubstate  = {published},
  tppubtype = {article},
  ids       = {nokey},
}
Aswath, Anusha; Alsahaf, Ahmad; Giepmans, Ben N. G.; Azzopardi, George
Segmentation in large-scale cellular electron microscopy with deep learning: A literature survey Journal Article
Links | BibTeX | Altmetric | Tags: deep learning, electron microscopy, medical image analysis, segmentation
@article{Aswath2023,
  title     = {Segmentation in large-scale cellular electron microscopy with deep learning: A literature survey},
  author    = {Aswath, Anusha and Alsahaf, Ahmad and Giepmans, Ben N. G. and Azzopardi, George},
  doi       = {10.1016/j.media.2023.102920},
  year      = {2023},
  date      = {2023-08-06},
  urldate   = {2023-08-06},
  journal   = {Medical Image Analysis},
  number    = {102920},
  keywords  = {deep learning, electron microscopy, medical image analysis, segmentation},
  pubstate  = {published},
  tppubtype = {article},
}
2022
Bennabhaktula, Guru Swaroop; Timmerman, Derrick; Alegre, Enrique; Azzopardi, George
Source Camera Device Identification from Videos Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: camera identification, constrained networks, convnets, deep learning, forensic image analysis, image noise
@article{Bennabhaktula2022b,
  title     = {Source Camera Device Identification from Videos},
  author    = {Guru Swaroop Bennabhaktula and Derrick Timmerman and Enrique Alegre and George Azzopardi},
  doi       = {10.1007/s42979-022-01202-0},
  year      = {2022},
  date      = {2022-06-01},
  urldate   = {2022-06-01},
  journal   = {SN Computer Science},
  volume    = {3},
  number    = {316},
  abstract  = {Source camera identification is an important and challenging problem in digital image forensics. The clues of the device used to capture the digital media are very useful for Law Enforcement Agencies (LEAs), especially to help them collect more intelligence in digital forensics. In our work, we focus on identifying the source camera device based on digital videos using deep learning methods. In particular, we evaluate deep learning models with increasing levels of complexity for source camera identification and show that with such sophistication the scene-suppression techniques do not aid in model performance. In addition, we mention several common machine learning strategies that are counter-productive in achieving a high accuracy for camera identification. We conduct systematic experiments using 28 devices from the VISION data set and evaluate the model performance on various video scenarios - flat (i.e. homogeneous), indoor, and outdoor and evaluate the impact on classification accuracy when the videos are shared via social media platforms such as YouTube and WhatsApp. Unlike traditional PRNU-noise (Photo Response Non-Uniform) based methods which require flat frames to estimate camera reference pattern noise, the proposed method has no such constraint and we achieve an accuracy of {$72.75 \pm 1.1\%$} on the benchmark VISION data set. Furthermore, we also achieve state-of-the-art accuracy of {$71.75\%$} on the QUFVD data set in identifying 20 camera devices. These two results are the best ever reported on the VISION and QUFVD data sets. Finally, we demonstrate the runtime efficiency of the proposed approach and its advantages to LEAs.},
  keywords  = {camera identification, constrained networks, convnets, deep learning, forensic image analysis, image noise},
  pubstate  = {published},
  tppubtype = {article},
}
Bhole, Amey; Udmale, Sandeep S; Falzon, Owen; Azzopardi, George
CORF3D contour maps with application to Holstein cattle recognition from RGB and thermal images Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, contour detection, convnets, deep learning, noise suppression, pattern recognition, smart farming
@article{bhole2022corf3d,
  title     = {CORF3D contour maps with application to Holstein cattle recognition from RGB and thermal images},
  author    = {Amey Bhole and Sandeep S Udmale and Owen Falzon and George Azzopardi},
  doi       = {10.1016/j.eswa.2021.116354},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {Expert Systems with Applications},
  volume    = {192},
  number    = {116354},
  publisher = {Pergamon},
  abstract  = {Livestock management involves the monitoring of farm animals by tracking certain physiological and phenotypical characteristics over time. In the dairy industry, for instance, cattle are typically equipped with RFID ear tags. The corresponding data (e.g. milk properties) can then be automatically assigned to the respective cow when they enter the milking station. In order to move towards a more scalable, affordable, and welfare-friendly approach, automatic non-invasive solutions are more desirable. Thus, a non-invasive approach is proposed in this paper for the automatic identification of individual Holstein cattle from the side view while exiting a milking station. It considers input images from a thermal-RGB camera. The thermal images are used to delineate the cow from the background. Subsequently, any occluding rods from the milking station are removed and inpainted with the fast marching algorithm. Then, it extracts the RGB map of the segmented cattle along with a novel CORF3D contour map. The latter contains three contour maps extracted by the Combination of Receptive Fields (CORF) model with different strengths of push\textendashpull inhibition. This mechanism suppresses noise in the form of grain type texture. The effectiveness of the proposed approach is demonstrated by means of experiments using a 5-fold and a leave-one day-out cross-validation on a new data set of 3694 images of 383 cows collected from the Dairy Campus in Leeuwarden (the Netherlands) over 9 days. In particular, when combining RGB and CORF3D maps by late fusion, an average accuracy of was obtained for the 5-fold cross validation and for the leave-one day-out experiment. The two maps were combined by first learning two ConvNet classification models, one for each type of map. The feature vectors in the two FC layers obtained from training images were then concatenated and used to learn a linear SVM classification model.
In principle, the proposed approach with the novel CORF3D contour maps is suitable for various image classification applications, especially where grain type texture is a confounding variable.},
  keywords  = {brain-inspired, contour detection, convnets, deep learning, noise suppression, pattern recognition, smart farming},
  pubstate  = {published},
  tppubtype = {article},
}
2021
Shi, Chenyu; Meijer, Joost M; Azzopardi, George; Diercks, Gilles FH; Guo, Jiapan; Petkov, Nicolai
Use of Convolutional Neural Networks for the Detection of u-Serrated Patterns in Direct Immunofluorescence Images to Facilitate the Diagnosis of Epidermolysis Bullosa Acquisita Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, medical image analysis
@article{shi2021use,
  title     = {Use of Convolutional Neural Networks for the Detection of u-Serrated Patterns in Direct Immunofluorescence Images to Facilitate the Diagnosis of Epidermolysis Bullosa Acquisita},
  author    = {Chenyu Shi and Joost M Meijer and George Azzopardi and Gilles FH Diercks and Jiapan Guo and Nicolai Petkov},
  doi       = {10.1016/j.ajpath.2021.05.024},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  journal   = {The American journal of pathology},
  volume    = {191},
  number    = {9},
  pages     = {1520--1525},
  publisher = {Elsevier},
  abstract  = {The u-serrated immunodeposition pattern in direct immunofluorescence (DIF) microscopy is a recognizable feature and confirmative for the diagnosis of epidermolysis bullosa acquisita (EBA). Due to unfamiliarity with serrated patterns, serration pattern recognition is still of limited use in routine DIF microscopy. The objective of this study was to investigate the feasibility of using convolutional neural networks (CNNs) for the recognition of u-serrated patterns that can assist in the diagnosis of EBA. The nine most commonly used CNNs were trained and validated by using 220,800 manually delineated DIF image patches from 106 images of 46 different patients. The data set was split into 10 subsets: nine training subsets from 42 patients to train CNNs and the last subset from the remaining four patients for a validation data set of diagnostic accuracy. This process was repeated 10 times with a different subset used for validation. The best-performing CNN achieved a specificity of 89.3% and a corresponding sensitivity of 89.3% in the classification of u-serrated DIF image patches, an expert level of diagnostic accuracy. Experiments and results show the effectiveness of CNN approaches for u-serrated pattern recognition with a high accuracy. The proposed approach can assist clinicians and pathologists in recognition of u-serrated patterns in DIF images and facilitate the diagnosis of EBA.},
  keywords  = {convnets, deep learning, medical image analysis},
  pubstate  = {published},
  tppubtype = {article},
}
2020
Wang, Xueyi; Ellul, Joshua; Azzopardi, George
Elderly fall detection systems: A literature survey Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, egocentric vision, fall detection, predictive analysis, wearables
@article{wang2020elderly,
  title     = {Elderly fall detection systems: A literature survey},
  author    = {Xueyi Wang and Joshua Ellul and George Azzopardi},
  doi       = {10.3389/frobt.2020.00071},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  journal   = {Frontiers in Robotics and AI},
  volume    = {7},
  pages     = {71},
  publisher = {Frontiers},
  abstract  = {Falling is among the most damaging event elderly people may experience. With the ever-growing aging population, there is an urgent need for the development of fall detection systems. Thanks to the rapid development of sensor networks and the Internet of Things (IoT), human-computer interaction using sensor fusion has been regarded as an effective method to address the problem of fall detection. In this paper, we provide a literature survey of work conducted on elderly fall detection using sensor networks and IoT. Although there are various existing studies which focus on the fall detection with individual sensors, such as wearable ones and depth cameras, the performance of these systems are still not satisfying as they suffer mostly from high false alarms. Literature shows that fusing the signals of different sensors could result in higher accuracy and lower false alarms, while improving the robustness of such systems. We approach this survey from different perspectives, including data collection, data transmission, sensor fusion, data analysis, security, and privacy. We also review the benchmark data sets available that have been used to quantify the performance of the proposed methods. The survey is meant to provide researchers in the field of elderly fall detection using sensor networks with a summary of progress achieved up to date and to identify areas where further effort would be beneficial.},
  keywords  = {convnets, deep learning, egocentric vision, fall detection, predictive analysis, wearables},
  pubstate  = {published},
  tppubtype = {article},
}
Chaves, Deisy; Fidalgo, Eduardo; Alegre, Enrique; Alaiz-Rodríguez, Rocío; Jáñez-Martino, Francisco; Azzopardi, George
Assessment and Estimation of Face Detection Performance Based on Deep Learning for Forensic Applications Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, face analysis, forensic image analysis
@article{chaves2020assessment,
  title     = {Assessment and Estimation of Face Detection Performance Based on Deep Learning for Forensic Applications},
  author    = {Deisy Chaves and Eduardo Fidalgo and Enrique Alegre and Roc\'{i}o Alaiz-Rodr\'{i}guez and Francisco J\'{a}\~{n}ez-Martino and George Azzopardi},
  doi       = {10.3390/s20164491},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  journal   = {Sensors},
  volume    = {20},
  number    = {16},
  pages     = {4491},
  publisher = {MDPI},
  abstract  = {Face recognition is a valuable forensic tool for criminal investigators since it certainly helps in identifying individuals in scenarios of criminal activity like fugitives or child sexual abuse. It is, however, a very challenging task as it must be able to handle low-quality images of real world settings and fulfill real time requirements. Deep learning approaches for face detection have proven to be very successful but they require large computation power and processing time. In this work, we evaluate the speed\textendashaccuracy tradeoff of three popular deep-learning-based face detectors on the WIDER Face and UFDD data sets in several CPUs and GPUs. We also develop a regression model capable to estimate the performance, both in terms of processing time and accuracy. We expect this to become a very useful tool for the end user in forensic laboratories in order to estimate the performance for different face detection options. Experimental results showed that the best speed\textendashaccuracy tradeoff is achieved with images resized to 50% of the original size in GPUs and images resized to 25% of the original size in CPUs. Moreover, performance can be estimated using multiple linear regression models with a Mean Absolute Error (MAE) of 0.113, which is very promising for the forensic field.},
  keywords  = {convnets, deep learning, face analysis, forensic image analysis},
  pubstate  = {published},
  tppubtype = {article},
}
2018
Azzopardi, George; Greco, Antonio; Saggese, Alessia; Vento, Mario
Fusion of domain-specific and trainable features for gender recognition from face images Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, face analysis, trainable filters
@article{azzopardi2018fusion,
  title     = {Fusion of domain-specific and trainable features for gender recognition from face images},
  author    = {George Azzopardi and Antonio Greco and Alessia Saggese and Mario Vento},
  doi       = {10.1109/ACCESS.2018.2823378},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  journal   = {IEEE Access},
  volume    = {6},
  pages     = {24171--24183},
  publisher = {IEEE},
  abstract  = {The popularity and the appeal of systems which are able to automatically determine the gender from face images are growing rapidly. Such a great interest arises from the wide variety of applications, especially in the fields of retail and video surveillance. In recent years, there have been several attempts to address this challenge, but a definitive solution has not yet been found. In this paper, we propose a novel approach that fuses domain-specific and trainable features to recognize the gender from face images. In particular, we use the SURF descriptors extracted from 51 facial landmarks related to eyes, nose, and mouth as domain-dependent features, and the COSFIRE filters as trainable features. The proposed approach turns out to be very robust with respect to the well-known face variations, including different poses, expressions, and illumination conditions. It achieves state-of-the-art recognition rates on the GENDER-FERET (94.7%) and on the labeled faces in the wild (99.4%) data sets, which are two of the most popular benchmarks for gender recognition. We further evaluated the method on a new data set acquired in real scenarios, the UNISA-Public, recently made publicly available. It consists of 206 training (144 male, 62 female) and 200 test (139 male, 61 female) images that are acquired with a real-time indoor camera capturing people in regular walking motion. Such experiment has the aim to assess the capability of the algorithm to deal with face images extracted from videos, which are definitely more challenging than the still images available in the standard data sets. Also for this data set, we achieved a high recognition rate of 91.5%, that confirms the generalization capabilities of the proposed approach. Of the two types of features, the trainable COSFIRE filters are the most effective and, given their trainable character, they can be applied in any visual pattern recognition problem.},
  keywords  = {convnets, deep learning, face analysis, trainable filters},
  pubstate  = {published},
  tppubtype = {article},
}