2024
Apap, Adrian; Bhole, Amey; Robles, Laura Fernandez; Castejon-Limas, Manuel; Azzopardi, George
Explainable multi-layer COSFIRE filters robust to corruptions and boundary attack with application to retina and palmprint biometrics Journal Article
Links | BibTeX | Altmetric | Tags: biometrics, brain-inspired, palmprint, retinal images, trainable filters
@article{Apap2024,
title = {Explainable multi-layer COSFIRE filters robust to corruptions and boundary attack with application to retina and palmprint biometrics},
author = {Adrian Apap and Amey Bhole and Laura Fernandez Robles and Manuel Castejon-Limas and George Azzopardi},
doi = {https://doi.org/10.1007/s00521-024-10164-8},
year = {2024},
date = {2024-08-03},
urldate = {2024-07-31},
journal = {Neural Computing and Applications},
keywords = {biometrics, brain-inspired, palmprint, retinal images, trainable filters},
pubstate = {published},
tppubtype = {article}
}
Ndung’u, Steven; Grobler, Trienko; Wijnholds, Stefan J.; Karastoyanova, Dimka; Azzopardi, George
Classification of Radio Galaxies with trainable COSFIRE filters Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, radioastronomy, trainable filters
@article{Ndung’u2024,
title = {Classification of Radio Galaxies with trainable COSFIRE filters},
author = {Steven Ndung’u and Trienko Grobler and Stefan J. Wijnholds and Dimka Karastoyanova and George Azzopardi},
doi = {https://doi.org/10.1093/mnras/stae821},
year = {2024},
date = {2024-03-23},
urldate = {2024-03-23},
journal = {Monthly Notices of the Royal Astronomical Society},
abstract = {Radio galaxies exhibit a rich diversity of morphological characteristics, which make their classification into distinct types a complex challenge. To address this challenge effectively, we introduce an innovative approach for radio galaxy classification using COSFIRE filters. These filters possess the ability to adapt to both the shape and orientation of prototype patterns within images. The COSFIRE approach is explainable, learning-free, rotation-tolerant, efficient, and does not require a large training set. To assess the efficacy of our method, we conducted experiments on a benchmark radio galaxy data set comprising 1180 training samples and 404 test samples. Notably, our approach achieved an average accuracy rate of 93.36%. This achievement outperforms contemporary deep learning models, and it is the best result ever achieved on this data set. Additionally, COSFIRE filters offer better computational performance, requiring ∼20× fewer operations than the DenseNet-based competing method (when comparing at the same accuracy). Our findings underscore the effectiveness of the COSFIRE filter-based approach in addressing the complexities associated with radio galaxy classification. This research contributes to advancing the field by offering a robust solution that transcends the orientation challenges intrinsic to radio galaxy observations. Our method is versatile in that it is applicable to various image classification approaches.},
keywords = {brain-inspired, radioastronomy, trainable filters},
pubstate = {published},
tppubtype = {article}
}
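The core COSFIRE idea above can be conveyed in a few lines. The sketch below is illustrative only and not the authors' implementation: the (sigma, rho, phi) tuples are hypothetical parameters that a real configuration step would learn from a prototype pattern, and the combination is a simple geometric mean of shifted Difference-of-Gaussians responses.

import numpy as np
from scipy.ndimage import gaussian_filter, shift

def dog_response(image, sigma):
    """Difference-of-Gaussians blob response, half-wave rectified."""
    resp = gaussian_filter(image, 0.5 * sigma) - gaussian_filter(image, sigma)
    return np.maximum(resp, 0.0)

def cosfire_response(image, tuples):
    """Geometric mean of DoG responses shifted by polar offsets (rho, phi).

    Each (sigma, rho, phi) tuple is a hypothetical parameter set that the
    configuration step would normally learn from a prototype pattern.
    """
    shifted = []
    for sigma, rho, phi in tuples:
        r = dog_response(image, sigma)
        # Move each response toward the filter centre by its learned offset.
        shifted.append(shift(r, (rho * np.sin(phi), rho * np.cos(phi)),
                             order=1, mode="constant"))
    stacked = np.stack(shifted)
    return np.exp(np.log(stacked + 1e-12).mean(axis=0))  # geometric mean

# Toy usage with made-up tuples on a random image.
img = np.random.default_rng(0).random((64, 64))
tuples = [(2.0, 0.0, 0.0), (2.0, 6.0, 0.0), (2.0, 6.0, np.pi)]
print(cosfire_response(img, tuples).shape)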
2023
Ndung'u, Steven; Grobler, Trienko; Wijnholds, Stefan J.; Karastoyanova, Dimka; Azzopardi, George
Advances on the classification of radio image cubes: A review Journal Article
Links | BibTeX | Altmetric | Tags: galaxy, image classification, radioastronomy
@article{Ndung'u2023,
title = {Advances on the classification of radio image cubes: A review},
author = {Steven Ndung'u and Trienko Grobler and Stefan J. Wijnholds and Dimka Karastoyanova and George Azzopardi},
doi = {https://doi.org/10.1016/j.newar.2023.101685},
year = {2023},
date = {2023-10-12},
urldate = {2023-10-12},
journal = {New Astronomy Reviews},
keywords = {galaxy, image classification, radioastronomy},
pubstate = {published},
tppubtype = {article}
}
Wang, Xueyi; Talavera, Estefania; Karastoyanova, Dimka; Azzopardi, George
Fall detection with a non-intrusive and first-person vision approach Journal Article
Links | BibTeX | Altmetric | Tags: deep learning, egocentric vision, wearables
@article{Wang2023,
title = {Fall detection with a non-intrusive and first-person vision approach},
author = {Wang, Xueyi and Talavera, Estefania and Karastoyanova, Dimka and Azzopardi, George},
doi = {10.1109/JSEN.2023.3314828},
year = {2023},
date = {2023-09-19},
urldate = {2023-09-04},
journal = {IEEE Sensors Journal},
keywords = {deep learning, egocentric vision, wearables},
pubstate = {published},
tppubtype = {article}
}
Shi, Wen; Azzopardi, George; Karastoyanova, Dimka; Huang, Yongming
Bidirectional Piecewise Linear Representation of Time Series with Application to Collective Anomaly Detection Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: anomaly detection, time-series
@article{Shi2023,
title = {Bidirectional Piecewise Linear Representation of Time Series with Application to Collective Anomaly Detection},
author = {Shi, Wen and Azzopardi, George and Karastoyanova, Dimka and Huang, Yongming},
doi = {https://doi.org/10.1016/j.aei.2023.102155},
year = {2023},
date = {2023-09-06},
urldate = {2023-09-06},
journal = {Advanced Engineering Informatics},
volume = {58},
number = {102155},
abstract = {Directly mining high-dimensional time series presents several challenges, such as time and space costs. This study proposes a new approach for representing time series data and evaluates its effectiveness in detecting collective anomalies. The proposed method, called bidirectional piecewise linear representation (BPLR), represents the original time series using a set of linear fitting functions, which allows for dimensionality reduction while maintaining its dynamic characteristics. Similarity measurement is then performed using the piecewise integration (PI) approach, which achieves good detection performance with low computational overhead. Experimental results on synthetic and real-world data sets confirm the effectiveness and advantages of the proposed approach. The ability of the proposed method to capture more dynamic details of time series leads to consistently superior performance compared to other existing methods.},
keywords = {anomaly detection, time-series},
pubstate = {published},
tppubtype = {article}
}
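As a rough illustration of the representation described above (not the paper's BPLR, which fits segments bidirectionally and adaptively), the sketch below approximates a series with one least-squares line per fixed-size window; large residuals then hint at collective anomalies. The window size and the injected anomaly are made-up choices.

import numpy as np

def plr(series, window=50):
    """Fit one line per window; return (slope, intercept) pairs and residuals."""
    segments, residuals = [], np.zeros_like(series, dtype=float)
    for start in range(0, len(series), window):
        seg = series[start:start + window]
        t = np.arange(len(seg))
        slope, intercept = np.polyfit(t, seg, 1)   # least-squares line fit
        segments.append((slope, intercept))
        residuals[start:start + len(seg)] = seg - (slope * t + intercept)
    return segments, residuals

# Toy usage: a sine wave with an injected collective anomaly.
t = np.linspace(0, 20, 1000)
x = np.sin(t)
x[420:470] += 1.5                                  # collective anomaly
segments, res = plr(x, window=50)
print("most anomalous index:", int(np.argmax(np.abs(res))))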
Aswath, Anusha; Alsahaf, Ahmad; Giepmans, Ben N. G.; Azzopardi, George
Segmentation in large-scale cellular electron microscopy with deep learning: A literature survey Journal Article
Links | BibTeX | Altmetric | Tags: deep learning, electron microscopy, medical image analysis, segmentation
@article{Aswath2023,
title = {Segmentation in large-scale cellular electron microscopy with deep learning: A literature survey},
author = {Aswath, Anusha and Alsahaf, Ahmad and Giepmans, Ben N. G. and Azzopardi, George},
doi = {https://doi.org/10.1016/j.media.2023.102920},
year = {2023},
date = {2023-08-06},
urldate = {2023-08-06},
journal = {Medical Image Analysis},
number = {102920},
keywords = {deep learning, electron microscopy, medical image analysis, segmentation},
pubstate = {published},
tppubtype = {article}
}
2022
Mohsen, Fadi; Karastoyanova, Dimka; Azzopardi, George
Early detection of violating Mobile Apps: A data-driven predictive model approach Journal Article
Links | BibTeX | Altmetric | Tags: predictive analysis
@article{Mohsen2022,
title = {Early detection of violating Mobile Apps: A data-driven predictive model approach},
author = {Fadi Mohsen and Dimka Karastoyanova and George Azzopardi},
doi = {https://doi.org/10.1016/j.sasc.2022.200045},
year = {2022},
date = {2022-12-01},
urldate = {2022-12-01},
journal = {Systems and Soft Computing},
volume = {4},
number = {200045},
keywords = {predictive analysis},
pubstate = {published},
tppubtype = {article}
}
Alsahaf, Ahmad; Gheorghe, Radu; Hidalgo, André; Petkov, Nicolai; Azzopardi, George
Pre-insemination prediction of dystocia in dairy cattle Journal Article
Links | BibTeX | Altmetric | Tags: predictive analysis, smart farming
@article{Alsahaf2022,
title = {Pre-insemination prediction of dystocia in dairy cattle},
author = {Ahmad Alsahaf and Radu Gheorghe and Andr\'{e} Hidalgo and Nicolai Petkov and George Azzopardi},
doi = {https://doi.org/10.1016/j.prevetmed.2022.105812},
year = {2022},
date = {2022-12-01},
urldate = {2022-12-01},
journal = {Preventive Veterinary Medicine},
volume = {210},
number = {105812},
keywords = {predictive analysis, smart farming},
pubstate = {published},
tppubtype = {article}
}
Overschie, Jeroen; Alsahaf, Ahmad; Azzopardi, George
fseval: A Benchmarking Framework for Feature Selection and Feature Ranking Algorithms Journal Article
Links | BibTeX | Altmetric | Tags: feature ranking, feature selection
@article{Overschie2022,
title = {fseval: A Benchmarking Framework for Feature Selection and Feature Ranking Algorithms},
author = {Jeroen Overschie and Ahmad Alsahaf and George Azzopardi},
doi = {https://doi.org/10.21105/joss.04611},
year = {2022},
date = {2022-11-23},
urldate = {2022-11-23},
journal = {Journal of Open Source Software},
keywords = {feature ranking, feature selection},
pubstate = {published},
tppubtype = {article}
}
Bennabhaktula, Guru Swaroop; Alegre, Enrique; Karastoyanova, Dimka; Azzopardi, George
Camera Model Identification based on Forensic Traces extracted from Homogeneous Patches Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: camera identification, forensic image analysis, image noise
@article{Bennabhaktula2022,
title = {Camera Model Identification based on Forensic Traces extracted from Homogeneous Patches},
author = {Guru Swaroop Bennabhaktula and Enrique Alegre and Dimka Karastoyanova and George Azzopardi},
doi = {https://doi.org/10.1016/j.eswa.2022.117769},
year = {2022},
date = {2022-11-15},
urldate = {2022-06-03},
journal = {Expert Systems with Applications},
volume = {206},
number = {117769},
abstract = {A crucial challenge in digital image forensics is to identify the source camera model used to generate given images. This is of prime importance, especially for Law Enforcement Agencies in their investigations of Child Sexual Abuse Material found in darknets or seized storage devices. In this work, we address this challenge by proposing a solution that is characterized by two main contributions. It relies on the extraction of rather small homogeneous regions that we extract very efficiently from the integral image, and on a hierarchical classification approach with convolutional neural networks as the underlying models. We rely on homogeneous regions as they contain camera traces that are less distorted than regions with high-level scene content. The hierarchical approach that we propose is important for scaling up and making minimal modifications when new cameras are added. Furthermore, this scheme performs better than the traditional single classifier approach. By means of thorough experimentation on the publicly available Dresden data set, we achieve an accuracy of 99.01% with 5-fold cross-validation on the `natural' subset of this data set. To the best of our knowledge, this is the best result ever reported for Dresden data set.},
keywords = {camera identification, forensic image analysis, image noise},
pubstate = {published},
tppubtype = {article}
}
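The homogeneous-patch extraction described above rests on a standard trick: with integral images of I and I², the variance of any axis-aligned patch costs O(1). A minimal sketch, assuming grayscale input and an illustrative 32-pixel patch size (the paper's exact patch criteria are not reproduced here):

import numpy as np

def integral(img):
    """Integral image padded with a zero row/column for easy windowed sums."""
    return np.pad(img, ((1, 0), (1, 0))).cumsum(0).cumsum(1)

def patch_variances(img, k):
    """Variance of every k-by-k patch (top-left anchored), vectorized."""
    S1, S2 = integral(img), integral(img.astype(np.float64) ** 2)
    def win_sum(S):
        return S[k:, k:] - S[:-k, k:] - S[k:, :-k] + S[:-k, :-k]
    n = k * k
    mean = win_sum(S1) / n
    return win_sum(S2) / n - mean ** 2

img = np.random.rand(256, 256)
var = patch_variances(img, k=32)
ys, xs = np.unravel_index(np.argsort(var, axis=None)[:10], var.shape)
print("10 most homogeneous 32x32 patches at:", list(zip(ys, xs)))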
Bennabhaktula, Guru Swaroop; Timmerman, Derrick; Alegre, Enrique; Azzopardi, George
Source Camera Device Identification from Videos Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: camera identification, constrained networks, convnets, deep learning, forensic image analysis, image noise
@article{Bennabhaktula2022b,
title = {Source Camera Device Identification from Videos},
author = {Guru Swaroop Bennabhaktula and Derrick Timmerman and Enrique Alegre and George Azzopardi},
doi = {https://doi.org/10.1007/s42979-022-01202-0},
year = {2022},
date = {2022-06-01},
urldate = {2022-06-01},
journal = {SN Computer Science},
volume = {3},
number = {316},
abstract = {Source camera identification is an important and challenging problem in digital image forensics. The clues of the device used to capture the digital media are very useful for Law Enforcement Agencies (LEAs), especially to help them collect more intelligence in digital forensics. In our work, we focus on identifying the source camera device based on digital videos using deep learning methods. In particular, we evaluate deep learning models with increasing levels of complexity for source camera identification and show that with such sophistication the scene-suppression techniques do not aid in model performance. In addition, we mention several common machine learning strategies that are counter-productive in achieving a high accuracy for camera identification. We conduct systematic experiments using 28 devices from the VISION data set and evaluate the model performance on various video scenarios - flat (i.e. homogeneous), indoor, and outdoor - and evaluate the impact on classification accuracy when the videos are shared via social media platforms such as YouTube and WhatsApp. Unlike traditional PRNU (Photo Response Non-Uniformity) noise-based methods which require flat frames to estimate camera reference pattern noise, the proposed method has no such constraint and we achieve an accuracy of $72.75 \pm 1.1\%$ on the benchmark VISION data set. Furthermore, we also achieve state-of-the-art accuracy of $71.75\%$ on the QUFVD data set in identifying 20 camera devices. These two results are the best ever reported on the VISION and QUFVD data sets. Finally, we demonstrate the runtime efficiency of the proposed approach and its advantages to LEAs.},
keywords = {camera identification, constrained networks, convnets, deep learning, forensic image analysis, image noise},
pubstate = {published},
tppubtype = {article}
}
Alsahaf, Ahmad; Petkov, Nicolai; Shenoy, Vikram; Azzopardi, George
A framework for feature selection through boosting Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: feature ranking, feature selection, predictive analysis
@article{Alsahaf2022c,
title = {A framework for feature selection through boosting},
author = {Ahmad Alsahaf and Nicolai Petkov and Vikram Shenoy and George Azzopardi},
doi = {https://doi.org/10.1016/j.eswa.2021.115895},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Expert Systems with Applications},
volume = {187},
pages = {115895},
publisher = {Pergamon},
abstract = {As dimensions of datasets in predictive modelling continue to grow, feature selection becomes increasingly practical. Datasets with complex feature interactions and high levels of redundancy still present a challenge to existing feature selection methods. We propose a novel framework for feature selection that relies on boosting, or sample re-weighting, to select sets of informative features in classification problems. The method uses as its basis the feature rankings derived from fast and scalable tree-boosting models, such as XGBoost. We compare the proposed method to standard feature selection algorithms on 9 benchmark datasets. We show that the proposed approach reaches higher accuracies with fewer features on most of the tested datasets, and that the selected features have lower redundancy.},
keywords = {feature ranking, feature selection, predictive analysis},
pubstate = {published},
tppubtype = {article}
}
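A minimal sketch of the idea of selecting features through boosting, i.e. sample re-weighting: each round ranks features with a tree-boosting model, keeps the best new feature, and up-weights the samples that the current selection misclassifies. The paper builds on XGBoost; scikit-learn's GradientBoostingClassifier stands in here, and the weight-doubling rule is an illustrative choice, not the paper's exact scheme.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

def boosted_feature_selection(X, y, n_select=5):
    n = len(y)
    weights = np.full(n, 1.0 / n)
    selected = []
    for _ in range(n_select):
        model = GradientBoostingClassifier(n_estimators=50)
        model.fit(X, y, sample_weight=weights)
        ranking = np.argsort(model.feature_importances_)[::-1]
        best = next(f for f in ranking if f not in selected)
        selected.append(int(best))
        # Re-weight: emphasize samples the selected features classify poorly.
        sub = GradientBoostingClassifier(n_estimators=50)
        sub.fit(X[:, selected], y)
        miss = sub.predict(X[:, selected]) != y
        weights = np.where(miss, weights * 2.0, weights)
        weights /= weights.sum()
    return selected

X, y = make_classification(n_samples=500, n_features=20, n_informative=5,
                           random_state=0)
print("selected features:", boosted_feature_selection(X, y))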
Bhole, Amey; Udmale, Sandeep S; Falzon, Owen; Azzopardi, George
CORF3D contour maps with application to Holstein cattle recognition from RGB and thermal images Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, contour detection, convnets, deep learning, noise suppression, pattern recognition, smart farming
@article{bhole2022corf3d,
title = {CORF3D contour maps with application to Holstein cattle recognition from RGB and thermal images},
author = {Amey Bhole and Sandeep S Udmale and Owen Falzon and George Azzopardi},
doi = {https://doi.org/10.1016/j.eswa.2021.116354},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Expert Systems with Applications},
volume = {192},
number = {116354},
publisher = {Pergamon},
abstract = {Livestock management involves the monitoring of farm animals by tracking certain physiological and phenotypical characteristics over time. In the dairy industry, for instance, cattle are typically equipped with RFID ear tags. The corresponding data (e.g. milk properties) can then be automatically assigned to the respective cow when they enter the milking station. In order to move towards a more scalable, affordable, and welfare-friendly approach, automatic non-invasive solutions are more desirable. Thus, a non-invasive approach is proposed in this paper for the automatic identification of individual Holstein cattle from the side view while exiting a milking station. It considers input images from a thermal-RGB camera. The thermal images are used to delineate the cow from the background. Subsequently, any occluding rods from the milking station are removed and inpainted with the fast marching algorithm. Then, it extracts the RGB map of the segmented cattle along with a novel CORF3D contour map. The latter contains three contour maps extracted by the Combination of Receptive Fields (CORF) model with different strengths of push\textendashpull inhibition. This mechanism suppresses noise in the form of grain type texture. The effectiveness of the proposed approach is demonstrated by means of experiments using a 5-fold and a leave-one-day-out cross-validation on a new data set of 3694 images of 383 cows collected from the Dairy Campus in Leeuwarden (the Netherlands) over 9 days. In particular, when combining RGB and CORF3D maps by late fusion, high average accuracies were obtained for the 5-fold cross-validation and the leave-one-day-out experiments. The two maps were combined by first learning two ConvNet classification models, one for each type of map. The feature vectors in the two FC layers obtained from training images were then concatenated and used to learn a linear SVM classification model. In principle, the proposed approach with the novel CORF3D contour maps is suitable for various image classification applications, especially where grain type texture is a confounding variable.},
keywords = {brain-inspired, contour detection, convnets, deep learning, noise suppression, pattern recognition, smart farming},
pubstate = {published},
tppubtype = {article}
}
2021
Lövdal, S. Sofie; Den Hartigh, Ruud J. R.; Azzopardi, George
Injury Prediction in Competitive Runners With Machine Learning Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: predictive analysis, sport science, wearables
@article{injury2021b,
title = {Injury Prediction in Competitive Runners With Machine Learning},
author = {S. Sofie L\"{o}vdal and Ruud J.R. Den Hartigh and George Azzopardi},
doi = {https://doi.org/10.1123/ijspp.2020-0518},
year = {2021},
date = {2021-04-29},
urldate = {2021-04-29},
journal = {International Journal of Sports Physiology and Performance},
volume = {16},
issue = {10},
pages = {1522-1531},
abstract = {Purpose: Staying injury free is a major factor for success in sports. Although injuries are difficult to forecast, novel technologies and data-science applications could provide important insights. Our purpose was to use machine learning for the prediction of injuries in runners, based on detailed training logs. Methods: Prediction of injuries was evaluated on a new data set of 74 high-level middle- and long-distance runners, over a period of 7 years. Two analytic approaches were applied. First, the training load from the previous 7 days was expressed as a time series, with each day’s training being described by 10 features. These features were a combination of objective data from a global positioning system watch (eg, duration, distance), together with subjective data about the exertion and success of the training. Second, a training week was summarized by 22 aggregate features, and a time window of 3 weeks before the injury was considered. Results: A predictive system based on bagged XGBoost machine-learning models resulted in receiver operating characteristic curves with average areas under the curves of 0.724 and 0.678 for the day and week approaches, respectively. The results of the day approach especially reflect a reasonably high probability that our system makes correct injury predictions. Conclusions: Our machine-learning-based approach predicts a sizable portion of the injuries, in particular when the model is based on training-load data in the days preceding an injury. Overall, these results demonstrate the possible merits of using machine learning to predict injuries and tailor training programs for athletes.},
keywords = {predictive analysis, sport science, wearables},
pubstate = {published},
tppubtype = {article}
}
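The day approach described above can be mimicked as follows: flatten the previous 7 days (10 features per day) into one 70-dimensional sample and classify with a bagged ensemble of boosted trees. Everything below is synthetic, and scikit-learn's gradient boosting stands in for the XGBoost models used in the paper.

import numpy as np
from sklearn.ensemble import BaggingClassifier, GradientBoostingClassifier
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
n_days, n_feat, window = 2000, 10, 7
daily = rng.normal(size=(n_days, n_feat))              # fake daily training logs

# One sample per day: the flattened 7 preceding days of features.
X = np.stack([daily[i - window:i].ravel() for i in range(window, n_days)])
# Fake injury labels tied to the mean load (feature 0) over the window.
y = (X[:, ::n_feat].mean(axis=1) > 0.6).astype(int)

model = BaggingClassifier(
    estimator=GradientBoostingClassifier(n_estimators=100),
    n_estimators=9,                                    # 9 bagged boosted models
)
print("ROC AUC:", cross_val_score(model, X, y, cv=5, scoring="roc_auc").mean())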
Shi, Chenyu; Meijer, Joost M; Azzopardi, George; Diercks, Gilles FH; Guo, Jiapan; Petkov, Nicolai
Use of Convolutional Neural Networks for the Detection of u-Serrated Patterns in Direct Immunofluorescence Images to Facilitate the Diagnosis of Epidermolysis Bullosa Acquisita Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, medical image analysis
@article{shi2021use,
title = {Use of Convolutional Neural Networks for the Detection of u-Serrated Patterns in Direct Immunofluorescence Images to Facilitate the Diagnosis of Epidermolysis Bullosa Acquisita},
author = {Chenyu Shi and Joost M Meijer and George Azzopardi and Gilles FH Diercks and Jiapan Guo and Nicolai Petkov},
doi = {10.1016/j.ajpath.2021.05.024},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
journal = {The American journal of pathology},
volume = {191},
number = {9},
pages = {1520--1525},
publisher = {Elsevier},
abstract = {The u-serrated immunodeposition pattern in direct immunofluorescence (DIF) microscopy is a recognizable feature and confirmative for the diagnosis of epidermolysis bullosa acquisita (EBA). Due to unfamiliarity with serrated patterns, serration pattern recognition is still of limited use in routine DIF microscopy. The objective of this study was to investigate the feasibility of using convolutional neural networks (CNNs) for the recognition of u-serrated patterns that can assist in the diagnosis of EBA. The nine most commonly used CNNs were trained and validated by using 220,800 manually delineated DIF image patches from 106 images of 46 different patients. The data set was split into 10 subsets: nine training subsets from 42 patients to train CNNs and the last subset from the remaining four patients for a validation data set of diagnostic accuracy. This process was repeated 10 times with a different subset used for validation. The best-performing CNN achieved a specificity of 89.3% and a corresponding sensitivity of 89.3% in the classification of u-serrated DIF image patches, an expert level of diagnostic accuracy. Experiments and results show the effectiveness of CNN approaches for u-serrated pattern recognition with a high accuracy. The proposed approach can assist clinicians and pathologists in recognition of u-serrated patterns in DIF images and facilitate the diagnosis of EBA.},
keywords = {convnets, deep learning, medical image analysis},
pubstate = {published},
tppubtype = {article}
}
2020
Ramachandran, Sivakumar; Strisciuglio, Nicola; Vinekar, Anand; John, Renu; Azzopardi, George
U-COSFIRE filters for vessel tortuosity quantification with application to automated diagnosis of retinopathy of prematurity Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, medical image analysis, trainable filters
@article{ramachandran2020u,
title = {U-COSFIRE filters for vessel tortuosity quantification with application to automated diagnosis of retinopathy of prematurity},
author = {Sivakumar Ramachandran and Nicola Strisciuglio and Anand Vinekar and Renu John and George Azzopardi},
doi = {https://doi.org/10.1007/s00521-019-04697-6},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {Neural Computing and Applications},
volume = {32},
number = {16},
pages = {12453--12468},
publisher = {Springer London},
abstract = {Retinopathy of prematurity (ROP) is a sight threatening disorder that primarily affects preterm infants. It is the major reason for lifelong vision impairment and childhood blindness. Digital fundus images of preterm infants obtained from a Retcam Ophthalmic Imaging Device are typically used for ROP screening. ROP is often accompanied by Plus disease that is characterized by high levels of arteriolar tortuosity and venous dilation. The recent diagnostic procedures view the prevalence of Plus disease as a factor of prognostic significance in determining its stage, progress and severity. Our aim is to develop a diagnostic method, which can distinguish images of retinas with ROP from healthy ones and that can be interpreted by medical experts. We investigate the quantification of retinal blood vessel tortuosity via a novel U-COSFIRE (Combination Of Shifted Filter Responses) filter and propose a computer-aided diagnosis tool for automated ROP detection. The proposed methodology involves segmentation of retinal blood vessels using a set of B-COSFIRE filters with different scales followed by the detection of tortuous vessels in the obtained vessel map by means of U-COSFIRE filters. We also compare our proposed technique with an angle-based diagnostic method that utilizes the magnitude and orientation responses of the multi-scale B-COSFIRE filters. We carried out experiments on a new data set of 289 infant retinal images (89 with ROP and 200 healthy) that we collected from the programme in India called KIDROP (Karnataka Internet Assisted Diagnosis of Retinopathy of Prematurity). We used 10 images (5 with ROP and 5 healthy) for learning the parameters of our methodology and the remaining 279 images (84 with ROP and 195 healthy) for performance evaluation. We achieved sensitivity and specificity equal to 0.98 and 0.97, respectively, computed on the 279 test images. The obtained results and its explainable character demonstrate the effectiveness of the proposed approach to assist medical experts.},
keywords = {brain-inspired, medical image analysis, trainable filters},
pubstate = {published},
tppubtype = {article}
}
Farrugia, Steven; Ellul, Joshua; Azzopardi, George
Detection of illicit accounts over the Ethereum blockchain Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: blockchain, fraud detection, predictive analysis
@article{farrugia2020detection,
title = {Detection of illicit accounts over the Ethereum blockchain},
author = {Steven Farrugia and Joshua Ellul and George Azzopardi},
doi = {https://doi.org/10.1016/j.eswa.2020.113318},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {Expert Systems with Applications},
volume = {150},
pages = {113318},
publisher = {Pergamon},
abstract = {The recent technological advent of cryptocurrencies and their respective benefits have been shrouded with a number of illegal activities operating over the network such as money laundering, bribery, phishing, fraud, among others. In this work we focus on the Ethereum network, which has seen over 400 million transactions since its inception. Using 2179 accounts flagged by the Ethereum community for their illegal activity coupled with 2502 normal accounts, we seek to detect illicit accounts based on their transaction history using the XGBoost classifier. Using 10 fold cross-validation, XGBoost achieved an average accuracy of 0.963 ( ± 0.006) with an average AUC of 0.994 ( ± 0.0007). The top three features with the largest impact on the final model output were established to be ‘Time diff between first and last (Mins)’, ‘Total Ether balance’ and ‘Min value received’. Based on the results we conclude that the proposed approach is highly effective in detecting illicit accounts over the Ethereum network. Our contribution is multi-faceted; firstly, we propose an effective method to detect illicit accounts over the Ethereum network; secondly, we provide insights about the most important features; and thirdly, we publish the compiled data set as a benchmark for future related works.},
keywords = {blockchain, fraud detection, predictive analysis},
pubstate = {published},
tppubtype = {article}
}
Melotti, Damiano; Heimbach, Kevin; Rodríguez-Sánchez, Antonio; Strisciuglio, Nicola; Azzopardi, George
A robust contour detection operator with combined push-pull inhibition and surround suppression Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, contour detection, noise suppression
@article{melotti2020robust,
title = {A robust contour detection operator with combined push-pull inhibition and surround suppression},
author = {Damiano Melotti and Kevin Heimbach and Antonio Rodr\'{i}guez-S\'{a}nchez and Nicola Strisciuglio and George Azzopardi},
doi = {https://doi.org/10.1016/j.ins.2020.03.026},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {Information Sciences},
volume = {524},
pages = {229-240},
publisher = {Elsevier},
abstract = {Contour detection is a salient operation in many computer vision applications as it extracts features that are important for distinguishing objects in scenes. It is believed to be a primary role of simple cells in visual cortex of the mammalian brain. Many of such cells receive push-pull inhibition or surround suppression. We propose a computational model that exhibits a combination of these two phenomena. It is based on two existing models, which have been proven to be very effective for contour detection. In particular, we introduce a brain-inspired contour operator that combines push-pull and surround inhibition. It turns out that this combination results in a more effective contour detector, which suppresses texture while keeping the strongest responses to lines and edges, when compared to existing models. The proposed model consists of a Combination of Receptive Field (or CORF) model with push-pull inhibition, extended with surround suppression. We demonstrate the effectiveness of the proposed approach on the RuG and Berkeley benchmark data sets of 40 and 500 images, respectively. The proposed push-pull CORF operator with surround suppression outperforms the one without suppression with high statistical significance.},
keywords = {brain-inspired, contour detection, noise suppression},
pubstate = {published},
tppubtype = {article}
}
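A toy version of the combined inhibition idea (heavily simplified: an isotropic DoG instead of the orientation-selective CORF operator, with a hypothetical inhibition weight and pooling width): the rectified response of the opposite-polarity filter, pooled over a neighbourhood, is subtracted from the excitatory response, which suppresses texture more than coherent edges.

import numpy as np
from scipy.ndimage import gaussian_filter

def push_pull(image, sigma=2.0, inhibition=0.8):
    """Excitatory DoG response minus pooled response of opposite polarity."""
    dog = gaussian_filter(image, 0.5 * sigma) - gaussian_filter(image, sigma)
    push = np.maximum(dog, 0.0)                         # excitatory response
    pull = gaussian_filter(np.maximum(-dog, 0.0), 2.0 * sigma)  # pooled inhibition
    return np.maximum(push - inhibition * pull, 0.0)

# A step edge survives the inhibition better than pixel noise does.
rng = np.random.default_rng(0)
edge = np.zeros((64, 64)); edge[:, 32:] = 1.0
noise = rng.normal(0.5, 0.3, (64, 64))
print("edge response:", push_pull(edge).max())
print("noise response:", push_pull(noise).max())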
Wang, Xueyi; Ellul, Joshua; Azzopardi, George
Elderly fall detection systems: A literature survey Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, egocentric vision, fall detection, predictive analysis, wearables
@article{wang2020elderly,
title = {Elderly fall detection systems: A literature survey},
author = {Xueyi Wang and Joshua Ellul and George Azzopardi},
doi = {https://doi.org/10.3389/frobt.2020.00071},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {Frontiers in Robotics and AI},
volume = {7},
pages = {71},
publisher = {Frontiers},
abstract = {Falling is among the most damaging events elderly people may experience. With the ever-growing aging population, there is an urgent need for the development of fall detection systems. Thanks to the rapid development of sensor networks and the Internet of Things (IoT), human-computer interaction using sensor fusion has been regarded as an effective method to address the problem of fall detection. In this paper, we provide a literature survey of work conducted on elderly fall detection using sensor networks and IoT. Although there are various existing studies which focus on fall detection with individual sensors, such as wearable ones and depth cameras, the performance of these systems is still not satisfactory, as they suffer mostly from high false alarms. The literature shows that fusing the signals of different sensors could result in higher accuracy and lower false alarms, while improving the robustness of such systems. We approach this survey from different perspectives, including data collection, data transmission, sensor fusion, data analysis, security, and privacy. We also review the benchmark data sets available that have been used to quantify the performance of the proposed methods. The survey is meant to provide researchers in the field of elderly fall detection using sensor networks with a summary of progress achieved up to date and to identify areas where further effort would be beneficial.},
keywords = {convnets, deep learning, egocentric vision, fall detection, predictive analysis, wearables},
pubstate = {published},
tppubtype = {article}
}
Chaves, Deisy; Fidalgo, Eduardo; Alegre, Enrique; Alaiz-Rodríguez, Rocío; Jáñez-Martino, Francisco; Azzopardi, George
Assessment and Estimation of Face Detection Performance Based on Deep Learning for Forensic Applications Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, face analysis, forensic image analysis
@article{chaves2020assessment,
title = {Assessment and Estimation of Face Detection Performance Based on Deep Learning for Forensic Applications},
author = {Deisy Chaves and Eduardo Fidalgo and Enrique Alegre and Roc\'{i}o Alaiz-Rodr\'{i}guez and Francisco J\'{a}\~{n}ez-Martino and George Azzopardi},
doi = {https://doi.org/10.3390/s20164491},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {Sensors},
volume = {20},
number = {4491},
issue = {16},
publisher = {MDPI},
abstract = {Face recognition is a valuable forensic tool for criminal investigators since it certainly helps in identifying individuals in scenarios of criminal activity like fugitives or child sexual abuse. It is, however, a very challenging task as it must be able to handle low-quality images of real world settings and fulfill real time requirements. Deep learning approaches for face detection have proven to be very successful but they require large computation power and processing time. In this work, we evaluate the speed\textendashaccuracy tradeoff of three popular deep-learning-based face detectors on the WIDER Face and UFDD data sets in several CPUs and GPUs. We also develop a regression model capable to estimate the performance, both in terms of processing time and accuracy. We expect this to become a very useful tool for the end user in forensic laboratories in order to estimate the performance for different face detection options. Experimental results showed that the best speed\textendashaccuracy tradeoff is achieved with images resized to 50% of the original size in GPUs and images resized to 25% of the original size in CPUs. Moreover, performance can be estimated using multiple linear regression models with a Mean Absolute Error (MAE) of 0.113, which is very promising for the forensic field.},
keywords = {convnets, deep learning, face analysis, forensic image analysis},
pubstate = {published},
tppubtype = {article}
}
van der Heide, EMM; Kamphuis, C; Veerkamp, RF; Athanasiadis, IN; Azzopardi, G; van Pelt, ML; Ducro, BJ
Improving predictive performance on survival in dairy cattle using an ensemble learning approach Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: ensemble learning, predictive analysis, smart farming
@article{van2020improving,
title = {Improving predictive performance on survival in dairy cattle using an ensemble learning approach},
author = {EMM van der Heide and C Kamphuis and RF Veerkamp and IN Athanasiadis and G Azzopardi and ML van Pelt and BJ Ducro},
doi = {https://doi.org/10.1016/j.compag.2020.105675},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {Computers and Electronics in Agriculture},
volume = {177},
number = {105675},
publisher = {Elsevier},
abstract = {Cow survival is a complex trait that combines traits like milk production, fertility, health and environmental factors such as farm management. This complexity makes survival difficult to predict accurately. This is probably the reason why few studies attempted to address this problem and no studies are published that use ensemble methods for this purpose. We explored if we could improve prediction of cow survival to second lactation, when predicted at five different moments in a cow’s life, by combining the predictions of multiple (weak) methods in an ensemble method. We tested four ensemble methods: majority voting rule, multiple logistic regression, random forest and naive Bayes. Precision, recall, balanced accuracy, area under the curve (AUC) and gains in proportion of surviving cows in a scenario where the best 50% were selected were used to evaluate the ensemble model performance. We also calculated correlations between the ensemble models and obtained McNemar’s test statistics. We compared the performance of the ensemble methods against those of the individual methods. We also tested if there was a difference in performance metrics when continuous (from 0 to 1) and binary (0 or 1) prediction outcomes were used. In general, using continuous prediction output resulted in higher performance metrics than binary ones. AUCs for models ranged from 0.561 to 0.731, with generally increasing performance at moments later in life. Precision, AUC and balanced accuracy values improved significantly for the naive Bayes and multiple logistic regression ensembles in at least one data set, although performance metrics did remain low overall. The multiple logistic regression ensemble method resulted in equal or better precision, AUC, balanced accuracy and proportion of animals surviving on all datasets and was significantly different from the other ensembles in three out of five moments. The random forest ensemble method resulted in the least significant improvement over the individual methods.},
keywords = {ensemble learning, predictive analysis, smart farming},
pubstate = {published},
tppubtype = {article}
}
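The best-performing combiner reported above, multiple logistic regression over the continuous outputs of several base classifiers, corresponds to standard stacking. A minimal sketch on synthetic data; the base learners and class balance are illustrative choices, not the paper's exact setup.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score

X, y = make_classification(n_samples=1000, n_features=15,
                           weights=[0.3, 0.7], random_state=1)

ensemble = StackingClassifier(
    estimators=[
        ("rf", RandomForestClassifier(n_estimators=100)),
        ("nb", GaussianNB()),
        ("lr", LogisticRegression(max_iter=1000)),
    ],
    final_estimator=LogisticRegression(),
    stack_method="predict_proba",      # combine continuous outputs, not votes
)
print("AUC:", cross_val_score(ensemble, X, y, cv=5, scoring="roc_auc").mean())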
2019
Shi, Chenyu; Meijer, Joost M; Guo, Jiapan; Azzopardi, George; Diercks, Gilles FH; Schmidt, Enno; Zillikens, Detlef; Jonkman, Marcel F; Petkov, Nicolai
Detection of u-serrated patterns in direct immunofluorescence images of autoimmune bullous diseases by inhibition-augmented COSFIRE filters Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, medical image analysis, noise suppression, trainable filters
@article{shi2019detection,
title = {Detection of u-serrated patterns in direct immunofluorescence images of autoimmune bullous diseases by inhibition-augmented COSFIRE filters},
author = {Chenyu Shi and Joost M Meijer and Jiapan Guo and George Azzopardi and Gilles FH Diercks and Enno Schmidt and Detlef Zillikens and Marcel F Jonkman and Nicolai Petkov},
doi = {10.1016/j.ijmedinf.2018.11.007},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {International Journal of Medical Informatics},
volume = {122},
pages = {27--36},
publisher = {Elsevier},
abstract = {Direct immunofluorescence (DIF) microscopy of a skin biopsy is used by physicians and pathologists to diagnose autoimmune bullous dermatoses (AIBD). This technique is the reference standard for diagnosis of AIBD, which is used worldwide in medical laboratories. For diagnosis of subepidermal AIBD (sAIBD), two different types of serrated pattern of immunodepositions can be recognized from DIF images, namely n- and u-serrated patterns. The n-serrated pattern is typically found in the most common sAIBD bullous pemphigoid. Presence of the u-serrated pattern indicates the sAIBD subtype epidermolysis bullosa acquisita (EBA), which has a different prognosis and requires a different treatment. The manual identification of these serrated patterns is learnable but challenging. We propose an automatic technique that is able to localize u-serrated patterns for automated computer-assisted diagnosis of EBA. The distinctive feature of u-serrated patterns as compared to n-serrated patterns is the presence of ridge-endings. We introduce a novel ridge-ending detector which uses inhibition-augmented trainable COSFIRE filters. Then, we apply a hierarchical clustering approach to detect the suspicious u-serrated patterns from the detected ridge-endings. For each detected u-serrated pattern we provide a score that indicates the reliability of its detection. In order to evaluate the proposed approach, we created a data set with 180 DIF images for serration pattern analysis. This data set consists of seven subsets which were obtained from various biopsy samples under different conditions. We achieve an average recognition rate of 82.2% of the u-serrated pattern on these 180 DIF images, which is comparable to the recognition rate achieved by experienced medical doctors and pathologists.},
keywords = {brain-inspired, medical image analysis, noise suppression, trainable filters},
pubstate = {published},
tppubtype = {article}
}
Guo, Jiapan; Azzopardi, George; Shi, Chenyu; Jansonius, Nomdo M; Petkov, Nicolai
Automatic Determination of Vertical Cup-to-Disc Ratio in Retinal Fundus Images for Glaucoma Screening Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, medical image analysis, trainable filters
@article{guo2019automatic,
title = {Automatic Determination of Vertical Cup-to-Disc Ratio in Retinal Fundus Images for Glaucoma Screening},
author = {Jiapan Guo and George Azzopardi and Chenyu Shi and Nomdo M Jansonius and Nicolai Petkov},
doi = {10.1109/ACCESS.2018.2890544},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {IEEE Access},
volume = {7},
pages = {8527--8541},
publisher = {IEEE},
abstract = {Glaucoma is a chronic progressive optic neuropathy that causes visual impairment or blindness if left untreated. It is crucial to diagnose it at an early stage in order to enable treatment. Fundus photography is a viable option for population-based screening. A fundus photograph enables the observation of the excavation of the optic disk\textemdashthe hallmark of glaucoma. The excavation is quantified as a vertical cup-to-disk ratio (VCDR). The manual assessment of retinal fundus images is, however, time-consuming and costly. Thus, an automated system is necessary to assist human observers. We propose a computer-aided diagnosis system, which consists of the localization of the optic disk, the determination of the height of the optic disk and the cup, and the computation of the VCDR. We evaluated the performance of our approach on eight publicly available datasets, which have, in total, 1712 retinal fundus images. We compared the obtained VCDR values with those provided by an experienced ophthalmologist and achieved a weighted VCDR mean difference of 0.11. The system provides a reliable estimation of the height of the optic disk and the cup in terms of the relative height error (RHE = 0.08 and 0.09, respectively). The Bland\textendashAltman analysis showed that the system achieves a good agreement with the manual annotations, especially for large VCDRs which indicate pathology.},
keywords = {brain-inspired, medical image analysis, trainable filters},
pubstate = {published},
tppubtype = {article}
}
Neocleous, Andreas; Azzopardi, George; Dee, Michael
Identification of possible Δ14C anomalies since 14 ka BP: A computational intelligence approach Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: anomaly detection, brain-inspired, predictive analysis, time-series, trainable filters
@article{neocleous2019identification,
title = {Identification of possible Δ14C anomalies since 14 ka BP: A computational intelligence approach},
author = {Andreas Neocleous and George Azzopardi and Michael Dee},
doi = {10.1016/j.scitotenv.2019.01.251},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {Science of The Total Environment},
volume = {663},
pages = {162--169},
publisher = {Elsevier},
abstract = {Rapid increments in the concentration of the radiocarbon in the atmosphere (Δ14C) have been identified in the years 774-775 CE and 993-994 CE (Miyake events) using annual measurements on known-age tree-rings. The level of cosmic radiation implied by such increases could cause the failure of satellite telecommunication systems, and thus, there is a need to model and predict them. In this work, we investigated several intelligent computational methods to identify similar events in the past. We apply state-of-the-art pattern matching techniques as well as feature representation, a procedure that typically is used in machine learning and classification. To validate our findings, we used as ground truth the two confirmed Miyake events, and several other dates that have been proposed in the literature. We show that some of the methods used in this study successfully identify most of the ground truth events (~1% false positive rate at 75% true positive rate). Our results show that computational methods can be used to identify comparable patterns of interest and hence potentially uncover sudden increments of Δ14C in the past.},
keywords = {anomaly detection, brain-inspired, predictive analysis, time-series, trainable filters},
pubstate = {published},
tppubtype = {article}
}
Neocleous, Andreas; Azzopardi, George; Kuitems, Margot; Scifo, Andrea; Dee, Michael
Trainable Filters for the Identification of Anomalies in Cosmogenic Isotope Data Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: anomaly detection, brain-inspired, predictive analysis, time-series, trainable filters
@article{neocleous2019trainable,
title = {Trainable Filters for the Identification of Anomalies in Cosmogenic Isotope Data},
author = {Andreas Neocleous and George Azzopardi and Margot Kuitems and Andrea Scifo and Michael Dee},
doi = {10.1109/ACCESS.2019.2900123},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {IEEE Access},
volume = {7},
pages = {24585--24592},
publisher = {IEEE},
abstract = {Extreme bursts of radiation from space result in rapid increases in the concentration of radiocarbon in the atmosphere. Such rises, known as Miyake Events, can be detected through the measurement of radiocarbon in dendrochronological archives. The identification of Miyake Events is important because radiation impacts of this magnitude pose an existential threat to satellite communications and aeronautical avionics and may even be detrimental to human health. However, at present, radiocarbon measurements on tree-ring archives are generally only available at decadal resolution, which smooths out the effect of a possible radiation burst. The Miyake Events discovered so far, in tree-rings from the years 3372-3371 BCE, 774-775 CE, and 993-994 CE, have essentially been found by chance, but there may be more. In this paper, we use signal processing techniques, in particular COSFIRE, to train filters with data on annual changes in radiocarbon (Δ14C) around those dates. Then, we evaluate the trained filters and attempt to detect similar Miyake Events in the past. The method that we propose is promising, since it identifies the known Miyake Events at a relatively low false positive rate. Using the findings of this paper, we propose a list of 26 calendar years that our system persistently indicates are Miyake Event-like. We are currently examining a short-list of five of the newly identified dates and intend to perform single-year radiocarbon measurements over them. Signal processing techniques, such as COSFIRE filters, can be used as guidance tools since they are able to identify similar patterns of interest, even if they vary in time or in amplitude.},
keywords = {anomaly detection, brain-inspired, predictive analysis, time-series, trainable filters},
pubstate = {published},
tppubtype = {article}
}
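One of the pattern-matching components described above can be approximated by sliding normalized cross-correlation: correlate a template shaped like a Miyake signature (a sharp Δ14C rise followed by slow decay) with every window of the record and flag the peaks. The series and template below are synthetic stand-ins, not real radiocarbon data.

import numpy as np

def zscore(v):
    return (v - v.mean()) / (v.std() + 1e-12)

def match(series, template):
    """Normalized cross-correlation of the template at every window position."""
    m = len(template)
    t = zscore(template)
    return np.array([
        float(np.dot(zscore(series[i:i + m]), t)) / m
        for i in range(len(series) - m + 1)
    ])

rng = np.random.default_rng(7)
series = rng.normal(0, 0.2, 2000)                  # fake annual record
template = np.concatenate([np.zeros(5), [1.2],
                           1.2 * np.exp(-np.arange(10) / 4.0)])
series[1200:1200 + len(template)] += template      # inject one synthetic event
scores = match(series, template)
print("best match at year index:", int(np.argmax(scores)))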
Alsahaf, Ahmad; Azzopardi, George; Ducro, Bart; Hanenberg, Egiel; Veerkamp, Roel F; Petkov, Nicolai
Estimation of Muscle Scores of Live Pigs Using a Kinect Camera Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: image processing, pattern recognition, predictive analysis, smart farming
@article{alsahaf2019estimation,
title = {Estimation of Muscle Scores of Live Pigs Using a Kinect Camera},
author = {Ahmad Alsahaf and George Azzopardi and Bart Ducro and Egiel Hanenberg and Roel F Veerkamp and Nicolai Petkov},
doi = {10.1109/ACCESS.2019.2910986},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {IEEE Access},
volume = {7},
pages = {52238--52245},
publisher = {IEEE},
abstract = {The muscle grading of livestock is a primary component of valuation in the meat industry. In pigs, the muscularity of a live animal is traditionally estimated by visual and tactile inspection from an experienced assessor. In addition to being a time-consuming process, scoring of this kind suffers from inconsistencies inherent to the subjectivity of human assessment. On the other hand, accurate, computer-driven methods for carcass composition estimation, such as magnetic resonance imaging (MRI) and computed tomography scans (CT-scans), are expensive and cumbersome to both the animals and their handlers. In this paper, we propose a method that is fast, inexpensive, and non-invasive for estimating the muscularity of live pigs, using RGB-D computer vision and machine learning. We used morphological features extracted from the depth images of pigs to train a classifier that estimates the muscle scores that are likely to be given by a human assessor. The depth images were obtained from a Kinect v1 camera which was placed over an aisle through which the pigs passed freely. The data came from 3246 pigs, each having 20 depth images, and a muscle score from 1 to 7 (reduced later to 5 scores) assigned by an experienced assessor. The classification based on morphological features of the pig's body shape-using a gradient boosted classifier-resulted in a mean absolute error of 0.65 in tenfold cross-validation. Notably, the majority of the errors corresponded to pigs being classified as having muscle scores adjacent to the groundtruth labels given by the assessor. According to the end users of this application, the proposed approach could be used to replace expert assessors at the farm.},
keywords = {image processing, pattern recognition, predictive analysis, smart farming},
pubstate = {published},
tppubtype = {article}
}
Strisciuglio, Nicola; Azzopardi, George; Petkov, Nicolai
Robust Inhibition-augmented Operator for Delineation of Curvilinear Structures Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, contour detection, noise suppression, trainable filters
@article{strisciuglio2019robust,
title = {Robust Inhibition-augmented Operator for Delineation of Curvilinear Structures},
author = {Nicola Strisciuglio and George Azzopardi and Nicolai Petkov},
doi = {10.1109/TIP.2019.2922096},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {IEEE Transactions on Image Processing},
volume = {28},
number = {12},
pages = {5852--5866},
publisher = {IEEE},
abstract = {Delineation of curvilinear structures in images is an important basic step of several image processing applications, such as segmentation of roads or rivers in aerial images, vessels or staining membranes in medical images, and cracks in pavements and roads, among others. Existing methods suffer from insufficient robustness to noise. In this paper, we propose a novel operator for the detection of curvilinear structures in images, which we demonstrate to be robust to various types of noise and effective in several applications. We call it RUSTICO, which stands for RobUST Inhibition-augmented Curvilinear Operator. It is inspired by the push-pull inhibition in visual cortex and takes as input the responses of two trainable B-COSFIRE filters of opposite polarity. The output of RUSTICO consists of a magnitude map and an orientation map. We carried out experiments on a data set of synthetic stimuli with noise drawn from different distributions, as well as on several benchmark data sets of retinal fundus images, crack pavements, and aerial images and a new data set of rose bushes used for automatic gardening. We evaluated the performance of RUSTICO by a metric that considers the structural properties of line networks (connectivity, area, and length) and demonstrated that RUSTICO outperforms many existing methods with high statistical significance. RUSTICO exhibits high robustness to noise and texture.},
keywords = {brain-inspired, contour detection, noise suppression, trainable filters},
pubstate = {published},
tppubtype = {article}
}
2018
Azzopardi, George; Greco, Antonio; Saggese, Alessia; Vento, Mario
Fusion of domain-specific and trainable features for gender recognition from face images Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, face analysis, trainable filters
@article{azzopardi2018fusion,
title = {Fusion of domain-specific and trainable features for gender recognition from face images},
author = {George Azzopardi and Antonio Greco and Alessia Saggese and Mario Vento},
doi = {10.1109/ACCESS.2018.2823378},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
journal = {IEEE Access},
volume = {6},
pages = {24171--24183},
publisher = {IEEE},
abstract = {The popularity and the appeal of systems which are able to automatically determine the gender from face images are growing rapidly. Such a great interest arises from the wide variety of applications, especially in the fields of retail and video surveillance. In recent years, there have been several attempts to address this challenge, but a definitive solution has not yet been found. In this paper, we propose a novel approach that fuses domain-specific and trainable features to recognize the gender from face images. In particular, we use the SURF descriptors extracted from 51 facial landmarks related to eyes, nose, and mouth as domain-dependent features, and the COSFIRE filters as trainable features. The proposed approach turns out to be very robust with respect to the well-known face variations, including different poses, expressions, and illumination conditions. It achieves state-of-the-art recognition rates on the GENDER-FERET (94.7%) and on the labeled faces in the wild (99.4%) data sets, which are two of the most popular benchmarks for gender recognition. We further evaluated the method on a new data set acquired in real scenarios, the UNISA-Public, recently made publicly available. It consists of 206 training (144 male, 62 female) and 200 test (139 male, 61 female) images that are acquired with a real-time indoor camera capturing people in regular walking motion. Such an experiment has the aim of assessing the capability of the algorithm to deal with face images extracted from videos, which are definitely more challenging than the still images available in the standard data sets. Also for this data set, we achieved a high recognition rate of 91.5%, which confirms the generalization capabilities of the proposed approach. Of the two types of features, the trainable COSFIRE filters are the most effective and, given their trainable character, they can be applied in any visual pattern recognition problem.},
keywords = {convnets, deep learning, face analysis, trainable filters},
pubstate = {published},
tppubtype = {article}
}
Alsahaf, Ahmad; Azzopardi, George; Ducro, Bart; Hanenberg, Egiel; Veerkamp, Roel F; Petkov, Nicolai
Prediction of slaughter age in pigs and assessment of the predictive value of phenotypic and genetic information using random forest Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: predictive analysis, smart farming
@article{alsahaf2018prediction,
title = {Prediction of slaughter age in pigs and assessment of the predictive value of phenotypic and genetic information using random forest},
author = {Ahmad Alsahaf and George Azzopardi and Bart Ducro and Egiel Hanenberg and Roel F Veerkamp and Nicolai Petkov},
doi = {https://doi.org/10.1093/jas/sky359},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
journal = {Journal of animal science},
volume = {96},
number = {12},
pages = {4935--4943},
publisher = {Oxford University Press US},
abstract = {The weight of a pig and the rate of its growth are key elements in pig production. In particular, predicting future growth is extremely useful, since it can help in determining feed costs, pen space requirements, and the age at which a pig reaches a desired slaughter weight. However, making these predictions is challenging, due to the natural variation in how individual pigs grow, and the different causes of this variation. In this paper, we used machine learning, namely random forest (RF) regression, for predicting the age at which the slaughter weight of 120 kg is reached. Additionally, we used the variable importance score from RF to quantify the importance of different types of input data for that prediction. Data of 32,979 purebred Large White pigs were provided by Topigs Norsvin, consisting of phenotypic data, estimated breeding values (EBVs), along with pedigree and pedigree-genetic relationships. Moreover, we presented a 2-step data reduction procedure, based on random projections (RPs) and principal component analysis (PCA), to extract features from the pedigree and genetic similarity matrices for use as inputs in the prediction models. Our results showed that relevant phenotypic features were the most effective in predicting the output (age at 120 kg), explaining approximately 62% of its variance (i.e., R2 = 0.62). Estimated breeding value, pedigree, or pedigree-genetic features interchangeably explain 2% of additional variance when added to the phenotypic features, while explaining, respectively, 38%, 39%, and 34% of the variance when used separately.},
keywords = {predictive analysis, smart farming},
pubstate = {published},
tppubtype = {article}
}
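The two main ingredients of the study above, RF regression with variable importances and the 2-step random-projection-plus-PCA reduction of a large relationship matrix, can be sketched as follows. The synthetic data, shapes, and component counts are assumptions for illustration only.

import numpy as np
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.random_projection import GaussianRandomProjection

rng = np.random.default_rng(0)
X_pheno = rng.normal(size=(1000, 10))        # phenotypic features (stand-in)
K = rng.normal(size=(1000, 5000))            # wide relationship matrix (stand-in)
y = rng.normal(loc=180, scale=15, size=1000) # age at 120 kg, in days (stand-in)

# Step 1: random projection reduces the matrix width cheaply;
# Step 2: PCA on the projected data extracts a few input features.
K_rp = GaussianRandomProjection(n_components=200, random_state=0).fit_transform(K)
K_pca = PCA(n_components=10).fit_transform(K_rp)

X = np.hstack([X_pheno, K_pca])
rf = RandomForestRegressor(n_estimators=200, random_state=0).fit(X, y)
print(rf.feature_importances_[:10])          # importances of the phenotypic inputs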
2017
Fernández-Robles, Laura; Azzopardi, George; Alegre, Enrique; Petkov, Nicolai
Machine-vision-based identification of broken inserts in edge profile milling heads Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: machine vision, pattern recognition, visual quality inspection
@article{Fernandez2017,
title = {Machine-vision-based identification of broken inserts in edge profile milling heads},
author = {Laura Fern\'{a}ndez-Robles and George Azzopardi and Enrique Alegre and Nicolai Petkov},
doi = {https://doi.org/10.1016/j.rcim.2016.10.004},
year = {2017},
date = {2017-04-01},
urldate = {2017-04-01},
journal = {Robotics and Computer-Integrated Manufacturing},
volume = {44},
pages = {276--283},
abstract = {This paper presents a reliable machine vision system to automatically detect inserts and determine whether they are broken. Unlike the machining operations studied in the literature, we are dealing with edge milling head tools for aggressive machining of thick plates (up to 12 centimetres) in a single pass. The studied cutting head tool is characterised by its relatively high number of inserts (up to 30), which makes the localisation of inserts a key aspect. The identification of broken inserts is critical for a proper tool monitoring system. In the method that we propose, we first localise the screws of the inserts and then determine the expected position and orientation of the cutting edge by applying some geometrical operations. We compute the deviations from the expected cutting edge to the real edge of the inserts to determine whether an insert is broken. We evaluated the proposed method on a new dataset that we acquired and made public. The obtained result (a harmonic mean of precision and recall of 91.43%) shows that the machine vision system that we present is effective and suitable for the identification of broken inserts in machining head tools and ready to be installed in an on-line system.},
keywords = {machine vision, pattern recognition, visual quality inspection},
pubstate = {published},
tppubtype = {article}
}
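The final decision step described above, measuring deviations between the expected and the detected cutting edge, can be sketched like this. The geometry and the tolerance are hypothetical values, not the paper's calibrated parameters.

import numpy as np

def point_line_distances(points, line_pt, line_dir):
    """Perpendicular distance of each detected edge point to the
    expected cutting-edge line."""
    d = line_dir / np.linalg.norm(line_dir)
    diff = points - line_pt
    proj = np.outer(diff @ d, d)            # component along the line
    return np.linalg.norm(diff - proj, axis=1)

expected_pt = np.array([120.0, 80.0])       # a point on the expected edge (assumed)
expected_dir = np.array([1.0, 0.2])         # expected edge orientation (assumed)
edge_points = np.array([[118, 81], [125, 82], [133, 90]], dtype=float)

dev = point_line_distances(edge_points, expected_pt, expected_dir)
is_broken = np.max(dev) > 5.0               # pixels; hypothetical tolerance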
Gecer, Baris; Azzopardi, George; Petkov, Nicolai
Color-blob-based COSFIRE filters for object recognition Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, image classification, pattern recognition, trainable filters
@article{gecer2017color,
title = {Color-blob-based COSFIRE filters for object recognition},
author = {Baris Gecer and George Azzopardi and Nicolai Petkov},
doi = {https://doi.org/10.1016/j.imavis.2016.10.006},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
journal = {Image and Vision Computing},
volume = {57},
pages = {165--174},
publisher = {Elsevier},
abstract = {Most object recognition methods rely on contour-defined features obtained by edge detection or region segmentation. They are not robust to diffuse region boundaries. Furthermore, such methods do not exploit region color information. We propose color-blob-based COSFIRE (Combination of Shifted Filter Responses) filters to be selective for combinations of diffuse circular regions (blobs) in specific mutual spatial arrangements. Such a filter combines the responses of a certain selection of Difference-of-Gaussians filters, essentially blob detectors, of different scales, in certain channels of a color space, and at certain relative positions to each other. Its parameters are determined/learned in an automatic configuration process that analyzes the properties of a given prototype object of interest. We use these filters to compute features that are effective for the recognition of the prototype objects. We form feature vectors that we use with an SVM classifier. We evaluate the proposed method on a traffic sign data set (GTSRB) and a butterfly data set. For the GTSRB data set we achieve a recognition rate of 98.94%, which is slightly higher than human performance, and for the butterfly data set we achieve 89.02%. The proposed color-blob-based COSFIRE filters are very effective and outperform the contour-based COSFIRE filters. A COSFIRE filter is trainable: it can be configured with a single prototype pattern and does not require domain knowledge.},
keywords = {brain-inspired, image classification, pattern recognition, trainable filters},
pubstate = {published},
tppubtype = {article}
}
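A rough sketch of the filter response described above: Difference-of-Gaussians blob maps are computed in individual color channels, shifted to the blobs' expected relative positions, and combined multiplicatively (a geometric mean, here with uniform weights). The tuples below are illustrative, not a configuration learned from a real prototype.

import numpy as np
from scipy.ndimage import gaussian_filter, shift as nd_shift

def dog_response(channel, sigma, ratio=1.6):
    """Difference-of-Gaussians blob detector, half-wave rectified."""
    r = gaussian_filter(channel, sigma) - gaussian_filter(channel, ratio * sigma)
    return np.maximum(r, 0.0)

# One (channel, sigma, row offset, col offset) tuple per selected blob.
tuples = [(0, 2.0, 0, 0), (1, 3.0, 10, -5), (2, 2.5, -8, 4)]

img = np.random.rand(128, 128, 3)           # stand-in RGB image
responses = []
for ch, sigma, dy, dx in tuples:
    r = dog_response(img[:, :, ch], sigma)
    responses.append(nd_shift(r, (dy, dx), order=1))  # align at the center

output = np.prod(np.stack(responses), axis=0) ** (1.0 / len(tuples))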
Guo, J; Shi, C; Azzopardi, G; Petkov, N
Inhibition-augmented COSFIRE model of shape-selective neurons Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, noise suppression, trainable filters
@article{guo2017inhibition,
title = {Inhibition-augmented COSFIRE model of shape-selective neurons},
author = {J Guo and C Shi and G Azzopardi and N Petkov},
doi = {10.1147/JRD.2017.2679458},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
journal = {IBM Journal of Research and Development},
volume = {61},
number = {2/3},
pages = {1--9},
publisher = {IBM},
abstract = {Inhibition is a phenomenon that occurs in different areas of the brain, including the visual cortex. For instance, the responses of some shape-selective neurons in the inferotemporal cortex are suppressed by the presence of certain shape contour parts in their receptive fields. This suppression phenomenon is thought to increase the selectivity of such neurons. We propose an inhibition-augmented model of shape-selective neurons, as an advancement of the trainable filter approach called combination of shifted filter responses (COSFIRE). We use a positive prototype pattern and a set of negative prototype patterns to automatically configure an inhibition-augmented model. The configuration involves the selection of responses of a bank of Gabor filters (models of V1/V2 neurons) that provide excitatory or inhibitory input(s). We compute the output of the model as the excitatory input minus a fraction of the maximum of the inhibitory inputs. The configured model responds to patterns that are similar to the positive prototype but does not respond to patterns similar to the negative prototype(s). We demonstrate the effectiveness of the proposed model in shape recognition. We use the Graphics Recognition (GREC2011) benchmark dataset and demonstrate that the proposed inhibition-augmented modeling technique increases selectivity of the COSFIRE model.},
keywords = {brain-inspired, noise suppression, trainable filters},
pubstate = {published},
tppubtype = {article}
}
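The response rule quoted in the abstract above, the excitatory input minus a fraction of the strongest inhibitory input, reduces to a few lines; the rectification at zero and the value of the inhibition factor are assumptions.

import numpy as np

def inhibition_augmented_response(excitatory, inhibitory_list, eta=0.5):
    """excitatory and each element of inhibitory_list are 2-D response
    maps; eta is the inhibition factor (the value here is an assumption)."""
    inhibition = np.max(np.stack(inhibitory_list), axis=0)
    return np.maximum(excitatory - eta * inhibition, 0.0)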
Fernández-Robles, Laura; Azzopardi, George; Alegre, Enrique; Petkov, Nicolai; Castejón-Limas, Manuel
Identification of milling inserts in situ based on a versatile machine vision system Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, machine vision, trainable filters, visual quality inspection
@article{fernandez2017identification,
title = {Identification of milling inserts in situ based on a versatile machine vision system},
author = {Laura Fern\'{a}ndez-Robles and George Azzopardi and Enrique Alegre and Nicolai Petkov and Manuel Castej\'{o}n-Limas},
doi = {https://doi.org/10.1016/j.jmsy.2017.08.002},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
journal = {Journal of Manufacturing Systems},
volume = {45},
pages = {48--57},
publisher = {Elsevier},
abstract = {This paper proposes a novel method for in situ localization of multiple inserts by means of machine vision techniques, a challenging issue in the field of tool wear monitoring. Most existing research works focus on evaluating the wear of isolated inserts after they have been manually extracted from the head tool. The proposed method solves this issue, which is of paramount importance, as it frees the operator from continuously monitoring the machining process and allows the machine to continue operating without extracting the milling head for wear evaluation. We use trainable COSFIRE filters without requiring any manual intervention. This trainable approach is more versatile and generic than previous works on the topic, as it is not based on, and does not require, any domain knowledge. This allows the method to be applied automatically to new machines without the need for specific knowledge of machine vision. We use an experimental dataset that we published to test the effectiveness of the method. We achieved very good performance, with an F1 score of 0.9674, in the identification of multiple milling head inserts. The proposed approach can be considered a general framework for the localization and identification of machining pieces from images taken from mechanical monitoring systems.},
keywords = {brain-inspired, machine vision, trainable filters, visual quality inspection},
pubstate = {published},
tppubtype = {article}
}
2016
Guo, Jiapan; Shi, Chenyu; Azzopardi, George; Petkov, Nicolai
Inhibition-augmented trainable COSFIRE filters for keypoint detection and object recognition Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, keypoint detection, noise suppression, object detection, trainable filters
@article{guo2016inhibition,
title = {Inhibition-augmented trainable COSFIRE filters for keypoint detection and object recognition},
author = {Jiapan Guo and Chenyu Shi and George Azzopardi and Nicolai Petkov},
doi = {https://doi.org/10.1007/s00138-016-0777-3},
year = {2016},
date = {2016-01-01},
urldate = {2016-01-01},
journal = {Machine Vision and Applications},
volume = {27},
pages = {1197--1211},
publisher = {Springer Berlin Heidelberg},
abstract = {The shape and meaning of an object can radically change with the addition of one or more contour parts. For instance, a T-junction can become a crossover. We extend the COSFIRE trainable filter approach which uses a positive prototype pattern for configuration by adding a set of negative prototype patterns. The configured filter responds to patterns that are similar to the positive prototype but not to any of the negative prototypes. The configuration of such a filter comprises selecting given channels of a bank of Gabor filters that provide excitatory or inhibitory input and determining certain blur and shift parameters. We compute the response of such a filter as the excitatory input minus a fraction of the maximum of inhibitory inputs. We use three applications to demonstrate the effectiveness of inhibition: the exclusive detection of vascular bifurcations (i.e., without crossovers) in retinal fundus images (DRIVE data set), the recognition of architectural and electrical symbols (GREC’11 data set) and the recognition of handwritten digits (MNIST data set).},
keywords = {brain-inspired, keypoint detection, noise suppression, object detection, trainable filters},
pubstate = {published},
tppubtype = {article}
}
Strisciuglio, Nicola; Azzopardi, George; Vento, Mario; Petkov, Nicolai
Supervised vessel delineation in retinal fundus images with the automatic selection of B-COSFIRE filters Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, medical image analysis, segmentation, trainable filters
@article{strisciuglio2016supervised,
title = {Supervised vessel delineation in retinal fundus images with the automatic selection of B-COSFIRE filters},
author = {Nicola Strisciuglio and George Azzopardi and Mario Vento and Nicolai Petkov},
doi = {https://doi.org/10.1007/s00138-016-0781-7},
year = {2016},
date = {2016-01-01},
urldate = {2016-01-01},
journal = {Machine Vision and Applications},
publisher = {Springer Berlin Heidelberg},
abstract = {The inspection of retinal fundus images allows medical doctors to diagnose various pathologies. Computer-aided diagnosis systems can be used to assist in this process. As a first step, such systems delineate the vessel tree from the background. We propose a method for the delineation of blood vessels in retinal images that is effective for vessels of different thickness. In the proposed method, we employ a set of B-COSFIRE filters selective for vessels and vessel-endings. Such a set is determined in an automatic selection process and can adapt to different applications. We compare the performance of different selection methods based upon machine learning and information theory. The results that we achieve by performing experiments on two public benchmark data sets, namely DRIVE and STARE, demonstrate the effectiveness of the proposed approach.},
keywords = {brain-inspired, medical image analysis, segmentation, trainable filters},
pubstate = {published},
tppubtype = {article}
}
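One information-theoretic flavor of the selection step mentioned above might look as follows: rank candidate filter responses by their mutual information with the vessel/background label and keep the top k. The response matrix, labels, and k are illustrative assumptions, not the paper's actual selection procedure.

import numpy as np
from sklearn.feature_selection import mutual_info_classif

rng = np.random.default_rng(1)
R = rng.random((5000, 40))      # pixel samples x candidate filter responses
y = rng.integers(0, 2, 5000)    # 1 = vessel pixel, 0 = background (stand-in)

mi = mutual_info_classif(R, y, random_state=0)
selected = np.argsort(mi)[::-1][:10]    # keep the 10 most informative filters
print("selected filter indices:", selected)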
2015
Azzopardi, George; Strisciuglio, Nicola; Vento, Mario; Petkov, Nicolai
Trainable COSFIRE filters for vessel delineation with application to retinal images Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, medical image analysis, segmentation, trainable filters
@article{azzopardi2015trainable,
title = {Trainable COSFIRE filters for vessel delineation with application to retinal images},
author = {George Azzopardi and Nicola Strisciuglio and Mario Vento and Nicolai Petkov},
doi = {https://doi.org/10.1016/j.media.2014.08.002},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
journal = {Medical Image Analysis},
volume = {19},
number = {1},
pages = {46--57},
publisher = {Elsevier},
abstract = {Retinal imaging provides a non-invasive opportunity for the diagnosis of several medical pathologies. The automatic segmentation of the vessel tree is an important pre-processing step which facilitates subsequent automatic processes that contribute to such diagnosis.
We introduce a novel method for the automatic segmentation of vessel trees in retinal fundus images. We propose a filter that selectively responds to vessels and that we call B-COSFIRE with B standing for bar which is an abstraction for a vessel. It is based on the existing COSFIRE (Combination Of Shifted Filter Responses) approach. A B-COSFIRE filter achieves orientation selectivity by computing the weighted geometric mean of the output of a pool of Difference-of-Gaussians filters, whose supports are aligned in a collinear manner. It achieves rotation invariance efficiently by simple shifting operations. The proposed filter is versatile as its selectivity is determined from any given vessel-like prototype pattern in an automatic configuration process. We configure two B-COSFIRE filters, namely symmetric and asymmetric, that are selective for bars and bar-endings, respectively. We achieve vessel segmentation by summing up the responses of the two rotation-invariant B-COSFIRE filters followed by thresholding.
The results that we achieve on three publicly available data sets (DRIVE: Se = 0.7655, Sp = 0.9704; STARE: Se = 0.7716, Sp = 0.9701; CHASE_DB1: Se = 0.7585, Sp = 0.9587) are higher than many of the state-of-the-art methods. The proposed segmentation approach is also very efficient with a time complexity that is significantly lower than existing methods.},
keywords = {brain-inspired, medical image analysis, segmentation, trainable filters},
pubstate = {published},
tppubtype = {article}
}
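A simplified sketch of the response computation described above: a single DoG map is sampled at collinear offsets along the filter orientation, the samples are combined with a geometric mean (uniform weights here, where the paper uses a weighted one), and rotation tolerance is obtained by taking the maximum over orientations. Offsets, sigmas, and the threshold are illustrative assumptions.

import numpy as np
from scipy.ndimage import gaussian_filter, shift as nd_shift

def dog(image, sigma, ratio=0.5):
    """Center-surround (DoG) response, half-wave rectified."""
    return np.maximum(gaussian_filter(image, ratio * sigma)
                      - gaussian_filter(image, sigma), 0.0)

def bcosfire_like(image, rhos, sigma=2.4, n_orients=12):
    d = dog(image, sigma)
    best = np.zeros_like(d)
    for k in range(n_orients):
        theta = k * np.pi / n_orients
        # shift the DoG map so the collinear samples coincide at the center
        shifted = [nd_shift(d, (-rho * np.sin(theta), -rho * np.cos(theta)),
                            order=1) for rho in rhos]
        resp = np.prod(np.stack(shifted), axis=0) ** (1.0 / len(rhos))
        best = np.maximum(best, resp)       # rotation tolerance via max
    return best

img = np.random.rand(128, 128)              # stand-in fundus image channel
symmetric = bcosfire_like(img, rhos=[-4, -2, 0, 2, 4])   # bar-selective
asymmetric = bcosfire_like(img, rhos=[0, 2, 4])          # bar-ending-selective
vessels = (symmetric + asymmetric) > 0.1                 # hypothetical threshold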
2014
Azzopardi, George; Petkov, Nicolai
Ventral-stream-like shape representation: from pixel intensity values to trainable object-selective COSFIRE models Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, object detection, trainable filters
@article{azzopardi2014ventral,
title = {Ventral-stream-like shape representation: from pixel intensity values to trainable object-selective COSFIRE models},
author = {George Azzopardi and Nicolai Petkov},
doi = {https://doi.org/10.3389/fncom.2014.00080},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
journal = {Frontiers in Computational Neuroscience},
volume = {8},
pages = {80},
publisher = {Frontiers},
abstract = {The remarkable abilities of the primate visual system have inspired the construction of computational models of some visual neurons. We propose a trainable hierarchical object recognition model, which we call S-COSFIRE (S stands for Shape and COSFIRE stands for Combination Of Shifted FIlter REsponses) and use it to localize and recognize objects of interest embedded in complex scenes. It is inspired by the visual processing in the ventral stream (V1/V2 → V4 → TEO). Recognition and localization of objects embedded in complex scenes is important for many computer vision applications. Most existing methods require prior segmentation of the objects from the background, which in turn requires recognition. An S-COSFIRE filter is automatically configured to be selective for an arrangement of contour-based features that belong to a prototype shape specified by an example. The configuration comprises selecting relevant vertex detectors and determining certain blur and shift parameters. The response is computed as the weighted geometric mean of the blurred and shifted responses of the selected vertex detectors. S-COSFIRE filters share similar properties with some neurons in inferotemporal cortex, which provided inspiration for this work. We demonstrate the effectiveness of S-COSFIRE filters in two applications: letter and keyword spotting in handwritten manuscripts and object spotting in complex scenes for the computer vision system of a domestic robot. S-COSFIRE filters are effective in recognizing and localizing (deformable) objects in images of complex scenes without requiring prior segmentation. They are versatile trainable shape detectors, conceptually simple and easy to implement. The presented hierarchical shape representation contributes to a better understanding of the brain and to more robust computer vision algorithms.},
keywords = {brain-inspired, object detection, trainable filters},
pubstate = {published},
tppubtype = {article}
}
Azzopardi, George; Rodríguez-Sánchez, Antonio; Piater, Justus; Petkov, Nicolai
A push-pull CORF model of a simple cell with antiphase inhibition improves SNR and contour detection Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, contour detection, noise suppression, trainable filters
@article{azzopardi2014push,
title = {A push-pull CORF model of a simple cell with antiphase inhibition improves SNR and contour detection},
author = {George Azzopardi and Antonio Rodr\'{i}guez-S\'{a}nchez and Justus Piater and Nicolai Petkov},
doi = {https://doi.org/10.1371/journal.pone.0098424},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
journal = {PLoS One},
publisher = {Public Library of Science},
abstract = {We propose a computational model of a simple cell with push-pull inhibition, a property that is observed in many real simple cells. It is based on an existing model called Combination of Receptive Fields or CORF for brevity. A CORF model uses as afferent inputs the responses of model LGN cells with appropriately aligned center-surround receptive fields, and combines their output with a weighted geometric mean. The output of the proposed model simple cell with push-pull inhibition, which we call push-pull CORF, is computed as the response of a CORF model cell that is selective for a stimulus with preferred orientation and preferred contrast minus a fraction of the response of a CORF model cell that responds to the same stimulus but of opposite contrast. We demonstrate that the proposed push-pull CORF model improves signal-to-noise ratio (SNR) and achieves further properties that are observed in real simple cells, namely separability of spatial frequency and orientation as well as contrast-dependent changes in spatial frequency tuning. We also demonstrate the effectiveness of the proposed push-pull CORF model in contour detection, which is believed to be the primary biological role of simple cells. We use the RuG (40 images) and Berkeley (500 images) benchmark data sets of images with natural scenes and show that the proposed model outperforms, with very high statistical significance, the basic CORF model without inhibition, Gabor-based models with isotropic surround inhibition, and the Canny edge detector. The push-pull CORF model that we propose is a contribution to a better understanding of how visual information is processed in the brain as it provides the ability to reproduce a wider range of properties exhibited by real simple cells. As a result of push-pull inhibition a CORF model exhibits an improved SNR, which is the reason for a more effective contour detection.},
keywords = {brain-inspired, contour detection, noise suppression, trainable filters},
pubstate = {published},
tppubtype = {article}
}
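The push-pull rule stated above in sketch form: the response of a cell tuned to the preferred contrast minus a fraction of the response of a cell tuned to the opposite contrast, rectified at zero. The rectified Laplacian-of-Gaussian is only a stand-in for the full CORF model, and the inhibition fraction is an assumed value.

import numpy as np
from scipy.ndimage import gaussian_laplace

def corf_like_response(image, sigma=2.0):
    """Stand-in for a CORF simple-cell response (half-wave rectified)."""
    return np.maximum(-gaussian_laplace(image, sigma), 0.0)

def push_pull_response(image, fraction=0.9):
    push = corf_like_response(image)                 # preferred contrast
    pull = corf_like_response(image.max() - image)   # opposite contrast
    return np.maximum(push - fraction * pull, 0.0)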
2013
Azzopardi, George; Petkov, Nicolai
Automatic detection of vascular bifurcations in segmented retinal images using trainable COSFIRE filters Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, keypoint detection, medical image analysis, trainable filters
@article{azzopardi2013automatic,
title = {Automatic detection of vascular bifurcations in segmented retinal images using trainable COSFIRE filters},
author = {George Azzopardi and Nicolai Petkov},
doi = {https://doi.org/10.1016/j.patrec.2012.11.002},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
journal = {Pattern Recognition Letters},
volume = {34},
number = {8},
pages = {922--933},
publisher = {North-Holland},
abstract = {Background: The vascular tree observed in a retinal fundus image can provide clues for cardiovascular diseases. Its analysis requires the identification of vessel bifurcations and crossovers.
Methods: We use a set of trainable keypoint detectors that we call Combination Of Shifted FIlter REsponses or COSFIRE filters to automatically detect vascular bifurcations in segmented retinal images. We configure a set of COSFIRE filters that are selective for a number of prototype bifurcations and demonstrate that such filters can be effectively used to detect bifurcations that are similar to the prototypical ones. The automatic configuration of such a filter selects given channels of a bank of Gabor filters and determines certain blur and shift parameters. The response of a COSFIRE filter is computed as the weighted geometric mean of the blurred and shifted responses of the selected Gabor filters. The COSFIRE approach is inspired by the function of a specific type of shape-selective neuron in area V4 of visual cortex.
Results: We ran experiments on three data sets and achieved the following results: (a) a recall of 97.88% at precision of 96.94% on 40 manually segmented images provided in the DRIVE data set, (b) a recall of 97.32% at precision of 96.04% on 20 manually segmented images provided in the STARE data set, and (c) a recall of 97.02% at precision of 96.53% on a set of 10 automatically segmented images obtained from images in the DRIVE data set.
Conclusions: The COSFIRE filters that we use are conceptually simple and easy to implement: the filter output is computed as the weighted geometric mean of blurred and shifted Gabor filter responses. They are versatile keypoint detectors as they can be configured with any given local contour pattern and are subsequently able to detect the same and similar patterns.},
keywords = {brain-inspired, keypoint detection, medical image analysis, trainable filters},
pubstate = {published},
tppubtype = {article}
}
2012
Azzopardi, George; Petkov, Nicolai
A CORF computational model of a simple cell that relies on LGN input outperforms the Gabor function model Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, contour detection
@article{azzopardi2012corf,
title = {A CORF computational model of a simple cell that relies on LGN input outperforms the Gabor function model},
author = {George Azzopardi and Nicolai Petkov},
doi = {https://doi.org/10.1007/s00422-012-0486-6},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
journal = {Biological Cybernetics},
volume = {106},
pages = {177--189},
publisher = {Springer-Verlag},
abstract = {Simple cells in primary visual cortex are believed to extract local contour information from a visual scene. The 2D Gabor function (GF) model has gained particular popularity as a computational model of a simple cell. However, it short-cuts the LGN, it cannot reproduce a number of properties of real simple cells, and its effectiveness in contour detection tasks has never been compared with the effectiveness of alternative models. We propose a computational model that uses as afferent inputs the responses of model LGN cells with center-surround receptive fields (RFs) and we refer to it as a Combination of Receptive Fields (CORF) model. We use shifted gratings as test stimuli and simulated reverse correlation to explore the nature of the proposed model. We study its behavior regarding the effect of contrast on its response and orientation bandwidth as well as the effect of an orthogonal mask on the response to an optimally oriented stimulus. We also evaluate and compare the performances of the CORF and GF models regarding contour detection, using two public data sets of images of natural scenes with associated contour ground truths. The RF map of the proposed CORF model, determined with simulated reverse correlation, can be divided into elongated excitatory and inhibitory regions typical of simple cells. The modulated response to shifted gratings that this model shows is also characteristic of a simple cell. Furthermore, the CORF model exhibits cross orientation suppression, contrast invariant orientation tuning and response saturation. These properties are observed in real simple cells, but are not possessed by the GF model. The proposed CORF model outperforms the GF model in contour detection with high statistical confidence (RuG data set: p < 10^-4, and Berkeley data set: p < 10^-4). The proposed CORF model is more realistic than the GF model and is more effective in contour detection, which is assumed to be the primary biological role of simple cells.},
keywords = {brain-inspired, contour detection},
pubstate = {published},
tppubtype = {article}
}
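In symbols, the combination rule described above is a weighted geometric mean of the model-LGN afferents; the notation below is assumed for illustration, with s_i(x, y) the blurred, shifted response of the i-th selected center-surround afferent and w_i its weight:

% weighted geometric mean of n model-LGN afferent responses (notation assumed)
r(x, y) = \left( \prod_{i=1}^{n} s_i(x, y)^{w_i} \right)^{1 / \sum_{i=1}^{n} w_i}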
Azzopardi, George; Petkov, Nicolai
Trainable COSFIRE filters for keypoint detection and pattern recognition Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, image classification, keypoint detection, object detection, segmentation, trainable filters
@article{azzopardi2013trainable,
title = {Trainable COSFIRE filters for keypoint detection and pattern recognition},
author = {George Azzopardi and Nicolai Petkov},
doi = {10.1109/TPAMI.2012.106},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
volume = {35},
issue = {2},
pages = {490--503},
publisher = {IEEE},
abstract = {Background: Keypoint detection is important for many computer vision applications. Existing methods suffer from insufficient selectivity regarding the shape properties of features and are vulnerable to contrast variations and to the presence of noise or texture. Methods: We propose a trainable filter which we call Combination Of Shifted FIlter REsponses (COSFIRE) and use for keypoint detection and pattern recognition. It is automatically configured to be selective for a local contour pattern specified by an example. The configuration comprises selecting given channels of a bank of Gabor filters and determining certain blur and shift parameters. A COSFIRE filter response is computed as the weighted geometric mean of the blurred and shifted responses of the selected Gabor filters. It shares similar properties with some shape-selective neurons in visual cortex, which provided inspiration for this work. Results: We demonstrate the effectiveness of the proposed filters in three applications: the detection of retinal vascular bifurcations (DRIVE dataset: 98.50 percent recall, 96.09 percent precision), the recognition of handwritten digits (MNIST dataset: 99.48 percent correct classification), and the detection and recognition of traffic signs in complex scenes (100 percent recall and precision). Conclusions: The proposed COSFIRE filters are conceptually simple and easy to implement. They are versatile keypoint detectors and are highly effective in practical computer vision applications.},
keywords = {brain-inspired, image classification, keypoint detection, object detection, segmentation, trainable filters},
pubstate = {published},
tppubtype = {article}
}
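A compact sketch of the generic COSFIRE response described in this abstract: rectified Gabor responses are blurred, shifted by their polar offsets (rho, phi) so they coincide at the filter center, and combined with a weighted geometric mean. The Gabor kernel is simplified (isotropic envelope), the blur is plain Gaussian smoothing rather than the paper's Gaussian-weighted maximum, and all tuples and weights are illustrative rather than the output of the configuration process.

import numpy as np
from scipy.ndimage import gaussian_filter, shift as nd_shift
from scipy.signal import fftconvolve

def gabor_kernel(lam, theta, sigma=4.0, size=21):
    """Real part of a 2-D Gabor function (isotropic envelope for brevity)."""
    half = size // 2
    y, x = np.mgrid[-half:half + 1, -half:half + 1]
    xr = x * np.cos(theta) + y * np.sin(theta)
    return np.exp(-(x**2 + y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * xr / lam)

def cosfire_response(image, tuples):
    """tuples of (lam, theta, rho, phi, weight), one per selected Gabor
    response; configuration would normally derive these from a prototype."""
    maps, weights = [], []
    for lam, theta, rho, phi, w in tuples:
        g = np.maximum(fftconvolve(image, gabor_kernel(lam, theta), mode="same"), 0.0)
        g = gaussian_filter(g, 2.0 + 0.1 * rho)           # blur grows with rho
        dy, dx = -rho * np.sin(phi), -rho * np.cos(phi)   # shift to the center
        maps.append(nd_shift(g, (dy, dx), order=1))
        weights.append(w)
    w = np.asarray(weights, dtype=float)
    return np.prod(np.stack(maps) ** w[:, None, None], axis=0) ** (1.0 / w.sum())

img = np.random.rand(128, 128)
tuples = [(8, 0.0, 0, 0.0, 1.0), (8, np.pi / 2, 6, np.pi / 4, 1.0)]
resp = cosfire_response(img, tuples)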