2024
Ndung’u, Steven; Grobler, Trienko; Wijnholds, Stefan J; Karastoyanova, Dimka; Azzopardi, George
Classification of Radio Galaxies with trainable COSFIRE filters Journal Article Forthcoming
@article{Ndungu2024,
title = {Classification of Radio Galaxies with trainable COSFIRE filters},
author = {Steven Ndung’u and Trienko Grobler and Stefan J. Wijnholds and Dimka Karastoyanova and George Azzopardi},
doi = {https://doi.org/10.1093/mnras/stae821},
year = {2024},
date = {2024-03-18},
urldate = {2024-03-18},
journal = {Monthly Notices of the Royal Astronomical Society},
abstract = {Radio galaxies exhibit a rich diversity of morphological characteristics, which make their classification into distinct types a complex challenge. To address this challenge effectively, we introduce an innovative approach for radio galaxy classification using COSFIRE filters. These filters possess the ability to adapt to both the shape and orientation of prototype patterns within images. The COSFIRE approach is explainable, learning-free, rotation-tolerant, efficient, and does not require a large training set. To assess the efficacy of our method, we conducted experiments on a benchmark radio galaxy data set comprising of 1180 training samples and 404 test samples. Notably, our approach achieved an average accuracy rate of 93.36%. This achievement outperforms contemporary deep learning models, and it is the best result ever achieved on this data set. Additionally, COSFIRE filters offer better computational performance, ∼20× fewer operations than the DenseNet-based competing method (when comparing at the same accuracy). Our findings underscore the effectiveness of the COSFIRE filter-based approach in addressing the complexities associated with radio galaxy classification. This research contributes to advancing the field by offering a robust solution that transcends the orientation challenges intrinsic to radio galaxy observations. Our method is versatile in that it is applicable to various image classification approaches.},
keywords = {brain-inspired, radioastronomy, trainable filters},
pubstate = {forthcoming},
tppubtype = {article}
}
2022
Bhole, Amey; Udmale, Sandeep S; Falzon, Owen; Azzopardi, George
CORF3D contour maps with application to Holstein cattle recognition from RGB and thermal images Journal Article
@article{bhole2022corf3d,
title = {CORF3D contour maps with application to Holstein cattle recognition from RGB and thermal images},
author = {Amey Bhole and Sandeep S Udmale and Owen Falzon and George Azzopardi},
doi = {https://doi.org/10.1016/j.eswa.2021.116354},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Expert Systems with Applications},
volume = {192},
number = {116354},
publisher = {Pergamon},
abstract = {Livestock management involves the monitoring of farm animals by tracking certain physiological and phenotypical characteristics over time. In the dairy industry, for instance, cattle are typically equipped with RFID ear tags. The corresponding data (e.g. milk properties) can then be automatically assigned to the respective cow when they enter the milking station. In order to move towards a more scalable, affordable, and welfare-friendly approach, automatic non-invasive solutions are more desirable. Thus, a non-invasive approach is proposed in this paper for the automatic identification of individual Holstein cattle from the side view while exiting a milking station. It considers input images from a thermal-RGB camera. The thermal images are used to delineate the cow from the background. Subsequently, any occluding rods from the milking station are removed and inpainted with the fast marching algorithm. Then, it extracts the RGB map of the segmented cattle along with a novel CORF3D contour map. The latter contains three contour maps extracted by the Combination of Receptive Fields (CORF) model with different strengths of push\textendashpull inhibition. This mechanism suppresses noise in the form of grain type texture. The effectiveness of the proposed approach is demonstrated by means of experiments using a 5-fold and a leave-one day-out cross-validation on a new data set of 3694 images of 383 cows collected from the Dairy Campus in Leeuwarden (the Netherlands) over 9 days. In particular, when combining RGB and CORF3D maps by late fusion, an average accuracy of was obtained for the 5-fold cross validation and for the leave-one day-out experiment. The two maps were combined by first learning two ConvNet classification models, one for each type of map. The feature vectors in the two FC layers obtained from training images were then concatenated and used to learn a linear SVM classification model. In principle, the proposed approach with the novel CORF3D contour maps is suitable for various image classification applications, especially where grain type texture is a confounding variable.},
keywords = {brain-inspired, contour detection, convnets, deep learning, noise suppression, pattern recognition, smart farming},
pubstate = {published},
tppubtype = {article}
}
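A minimal sketch of the late-fusion step described in the abstract above: the FC-layer feature vectors of two ConvNets (one per map type) are concatenated and fed to a linear SVM. The array names, sizes, and random data are illustrative assumptions, not values from the paper.

# Late fusion of RGB and CORF3D ConvNet features with a linear SVM (sketch).
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

rng = np.random.default_rng(0)
n_train, n_test, d = 200, 50, 256                     # illustrative sizes
rgb_train, corf_train = rng.normal(size=(n_train, d)), rng.normal(size=(n_train, d))
rgb_test, corf_test = rng.normal(size=(n_test, d)), rng.normal(size=(n_test, d))
y_train, y_test = rng.integers(0, 10, n_train), rng.integers(0, 10, n_test)

# Concatenate the feature vectors of the two ConvNets (late fusion).
X_train = np.concatenate([rgb_train, corf_train], axis=1)
X_test = np.concatenate([rgb_test, corf_test], axis=1)

clf = make_pipeline(StandardScaler(), LinearSVC())
clf.fit(X_train, y_train)
print("fused-feature accuracy:", clf.score(X_test, y_test))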
2020
Ramachandran, Sivakumar; Strisciuglio, Nicola; Vinekar, Anand; John, Renu; Azzopardi, George
U-COSFIRE filters for vessel tortuosity quantification with application to automated diagnosis of retinopathy of prematurity Journal Article
@article{ramachandran2020u,
title = {U-COSFIRE filters for vessel tortuosity quantification with application to automated diagnosis of retinopathy of prematurity},
author = {Sivakumar Ramachandran and Nicola Strisciuglio and Anand Vinekar and Renu John and George Azzopardi},
doi = {https://doi.org/10.1007/s00521-019-04697-6},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {Neural Computing and Applications},
volume = {32},
number = {16},
pages = {12453--12468},
publisher = {Springer London},
abstract = {Retinopathy of prematurity (ROP) is a sight threatening disorder that primarily affects preterm infants. It is the major reason for lifelong vision impairment and childhood blindness. Digital fundus images of preterm infants obtained from a Retcam Ophthalmic Imaging Device are typically used for ROP screening. ROP is often accompanied by Plus disease that is characterized by high levels of arteriolar tortuosity and venous dilation. The recent diagnostic procedures view the prevalence of Plus disease as a factor of prognostic significance in determining its stage, progress and severity. Our aim is to develop a diagnostic method, which can distinguish images of retinas with ROP from healthy ones and that can be interpreted by medical experts. We investigate the quantification of retinal blood vessel tortuosity via a novel U-COSFIRE (Combination Of Shifted Filter Responses) filter and propose a computer-aided diagnosis tool for automated ROP detection. The proposed methodology involves segmentation of retinal blood vessels using a set of B-COSFIRE filters with different scales followed by the detection of tortuous vessels in the obtained vessel map by means of U-COSFIRE filters. We also compare our proposed technique with an angle-based diagnostic method that utilizes the magnitude and orientation responses of the multi-scale B-COSFIRE filters. We carried out experiments on a new data set of 289 infant retinal images (89 with ROP and 200 healthy) that we collected from the programme in India called KIDROP (Karnataka Internet Assisted Diagnosis of Retinopathy of Prematurity). We used 10 images (5 with ROP and 5 healthy) for learning the parameters of our methodology and the remaining 279 images (84 with ROP and 195 healthy) for performance evaluation. We achieved sensitivity and specificity equal to 0.98 and 0.97, respectively, computed on the 279 test images. The obtained results and its explainable character demonstrate the effectiveness of the proposed approach to assist medical experts.},
keywords = {brain-inspired, medical image analysis, trainable filters},
pubstate = {published},
tppubtype = {article}
}
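As a quick check of the reported figures, the sensitivity and specificity above can be reproduced from approximate confusion counts on the 279 test images (84 with ROP, 195 healthy); the counts below are back-calculated from the reported rates and are therefore approximate.

# Worked example: sensitivity and specificity from approximate confusion counts.
tp, fn = 82, 2      # ROP test images detected / missed (approximate)
tn, fp = 189, 6     # healthy test images correctly classified / flagged (approximate)
sensitivity = tp / (tp + fn)   # 82 / 84  ~= 0.98
specificity = tn / (tn + fp)   # 189 / 195 ~= 0.97
print(f"Se = {sensitivity:.2f}, Sp = {specificity:.2f}")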
Melotti, Damiano; Heimbach, Kevin; Rodríguez-Sánchez, Antonio; Strisciuglio, Nicola; Azzopardi, George
A robust contour detection operator with combined push-pull inhibition and surround suppression Journal Article
@article{melotti2020robust,
title = {A robust contour detection operator with combined push-pull inhibition and surround suppression},
author = {Damiano Melotti and Kevin Heimbach and Antonio Rodr\'{i}guez-S\'{a}nchez and Nicola Strisciuglio and George Azzopardi},
doi = {https://doi.org/10.1016/j.ins.2020.03.026},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
journal = {Information Sciences},
volume = {524},
pages = {229-240},
publisher = {Elsevier},
abstract = {Contour detection is a salient operation in many computer vision applications as it extracts features that are important for distinguishing objects in scenes. It is believed to be a primary role of simple cells in visual cortex of the mammalian brain. Many of such cells receive push-pull inhibition or surround suppression. We propose a computational model that exhibits a combination of these two phenomena. It is based on two existing models, which have been proven to be very effective for contour detection. In particular, we introduce a brain-inspired contour operator that combines push-pull and surround inhibition. It turns out that this combination results in a more effective contour detector, which suppresses texture while keeping the strongest responses to lines and edges, when compared to existing models. The proposed model consists of a Combination of Receptive Field (or CORF) model with push-pull inhibition, extended with surround suppression. We demonstrate the effectiveness of the proposed approach on the RuG and Berkeley benchmark data sets of 40 and 500 images, respectively. The proposed push-pull CORF operator with surround suppression outperforms the one without suppression with high statistical significance.},
keywords = {brain-inspired, contour detection, noise suppression},
pubstate = {published},
tppubtype = {article}
}
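The combination of the two inhibition mechanisms described above can be summarised in a few lines: a push-pull term subtracts a fraction of the antiphase response, and a surround term subtracts a fraction of the local average response. The input maps, the weights, and the use of a plain Gaussian average in place of a ring-shaped surround are all simplifying assumptions.

# Sketch: contour response with combined push-pull inhibition and surround suppression.
import numpy as np
from scipy.ndimage import gaussian_filter

def push_pull_surround(excitatory, antiphase, k_pp=1.0, k_ss=1.0, sigma_surround=8.0):
    # Push-pull inhibition: subtract a fraction of the opposite-contrast response.
    push_pull = np.maximum(excitatory - k_pp * antiphase, 0.0)
    # Surround suppression: subtract a fraction of the local average response.
    surround = gaussian_filter(push_pull, sigma=sigma_surround)
    return np.maximum(push_pull - k_ss * surround, 0.0)

excitatory = np.random.rand(128, 128)   # CORF response to the preferred contrast (placeholder)
antiphase = np.random.rand(128, 128)    # CORF response to the opposite contrast (placeholder)
contour_map = push_pull_surround(excitatory, antiphase)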
2019
Shi, Chenyu; Meijer, Joost M; Guo, Jiapan; Azzopardi, George; Diercks, Gilles FH; Schmidt, Enno; Zillikens, Detlef; Jonkman, Marcel F; Petkov, Nicolai
Detection of u-serrated patterns in direct immunofluorescence images of autoimmune bullous diseases by inhibition-augmented COSFIRE filters Journal Article
@article{shi2019detection,
title = {Detection of u-serrated patterns in direct immunofluorescence images of autoimmune bullous diseases by inhibition-augmented COSFIRE filters},
author = {Chenyu Shi and Joost M Meijer and Jiapan Guo and George Azzopardi and Gilles FH Diercks and Enno Schmidt and Detlef Zillikens and Marcel F Jonkman and Nicolai Petkov},
doi = {10.1016/j.ijmedinf.2018.11.007},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {International Journal of Medical Informatics},
volume = {122},
pages = {27--36},
publisher = {Elsevier},
abstract = {Direct immunofluorescence (DIF) microscopy of a skin biopsy is used by physicians and pathologists to diagnose autoimmune bullous dermatoses (AIBD). This technique is the reference standard for diagnosis of AIBD, which is used worldwide in medical laboratories. For diagnosis of subepidermal AIBD (sAIBD), two different types of serrated pattern of immunodepositions can be recognized from DIF images, namely n- and u-serrated patterns. The n-serrated pattern is typically found in the most common sAIBD bullous pemphigoid. Presence of the u-serrated pattern indicates the sAIBD subtype epidermolysis bullosa acquisita (EBA), which has a different prognosis and requires a different treatment. The manual identification of these serrated patterns is learnable but challenging. We propose an automatic technique that is able to localize u-serrated patterns for automated computer-assisted diagnosis of EBA. The distinctive feature of u-serrated patterns as compared to n-serrated patterns is the presence of ridge-endings. We introduce a novel ridge-ending detector which uses inhibition-augmented trainable COSFIRE filters. Then, we apply a hierarchical clustering approach to detect the suspicious u-serrated patterns from the detected ridge-endings. For each detected u-serrated pattern we provide a score that indicates the reliability of its detection. In order to evaluate the proposed approach, we created a data set with 180 DIF images for serration pattern analysis. This data set consists of seven subsets which were obtained from various biopsy samples under different conditions. We achieve an average recognition rate of 82.2% of the u-serrated pattern on these 180 DIF images, which is comparable to the recognition rate achieved by experienced medical doctors and pathologists.},
keywords = {brain-inspired, medical image analysis, noise suppression, trainable filters},
pubstate = {published},
tppubtype = {article}
}
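The grouping of detected ridge-endings into candidate u-serrated patterns can be illustrated with off-the-shelf hierarchical clustering; the coordinates, the distance cut-off, and the size-based reliability score below are illustrative assumptions rather than the exact procedure of the paper.

# Sketch: cluster ridge-ending locations and score candidate u-serrated patterns.
import numpy as np
from scipy.cluster.hierarchy import fcluster, linkage

points = np.random.rand(60, 2) * 500                 # (x, y) ridge-ending detections (placeholder)
Z = linkage(points, method="single")                 # agglomerative clustering
labels = fcluster(Z, t=40.0, criterion="distance")   # cut the dendrogram at 40 pixels

for c in np.unique(labels):
    members = points[labels == c]
    score = len(members)                             # naive reliability score: cluster size
    if score >= 5:
        print(f"candidate u-serrated pattern with {score} ridge-endings")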
Guo, Jiapan; Azzopardi, George; Shi, Chenyu; Jansonius, Nomdo M; Petkov, Nicolai
Automatic Determination of Vertical Cup-to-Disc Ratio in Retinal Fundus Images for Glaucoma Screening Journal Article
@article{guo2019automatic,
title = {Automatic Determination of Vertical Cup-to-Disc Ratio in Retinal Fundus Images for Glaucoma Screening},
author = {Jiapan Guo and George Azzopardi and Chenyu Shi and Nomdo M Jansonius and Nicolai Petkov},
doi = {10.1109/ACCESS.2018.2890544},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {IEEE Access},
volume = {7},
pages = {8527--8541},
publisher = {IEEE},
abstract = {Glaucoma is a chronic progressive optic neuropathy that causes visual impairment or blindness if left untreated. It is crucial to diagnose it at an early stage in order to enable treatment. Fundus photography is a viable option for population-based screening. A fundus photograph enables the observation of the excavation of the optic disk\textemdashthe hallmark of glaucoma. The excavation is quantified as a vertical cup-to-disk ratio (VCDR). The manual assessment of retinal fundus images is, however, time-consuming and costly. Thus, an automated system is necessary to assist human observers. We propose a computer-aided diagnosis system, which consists of the localization of the optic disk, the determination of the height of the optic disk and the cup, and the computation of the VCDR. We evaluated the performance of our approach on eight publicly available datasets, which have, in total, 1712 retinal fundus images. We compared the obtained VCDR values with those provided by an experienced ophthalmologist and achieved a weighted VCDR mean difference of 0.11. The system provides a reliable estimation of the height of the optic disk and the cup in terms of the relative height error (RHE = 0.08 and 0.09, respectively). The Bland\textendashAltman analysis showed that the system achieves a good agreement with the manual annotations, especially for large VCDRs which indicate pathology.},
keywords = {brain-inspired, medical image analysis, trainable filters},
pubstate = {published},
tppubtype = {article}
}
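Once the heights of the optic disc and the cup have been estimated, the VCDR itself is a single division; the pixel values below are made up for illustration.

# Worked example: vertical cup-to-disc ratio (VCDR) from estimated heights.
disc_height_px = 420.0   # estimated vertical height of the optic disc (illustrative)
cup_height_px = 210.0    # estimated vertical height of the cup (illustrative)
vcdr = cup_height_px / disc_height_px
print(f"VCDR = {vcdr:.2f}")   # larger VCDRs indicate a deeper excavation of the optic disc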
Neocleous, Andreas; Azzopardi, George; Dee, Michael
Identification of possible Δ14C anomalies since 14 ka BP: A computational intelligence approach Journal Article
@article{neocleous2019identification,
title = {Identification of possible Δ14C anomalies since 14 ka BP: A computational intelligence approach},
author = {Andreas Neocleous and George Azzopardi and Michael Dee},
doi = {10.1016/j.scitotenv.2019.01.251},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {Science of The Total Environment},
volume = {663},
pages = {162--169},
publisher = {Elsevier},
abstract = {Rapid increments in the concentration of the radiocarbon in the atmosphere (Δ14C) have been identified in the years 774-775 CE and 993-994 CE (Miyake events) using annual measurements on known-age tree-rings. The level of cosmic radiation implied by such increases could cause the failure of satellite telecommunication systems, and thus, there is a need to model and predict them. In this work, we investigated several intelligent computational methods to identify similar events in the past. We apply state-of-the-art pattern matching techniques as well as feature representation, a procedure that typically is used in machine learning and classification. To validate our findings, we used as ground truth the two confirmed Miyake events, and several other dates that have been proposed in the literature. We show that some of the methods used in this study successfully identify most of the ground truth events (~1% false positive rate at 75% true positive rate). Our results show that computational methods can be used to identify comparable patterns of interest and hence potentially uncover sudden increments of Δ14C in the past.},
keywords = {anomaly detection, brain-inspired, predictive analysis, time-series, trainable filters},
pubstate = {published},
tppubtype = {article}
}
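A minimal version of the pattern-matching step can be written as a sliding normalized correlation of a known event profile against the Δ14C record, followed by thresholding of the similarity scores; the synthetic series, the template, and the threshold below are assumptions for illustration only.

# Sketch: slide a known Miyake-event template over a Delta-14C series and flag
# positions whose local profile correlates strongly with it.
import numpy as np

rng = np.random.default_rng(1)
series = rng.normal(0.0, 1.0, 1000)                         # placeholder Delta-14C record
series[500:505] += np.array([0.0, 8.0, 12.0, 6.0, 2.0])     # injected synthetic event
template = np.array([0.0, 8.0, 12.0, 6.0, 2.0])             # profile of a confirmed event

def zscore(x):
    return (x - x.mean()) / (x.std() + 1e-9)

t = zscore(template)
scores = np.array([np.dot(zscore(series[i:i + t.size]), t) / t.size
                   for i in range(series.size - t.size)])
print("candidate event start indices:", np.flatnonzero(scores > 0.9))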
Neocleous, Andreas; Azzopardi, George; Kuitems, Margot; Scifo, Andrea; Dee, Michael
Trainable Filters for the Identification of Anomalies in Cosmogenic Isotope Data Journal Article
@article{neocleous2019trainable,
title = {Trainable Filters for the Identification of Anomalies in Cosmogenic Isotope Data},
author = {Andreas Neocleous and George Azzopardi and Margot Kuitems and Andrea Scifo and Michael Dee},
doi = {10.1109/ACCESS.2019.2900123},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {IEEE Access},
volume = {7},
pages = {24585--24592},
publisher = {IEEE},
abstract = {Extreme bursts of radiation from space result in rapid increases in the concentration of radiocarbon in the atmosphere. Such rises, known as Miyake Events, can be detected through the measurement of radiocarbon in dendrochronological archives. The identification of Miyake Events is important because radiation impacts of this magnitude pose an existential threat to satellite communications and aeronautical avionics and may even be detrimental to human health. However, at present, radiocarbon measurements on tree-ring archives are generally only available at decadal resolution, which smooths out the effect of a possible radiation burst. The Miyake Events discovered so far, in tree-rings from the years 3372-3371 BCE, 774-775 CE, and 993-994 CE, have essentially been found by chance, but there may be more. In this paper, we use signal processing techniques, in particular COSFIRE, to train filters with data on annual changes in radiocarbon (Δ14C) around those dates. Then, we evaluate the trained filters and attempt to detect similar Miyake Events in the past. The method that we propose is promising, since it identifies the known Miyake Events at a relatively low false positive rate. Using the findings of this paper, we propose a list of 26 calendar years that our system persistently indicates are Miyake Event-like. We are currently examining a short-list of five of the newly identified dates and intend to perform single-year radiocarbon measurements over them. Signal processing techniques, such as COSFIRE filters, can be used as guidance tools since they are able to identify similar patterns of interest, even if they vary in time or in amplitude.},
keywords = {anomaly detection, brain-inspired, predictive analysis, time-series, trainable filters},
pubstate = {published},
tppubtype = {article}
}
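The time-series use of COSFIRE sketched below combines blurred, shifted values of the annual Δ14C changes according to (offset, expected change) pairs taken from a confirmed event; it is a loose one-dimensional interpretation under assumed parameters, not the authors' implementation.

# Sketch: a 1-D COSFIRE-like filter that responds where a series reproduces the
# relative pattern of (offset, expected change) pairs taken from a known event.
import numpy as np
from scipy.ndimage import gaussian_filter1d

tuples = [(-1, 0.5), (0, 10.0), (1, 4.0), (2, 1.0)]   # (offset in years, expected change) -- assumed

def cosfire_1d(series, tuples, sigma=1.0):
    blurred = gaussian_filter1d(series, sigma)          # tolerance to small temporal shifts
    parts = []
    for offset, expected in tuples:
        shifted = np.roll(blurred, -offset)             # align the contribution with the filter centre
        parts.append(np.clip(shifted / expected, 1e-9, None))
    return np.exp(np.mean(np.log(np.stack(parts)), axis=0))   # geometric mean of contributions

series = np.random.default_rng(2).normal(0.0, 1.0, 500)
series[250] += 10.0                                     # synthetic spike
print("strongest response at index:", int(np.argmax(cosfire_1d(series, tuples))))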
Strisciuglio, Nicola; Azzopardi, George; Petkov, Nicolai
Robust Inhibition-augmented Operator for Delineation of Curvilinear Structures Journal Article
@article{strisciuglio2019robust,
title = {Robust Inhibition-augmented Operator for Delineation of Curvilinear Structures},
author = {Nicola Strisciuglio and George Azzopardi and Nicolai Petkov},
doi = {10.1109/TIP.2019.2922096},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {IEEE Transactions on Image Processing},
volume = {28},
number = {12},
pages = {5852--5866},
publisher = {IEEE},
abstract = {Delineation of curvilinear structures in images is an important basic step of several image processing applications, such as segmentation of roads or rivers in aerial images, vessels or staining membranes in medical images, and cracks in pavements and roads, among others. Existing methods suffer from insufficient robustness to noise. In this paper, we propose a novel operator for the detection of curvilinear structures in images, which we demonstrate to be robust to various types of noise and effective in several applications. We call it RUSTICO, which stands for RobUST Inhibition-augmented Curvilinear Operator. It is inspired by the push-pull inhibition in visual cortex and takes as input the responses of two trainable B-COSFIRE filters of opposite polarity. The output of RUSTICO consists of a magnitude map and an orientation map. We carried out experiments on a data set of synthetic stimuli with noise drawn from different distributions, as well as on several benchmark data sets of retinal fundus images, crack pavements, and aerial images and a new data set of rose bushes used for automatic gardening. We evaluated the performance of RUSTICO by a metric that considers the structural properties of line networks (connectivity, area, and length) and demonstrated that RUSTICO outperforms many existing methods with high statistical significance. RUSTICO exhibits high robustness to noise and texture.},
keywords = {brain-inspired, contour detection, noise suppression, trainable filters},
pubstate = {published},
tppubtype = {article}
}
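RUSTICO's two outputs, a magnitude map and an orientation map, can be obtained from push-pull pairs of opposite-polarity B-COSFIRE responses computed at several orientations, as sketched below; the response stacks are assumed to come from an existing B-COSFIRE implementation and the inhibition weight is a placeholder.

# Sketch: push-pull combination of opposite-polarity B-COSFIRE responses per
# orientation, reduced to a magnitude map and an orientation map.
import numpy as np

def rustico_maps(excitatory_stack, inhibitory_stack, angles, alpha=0.8):
    # Stacks have shape (n_orientations, H, W) and are assumed precomputed.
    push_pull = np.maximum(excitatory_stack - alpha * inhibitory_stack, 0.0)
    magnitude = push_pull.max(axis=0)                           # strongest response over orientations
    orientation = np.asarray(angles)[push_pull.argmax(axis=0)]  # orientation of the strongest response
    return magnitude, orientation

angles = np.deg2rad(np.arange(0, 180, 15))
excitatory = np.random.rand(len(angles), 64, 64)   # placeholder responses
inhibitory = np.random.rand(len(angles), 64, 64)
magnitude, orientation = rustico_maps(excitatory, inhibitory, angles)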
2017
Gecer, Baris; Azzopardi, George; Petkov, Nicolai
Color-blob-based COSFIRE filters for object recognition Journal Article
@article{gecer2017color,
title = {Color-blob-based COSFIRE filters for object recognition},
author = {Baris Gecer and George Azzopardi and Nicolai Petkov},
doi = {https://doi.org/10.1016/j.imavis.2016.10.006},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
journal = {Image and Vision Computing},
volume = {57},
pages = {165--174},
publisher = {Elsevier},
abstract = {Most object recognition methods rely on contour-defined features obtained by edge detection or region segmentation. They are not robust to diffuse region boundaries. Furthermore, such methods do not exploit region color information. We propose color-blob-based COSFIRE (Combination of Shifted Filter Responses) filters to be selective for combinations of diffuse circular regions (blobs) in specific mutual spatial arrangements. Such a filter combines the responses of a certain selection of Difference-of-Gaussians filters, essentially blob detectors, of different scales, in certain channels of a color space, and at certain relative positions to each other. Its parameters are determined/learned in an automatic configuration process that analyzes the properties of a given prototype object of interest. We use these filters to compute features that are effective for the recognition of the prototype objects. We form feature vectors that we use with an SVM classifier. We evaluate the proposed method on a traffic sign (GTSRB) and a butterfly data sets. For the GTSRB data set we achieve a recognition rate of 98.94%, which is slightly higher than human performance and for the butterfly data set we achieve 89.02%. The proposed color-blob-based COSFIRE filters are very effective and outperform the contour-based COSFIRE filters. A COSFIRE filter is trainable, it can be configured with a single prototype pattern and it does not require domain knowledge.},
keywords = {brain-inspired, image classification, pattern recognition, trainable filters},
pubstate = {published},
tppubtype = {article}
}
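The blob-combination idea can be sketched with Difference-of-Gaussians responses computed in individual colour channels and combined, after shifting, with a geometric mean; the (channel, scale, position) tuples below are invented for illustration, whereas in the paper they are learned automatically from a prototype object.

# Sketch: a colour-blob-based COSFIRE-like response built from shifted
# Difference-of-Gaussians (blob) responses in single colour channels.
import numpy as np
from scipy.ndimage import gaussian_filter, shift as nd_shift

def blob_response(channel, sigma):
    # Center-surround (Difference-of-Gaussians) blob detector, half-wave rectified.
    return np.maximum(gaussian_filter(channel, sigma) - gaussian_filter(channel, 2.0 * sigma), 0.0)

tuples = [(0, 2.0, 0, 0), (1, 3.0, 10, 0), (2, 2.0, 0, 10)]   # (channel, sigma, dx, dy) -- assumed

def color_blob_cosfire(image, tuples):
    parts = []
    for ch, sigma, dx, dy in tuples:
        r = blob_response(image[..., ch], sigma)
        parts.append(np.clip(nd_shift(r, (dy, dx), order=1), 1e-9, None))  # move part to the centre
    return np.exp(np.mean(np.log(np.stack(parts)), axis=0))                # geometric mean of parts

image = np.random.rand(100, 100, 3)     # placeholder colour image
response_map = color_blob_cosfire(image, tuples)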
Guo, J; Shi, C; Azzopardi, G; Petkov, N
Inhibition-augmented COSFIRE model of shape-selective neurons Journal Article
@article{guo2017inhibition,
title = {Inhibition-augmented COSFIRE model of shape-selective neurons},
author = {J Guo and C Shi and G Azzopardi and N Petkov},
doi = {10.1147/JRD.2017.2679458},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
journal = {IBM Journal of Research and Development},
volume = {61},
number = {2/3},
pages = {1-9},
publisher = {IBM},
abstract = {Inhibition is a phenomenon that occurs in different areas of the brain, including the visual cortex. For instance, the responses of some shape-selective neurons in the inferotemporal cortex are suppressed by the presence of certain shape contour parts in their receptive fields. This suppression phenomenon is thought to increase the selectivity of such neurons. We propose an inhibition-augmented model of shape-selective neurons, as an advancement of the trainable filter approach called combination of shifted filter responses (COSFIRE). We use a positive prototype pattern and a set of negative prototype patterns to automatically configure an inhibition-augmented model. The configuration involves the selection of responses of a bank of Gabor filters (models of V1/V2 neurons) that provide excitatory or inhibitory input(s). We compute the output of the model as the excitatory input minus a fraction of the maximum of the inhibitory inputs. The configured model responds to patterns that are similar to the positive prototype but does not respond to patterns similar to the negative prototype(s). We demonstrate the effectiveness of the proposed model in shape recognition. We use the Graphics Recognition (GREC2011) benchmark dataset and demonstrate that the proposed inhibition-augmented modeling technique increases selectivity of the COSFIRE model.},
keywords = {brain-inspired, noise suppression, trainable filters},
pubstate = {published},
tppubtype = {article}
}
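The response rule stated in the abstract, the excitatory input minus a fraction of the maximum of the inhibitory inputs, fits in a couple of lines; the input maps and the inhibition factor are placeholders.

# Sketch of the inhibition-augmented COSFIRE output rule.
import numpy as np

def inhibition_augmented_response(excitatory, inhibitory_list, eta=0.5):
    # excitatory: response configured on the positive prototype.
    # inhibitory_list: responses configured on the negative prototypes.
    inhibition = np.max(np.stack(inhibitory_list), axis=0)
    return np.maximum(excitatory - eta * inhibition, 0.0)

excitatory = np.random.rand(64, 64)
inhibitory = [np.random.rand(64, 64) for _ in range(3)]
output = inhibition_augmented_response(excitatory, inhibitory)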
Fernández-Robles, Laura; Azzopardi, George; Alegre, Enrique; Petkov, Nicolai; Castejón-Lima, Manuel
Identification of milling inserts in situ based on a versatile machine vision system Journal Article
@article{fernandez2017identification,
title = {Identification of milling inserts in situ based on a versatile machine vision system},
author = {Laura Fern\'{a}ndez-Robles and George Azzopardi and Enrique Alegre and Nicolai Petkov and Manuel Castej\'{o}n-Lima},
doi = {https://doi.org/10.1016/j.jmsy.2017.08.002},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
journal = {Journal of Manufacturing Systems},
volume = {45},
pages = {48-57},
publisher = {Elsevier},
abstract = {This paper proposes a novel method for in situ localization of multiple inserts by means of machine vision techniques, a challenging issue in the field of tool wear monitoring. Most existing research works focus on evaluating the wear of isolated inserts after being manually extracted from the head tool. The method proposed solves this issue of paramount importance, as it frees the operator from continuously monitoring the machining process and allows the machine to continue operating without extracting the milling head for wear evaluation. We use trainable COSFIRE filters without requiring any manual intervention. This trainable approach is more versatile and generic than previous works on the topic, as it is not based on, and does not require, any domain knowledge. This allows an automatic application of the method to new machines without the need of specific knowledge on machine vision. We use an experimental dataset that we published to test the effectiveness of the method. We achieved very good performance with an F1 score of 0.9674, in the identification of multiple milling head inserts. The proposed approach can be considered as a general framework for the localization and identification of machining pieces from images taken from mechanical monitoring systems.},
keywords = {brain-inspired, machine vision, trainable filters, visual quality inspection},
pubstate = {published},
tppubtype = {article}
}
2016
Guo, Jiapan; Shi, Chenyu; Azzopardi, George; Petkov, Nicolai
Inhibition-augmented trainable COSFIRE filters for keypoint detection and object recognition Journal Article
@article{guo2016inhibition,
title = {Inhibition-augmented trainable COSFIRE filters for keypoint detection and object recognition},
author = {Jiapan Guo and Chenyu Shi and George Azzopardi and Nicolai Petkov},
doi = {https://doi.org/10.1007/s00138-016-0777-3},
year = {2016},
date = {2016-01-01},
urldate = {2016-01-01},
journal = {Machine Vision and Applications},
volume = {27},
pages = {1197-1211},
publisher = {Springer Berlin Heidelberg},
abstract = {The shape and meaning of an object can radically change with the addition of one or more contour parts. For instance, a T-junction can become a crossover. We extend the COSFIRE trainable filter approach which uses a positive prototype pattern for configuration by adding a set of negative prototype patterns. The configured filter responds to patterns that are similar to the positive prototype but not to any of the negative prototypes. The configuration of such a filter comprises selecting given channels of a bank of Gabor filters that provide excitatory or inhibitory input and determining certain blur and shift parameters. We compute the response of such a filter as the excitatory input minus a fraction of the maximum of inhibitory inputs. We use three applications to demonstrate the effectiveness of inhibition: the exclusive detection of vascular bifurcations (i.e., without crossovers) in retinal fundus images (DRIVE data set), the recognition of architectural and electrical symbols (GREC’11 data set) and the recognition of handwritten digits (MNIST data set).},
keywords = {brain-inspired, keypoint detection, noise suppression, object detection, trainable filters},
pubstate = {published},
tppubtype = {article}
}
Strisciuglio, Nicola; Azzopardi, George; Vento, Mario; Petkov, Nicolai
Supervised vessel delineation in retinal fundus images with the automatic selection of B-COSFIRE filters Journal Article
@article{strisciuglio2016supervised,
title = {Supervised vessel delineation in retinal fundus images with the automatic selection of B-COSFIRE filters},
author = {Nicola Strisciuglio and George Azzopardi and Mario Vento and Nicolai Petkov},
doi = {https://doi.org/10.1007/s00138-016-0781-7},
year = {2016},
date = {2016-01-01},
urldate = {2016-01-01},
journal = {Machine Vision and Applications},
publisher = {Springer Berlin Heidelberg},
abstract = {The inspection of retinal fundus images allows medical doctors to diagnose various pathologies. Computer-aided diagnosis systems can be used to assist in this process. As a first step, such systems delineate the vessel tree from the background. We propose a method for the delineation of blood vessels in retinal images that is effective for vessels of different thickness. In the proposed method, we employ a set of B-COSFIRE filters selective for vessels and vessel-endings. Such a set is determined in an automatic selection process and can adapt to different applications. We compare the performance of different selection methods based upon machine learning and information theory. The results that we achieve by performing experiments on two public benchmark data sets, namely DRIVE and STARE, demonstrate the effectiveness of the proposed approach.},
keywords = {brain-inspired, medical image analysis, segmentation, trainable filters},
pubstate = {published},
tppubtype = {article}
}
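The automatic selection step can be emulated with a generic information-theoretic ranking of candidate B-COSFIRE responses treated as per-pixel features; this is only an analogy to the selection methods compared in the paper, and the data below are random placeholders.

# Sketch: rank candidate B-COSFIRE filter responses by mutual information with
# the vessel/background label and keep the top-k for the delineation model.
import numpy as np
from sklearn.feature_selection import mutual_info_classif

rng = np.random.default_rng(3)
n_pixels, n_filters = 5000, 40                   # illustrative sizes
X = rng.random((n_pixels, n_filters))            # candidate filter responses per pixel (placeholder)
y = rng.integers(0, 2, n_pixels)                 # 1 = vessel, 0 = background (placeholder)

mi = mutual_info_classif(X, y, random_state=0)
selected = np.argsort(mi)[::-1][:10]             # indices of the retained filters
print("selected B-COSFIRE filters:", sorted(selected.tolist()))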
2015
Azzopardi, George; Strisciuglio, Nicola; Vento, Mario; Petkov, Nicolai
Trainable COSFIRE filters for vessel delineation with application to retinal images Journal Article
@article{azzopardi2015trainable,
title = {Trainable COSFIRE filters for vessel delineation with application to retinal images},
author = {George Azzopardi and Nicola Strisciuglio and Mario Vento and Nicolai Petkov},
doi = {https://doi.org/10.1016/j.media.2014.08.002},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
journal = {Medical image analysis},
volume = {19},
number = {1},
pages = {46--57},
publisher = {Elsevier},
abstract = {Retinal imaging provides a non-invasive opportunity for the diagnosis of several medical pathologies. The automatic segmentation of the vessel tree is an important pre-processing step which facilitates subsequent automatic processes that contribute to such diagnosis.
We introduce a novel method for the automatic segmentation of vessel trees in retinal fundus images. We propose a filter that selectively responds to vessels and that we call B-COSFIRE with B standing for bar which is an abstraction for a vessel. It is based on the existing COSFIRE (Combination Of Shifted Filter Responses) approach. A B-COSFIRE filter achieves orientation selectivity by computing the weighted geometric mean of the output of a pool of Difference-of-Gaussians filters, whose supports are aligned in a collinear manner. It achieves rotation invariance efficiently by simple shifting operations. The proposed filter is versatile as its selectivity is determined from any given vessel-like prototype pattern in an automatic configuration process. We configure two B-COSFIRE filters, namely symmetric and asymmetric, that are selective for bars and bar-endings, respectively. We achieve vessel segmentation by summing up the responses of the two rotation-invariant B-COSFIRE filters followed by thresholding.
The results that we achieve on three publicly available data sets (DRIVE: Se = 0.7655, Sp = 0.9704; STARE: Se = 0.7716, Sp = 0.9701; CHASE_DB1: Se = 0.7585, Sp = 0.9587) are higher than many of the state-of-the-art methods. The proposed segmentation approach is also very efficient with a time complexity that is significantly lower than existing methods.},
keywords = {brain-inspired, medical image analysis, segmentation, trainable filters},
pubstate = {published},
tppubtype = {article}
}
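The core B-COSFIRE computation, a weighted geometric mean of blurred, collinearly shifted Difference-of-Gaussians responses, followed by the sum-and-threshold of a symmetric and an asymmetric filter, is sketched below for a single orientation; the offsets, sigmas, and threshold are assumed values, and rotation invariance is omitted for brevity.

# Sketch: simplified B-COSFIRE-style vessel response for one orientation.
import numpy as np
from scipy.ndimage import gaussian_filter, shift as nd_shift

def dog(image, sigma):
    # Center-surround (Difference-of-Gaussians) response, half-wave rectified.
    return np.maximum(gaussian_filter(image, 0.5 * sigma) - gaussian_filter(image, sigma), 0.0)

def b_cosfire(image, offsets, sigma=2.4, blur_sigma=1.0):
    d = gaussian_filter(dog(image, sigma), blur_sigma)         # blurred line/blob evidence
    parts = [np.clip(nd_shift(d, (rho, 0), order=1), 1e-9, None) for rho in offsets]
    return np.exp(np.mean(np.log(np.stack(parts)), axis=0))    # geometric mean of collinear parts

image = np.random.rand(128, 128)                               # placeholder retinal image channel
symmetric = b_cosfire(image, offsets=[-4, -2, 0, 2, 4])        # selective for bars (vessels)
asymmetric = b_cosfire(image, offsets=[0, 2, 4])               # selective for bar endings
vessel_map = (symmetric + asymmetric) > 0.2                    # sum of responses, then threshold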
2014
Azzopardi, George; Petkov, Nicolai
Ventral-stream-like shape representation: from pixel intensity values to trainable object-selective COSFIRE models Journal Article
@article{azzopardi2014ventral,
title = {Ventral-stream-like shape representation: from pixel intensity values to trainable object-selective COSFIRE models},
author = {George Azzopardi and Nicolai Petkov},
doi = {https://doi.org/10.3389/fncom.2014.00080},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
journal = {Frontiers in computational neuroscience},
volume = {8},
pages = {80},
publisher = {Frontiers},
abstract = {The remarkable abilities of the primate visual system have inspired the construction of computational models of some visual neurons. We propose a trainable hierarchical object recognition model, which we call S-COSFIRE (S stands for Shape and COSFIRE stands for Combination Of Shifted FIlter REsponses) and use it to localize and recognize objects of interests embedded in complex scenes. It is inspired by the visual processing in the ventral stream (V1/V2 → V4 → TEO). Recognition and localization of objects embedded in complex scenes is important for many computer vision applications. Most existing methods require prior segmentation of the objects from the background which on its turn requires recognition. An S-COSFIRE filter is automatically configured to be selective for an arrangement of contour-based features that belong to a prototype shape specified by an example. The configuration comprises selecting relevant vertex detectors and determining certain blur and shift parameters. The response is computed as the weighted geometric mean of the blurred and shifted responses of the selected vertex detectors. S-COSFIRE filters share similar properties with some neurons in inferotemporal cortex, which provided inspiration for this work. We demonstrate the effectiveness of S-COSFIRE filters in two applications: letter and keyword spotting in handwritten manuscripts and object spotting in complex scenes for the computer vision system of a domestic robot. S-COSFIRE filters are effective to recognize and localize (deformable) objects in images of complex scenes without requiring prior segmentation. They are versatile trainable shape detectors, conceptually simple and easy to implement. The presented hierarchical shape representation contributes to a better understanding of the brain and to more robust computer vision algorithms.},
keywords = {brain-inspired, object detection, trainable filters},
pubstate = {published},
tppubtype = {article}
}
Azzopardi, George; Rodríguez-Sánchez, Antonio; Piater, Justus; Petkov, Nicolai
A push-pull CORF model of a simple cell with antiphase inhibition improves SNR and contour detection Journal Article
@article{azzopardi2014push,
title = {A push-pull CORF model of a simple cell with antiphase inhibition improves SNR and contour detection},
author = {George Azzopardi and Antonio Rodr\'{i}guez-S\'{a}nchez and Justus Piater and Nicolai Petkov},
doi = {https://doi.org/10.1371/journal.pone.0098424},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
journal = {PLoS One},
publisher = {Public Library of Science},
abstract = {We propose a computational model of a simple cell with push-pull inhibition, a property that is observed in many real simple cells. It is based on an existing model called Combination of Receptive Fields or CORF for brevity. A CORF model uses as afferent inputs the responses of model LGN cells with appropriately aligned center-surround receptive fields, and combines their output with a weighted geometric mean. The output of the proposed model simple cell with push-pull inhibition, which we call push-pull CORF, is computed as the response of a CORF model cell that is selective for a stimulus with preferred orientation and preferred contrast minus a fraction of the response of a CORF model cell that responds to the same stimulus but of opposite contrast. We demonstrate that the proposed push-pull CORF model improves signal-to-noise ratio (SNR) and achieves further properties that are observed in real simple cells, namely separability of spatial frequency and orientation as well as contrast-dependent changes in spatial frequency tuning. We also demonstrate the effectiveness of the proposed push-pull CORF model in contour detection, which is believed to be the primary biological role of simple cells. We use the RuG (40 images) and Berkeley (500 images) benchmark data sets of images with natural scenes and show that the proposed model outperforms, with very high statistical significance, the basic CORF model without inhibition, Gabor-based models with isotropic surround inhibition, and the Canny edge detector. The push-pull CORF model that we propose is a contribution to a better understanding of how visual information is processed in the brain as it provides the ability to reproduce a wider range of properties exhibited by real simple cells. As a result of push-pull inhibition a CORF model exhibits an improved SNR, which is the reason for a more effective contour detection.},
keywords = {brain-inspired, contour detection, noise suppression, trainable filters},
pubstate = {published},
tppubtype = {article}
}
2013
Azzopardi, George; Petkov, Nicolai
Automatic detection of vascular bifurcations in segmented retinal images using trainable COSFIRE filters Journal Article
@article{azzopardi2013automatic,
title = {Automatic detection of vascular bifurcations in segmented retinal images using trainable COSFIRE filters},
author = {George Azzopardi and Nicolai Petkov},
doi = {https://doi.org/10.1016/j.patrec.2012.11.002},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
journal = {Pattern Recognition Letters},
volume = {34},
number = {8},
pages = {922--933},
publisher = {North-Holland},
abstract = {Background: The vascular tree observed in a retinal fundus image can provide clues for cardiovascular diseases. Its analysis requires the identification of vessel bifurcations and crossovers.
Methods: We use a set of trainable keypoint detectors that we call Combination Of Shifted FIlter REsponses or COSFIRE filters to automatically detect vascular bifurcations in segmented retinal images. We configure a set of COSFIRE filters that are selective for a number of prototype bifurcations and demonstrate that such filters can be effectively used to detect bifurcations that are similar to the prototypical ones. The automatic configuration of such a filter selects given channels of a bank of Gabor filters and determines certain blur and shift parameters. The response of a COSFIRE filter is computed as the weighted geometric mean of the blurred and shifted responses of the selected Gabor filters. The COSFIRE approach is inspired by the function of a specific type of shape-selective neuron in area V4 of visual cortex.
Results: We ran experiments on three data sets and achieved the following results: (a) a recall of 97.88% at precision of 96.94% on 40 manually segmented images provided in the DRIVE data set, (b) a recall of 97.32% at precision of 96.04% on 20 manually segmented images provided in the STARE data set, and (c) a recall of 97.02% at precision of 96.53% on a set of 10 automatically segmented images obtained from images in the DRIVE data set.
Conclusions: The COSFIRE filters that we use are conceptually simple and easy to implement: the filter output is computed as the weighted geometric mean of blurred and shifted Gabor filter responses. They are versatile keypoint detectors as they can be configured with any given local contour pattern and are subsequently able to detect the same and similar patterns.},
keywords = {brain-inspired, keypoint detection, medical image analysis, trainable filters},
pubstate = {published},
tppubtype = {article}
}
2012
Azzopardi, George; Petkov, Nicolai
A CORF computational model of a simple cell that relies on LGN input outperforms the Gabor function model Journal Article
@article{azzopardi2012corf,
title = {A CORF computational model of a simple cell that relies on LGN input outperforms the Gabor function model},
author = {George Azzopardi and Nicolai Petkov},
doi = {https://doi.org/10.1007/s00422-012-0486-6},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
journal = {Biological cybernetics},
volume = {106},
pages = {177-189},
publisher = {Springer-Verlag},
abstract = {Simple cells in primary visual cortex are believed to extract local contour information from a visual scene. The 2D Gabor function (GF) model has gained particular popularity as a computational model of a simple cell. However, it short-cuts the LGN, it cannot reproduce a number of properties of real simple cells, and its effectiveness in contour detection tasks has never been compared with the effectiveness of alternative models. We propose a computational model that uses as afferent inputs the responses of model LGN cells with center\textendashsurround receptive fields (RFs) and we refer to it as a Combination of Receptive Fields (CORF) model. We use shifted gratings as test stimuli and simulated reverse correlation to explore the nature of the proposed model. We study its behavior regarding the effect of contrast on its response and orientation bandwidth as well as the effect of an orthogonal mask on the response to an optimally oriented stimulus. We also evaluate and compare the performances of the CORF and GF models regarding contour detection, using two public data sets of images of natural scenes with associated contour ground truths. The RF map of the proposed CORF model, determined with simulated reverse correlation, can be divided in elongated excitatory and inhibitory regions typical of simple cells. The modulated response to shifted gratings that this model shows is also characteristic of a simple cell. Furthermore, the CORF model exhibits cross orientation suppression, contrast invariant orientation tuning and response saturation. These properties are observed in real simple cells, but are not possessed by the GF model. The proposed CORF model outperforms the GF model in contour detection with high statistical confidence (RuG data set: p < 10^{-4}, and Berkeley data set: p < 10^{-4}). The proposed CORF model is more realistic than the GF model and is more effective in contour detection, which is assumed to be the primary biological role of simple cells.},
keywords = {brain-inspired, contour detection},
pubstate = {published},
tppubtype = {article}
}
Azzopardi, George; Petkov, Nicolai
Trainable COSFIRE filters for keypoint detection and pattern recognition Journal Article
@article{azzopardi2013trainable,
title = {Trainable COSFIRE filters for keypoint detection and pattern recognition},
author = {George Azzopardi and Nicolai Petkov},
doi = {10.1109/TPAMI.2012.106},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
volume = {35},
number = {2},
pages = {490--503},
publisher = {IEEE},
abstract = {Background: Keypoint detection is important for many computer vision applications. Existing methods suffer from insufficient selectivity regarding the shape properties of features and are vulnerable to contrast variations and to the presence of noise or texture. Methods: We propose a trainable filter which we call Combination Of Shifted FIlter REsponses (COSFIRE) and use for keypoint detection and pattern recognition. It is automatically configured to be selective for a local contour pattern specified by an example. The configuration comprises selecting given channels of a bank of Gabor filters and determining certain blur and shift parameters. A COSFIRE filter response is computed as the weighted geometric mean of the blurred and shifted responses of the selected Gabor filters. It shares similar properties with some shape-selective neurons in visual cortex, which provided inspiration for this work. Results: We demonstrate the effectiveness of the proposed filters in three applications: the detection of retinal vascular bifurcations (DRIVE dataset: 98.50 percent recall, 96.09 percent precision), the recognition of handwritten digits (MNIST dataset: 99.48 percent correct classification), and the detection and recognition of traffic signs in complex scenes (100 percent recall and precision). Conclusions: The proposed COSFIRE filters are conceptually simple and easy to implement. They are versatile keypoint detectors and are highly effective in practical computer vision applications.},
keywords = {brain-inspired, image classification, keypoint detection, object detection, segmentation, trainable filters},
pubstate = {published},
tppubtype = {article}
}
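The response computation described in this abstract, a weighted geometric mean of blurred and shifted Gabor filter responses selected during configuration, can be sketched as follows; the Gabor bank, the (wavelength, orientation, rho, phi, weight) tuples, and all parameter values are illustrative assumptions rather than a configuration learned from a prototype.

# Sketch: core COSFIRE response as a weighted geometric mean of blurred,
# shifted Gabor filter responses; all parameters below are illustrative.
import numpy as np
from scipy.ndimage import gaussian_filter, shift as nd_shift
from scipy.signal import fftconvolve

def gabor_kernel(lam, theta, sigma, size=21):
    half = size // 2
    y, x = np.mgrid[-half:half + 1, -half:half + 1]
    xr = x * np.cos(theta) + y * np.sin(theta)
    return np.exp(-(x**2 + y**2) / (2.0 * sigma**2)) * np.cos(2.0 * np.pi * xr / lam)

def gabor_response(image, lam, theta):
    kernel = gabor_kernel(lam, theta, sigma=0.5 * lam)
    return np.maximum(fftconvolve(image, kernel, mode="same"), 0.0)   # half-wave rectified

# (wavelength, orientation, distance rho, polar angle phi, weight) -- assumed tuples.
tuples = [(8.0, 0.0, 10.0, np.pi / 2, 1.0),
          (8.0, np.pi / 2, 10.0, 0.0, 1.0),
          (8.0, np.pi / 4, 0.0, 0.0, 1.0)]

def cosfire_response(image, tuples, blur_sigma=2.0):
    parts, weights = [], []
    for lam, theta, rho, phi, w in tuples:
        r = gaussian_filter(gabor_response(image, lam, theta), blur_sigma)   # blur for tolerance
        dy, dx = -rho * np.sin(phi), -rho * np.cos(phi)                      # shift part to the centre
        parts.append(np.clip(nd_shift(r, (dy, dx), order=1), 1e-9, None))
        weights.append(w)
    w = np.asarray(weights)[:, None, None]
    return np.exp((w * np.log(np.stack(parts))).sum(axis=0) / w.sum())       # weighted geometric mean

image = np.random.rand(128, 128)          # placeholder input image
keypoint_map = cosfire_response(image, tuples)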