2016
Guo, Jiapan; Shi, Chenyu; Azzopardi, George; Petkov, Nicolai
Inhibition-augmented trainable COSFIRE filters for keypoint detection and object recognition Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, keypoint detection, noise suppression, object detection, trainable filters
@article{guo2016inhibition,
  title     = {Inhibition-augmented trainable {COSFIRE} filters for keypoint detection and object recognition},
  author    = {Guo, Jiapan and Shi, Chenyu and Azzopardi, George and Petkov, Nicolai},
  doi       = {10.1007/s00138-016-0777-3},
  year      = {2016},
  date      = {2016-01-01},
  urldate   = {2016-01-01},
  journal   = {Machine Vision and Applications},
  volume    = {27},
  pages     = {1197--1211},
  publisher = {Springer Berlin Heidelberg},
  abstract  = {The shape and meaning of an object can radically change with the addition of one or more contour parts. For instance, a T-junction can become a crossover. We extend the COSFIRE trainable filter approach which uses a positive prototype pattern for configuration by adding a set of negative prototype patterns. The configured filter responds to patterns that are similar to the positive prototype but not to any of the negative prototypes. The configuration of such a filter comprises selecting given channels of a bank of Gabor filters that provide excitatory or inhibitory input and determining certain blur and shift parameters. We compute the response of such a filter as the excitatory input minus a fraction of the maximum of inhibitory inputs. We use three applications to demonstrate the effectiveness of inhibition: the exclusive detection of vascular bifurcations (i.e., without crossovers) in retinal fundus images (DRIVE data set), the recognition of architectural and electrical symbols (GREC'11 data set) and the recognition of handwritten digits (MNIST data set).},
  keywords  = {brain-inspired, keypoint detection, noise suppression, object detection, trainable filters},
  pubstate  = {published},
  tppubtype = {article}
}
2014
Azzopardi, George; Petkov, Nicolai
Ventral-stream-like shape representation: from pixel intensity values to trainable object-selective COSFIRE models Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, object detection, trainable filters
@article{azzopardi2014ventral,
  title     = {Ventral-stream-like shape representation: from pixel intensity values to trainable object-selective {COSFIRE} models},
  author    = {Azzopardi, George and Petkov, Nicolai},
  doi       = {10.3389/fncom.2014.00080},
  year      = {2014},
  date      = {2014-01-01},
  urldate   = {2014-01-01},
  journal   = {Frontiers in Computational Neuroscience},
  volume    = {8},
  pages     = {80},
  publisher = {Frontiers},
  abstract  = {The remarkable abilities of the primate visual system have inspired the construction of computational models of some visual neurons. We propose a trainable hierarchical object recognition model, which we call S-COSFIRE (S stands for Shape and COSFIRE stands for Combination Of Shifted FIlter REsponses) and use it to localize and recognize objects of interests embedded in complex scenes. It is inspired by the visual processing in the ventral stream (V1/V2 {$\rightarrow$} V4 {$\rightarrow$} TEO). Recognition and localization of objects embedded in complex scenes is important for many computer vision applications. Most existing methods require prior segmentation of the objects from the background which on its turn requires recognition. An S-COSFIRE filter is automatically configured to be selective for an arrangement of contour-based features that belong to a prototype shape specified by an example. The configuration comprises selecting relevant vertex detectors and determining certain blur and shift parameters. The response is computed as the weighted geometric mean of the blurred and shifted responses of the selected vertex detectors. S-COSFIRE filters share similar properties with some neurons in inferotemporal cortex, which provided inspiration for this work. We demonstrate the effectiveness of S-COSFIRE filters in two applications: letter and keyword spotting in handwritten manuscripts and object spotting in complex scenes for the computer vision system of a domestic robot. S-COSFIRE filters are effective to recognize and localize (deformable) objects in images of complex scenes without requiring prior segmentation. They are versatile trainable shape detectors, conceptually simple and easy to implement. The presented hierarchical shape representation contributes to a better understanding of the brain and to more robust computer vision algorithms.},
  keywords  = {brain-inspired, object detection, trainable filters},
  pubstate  = {published},
  tppubtype = {article}
}
2012
Azzopardi, George; Petkov, Nicolai
Trainable COSFIRE filters for keypoint detection and pattern recognition Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, image classification, keypoint detection, object detection, segmentation, trainable filters
@article{azzopardi2013trainable,
  title         = {Trainable {COSFIRE} filters for keypoint detection and pattern recognition},
  author        = {Azzopardi, George and Petkov, Nicolai},
  doi           = {10.1109/TPAMI.2012.106},
  year          = {2012},
  date          = {2012-01-01},
  urldate       = {2012-01-01},
  journal       = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  volume        = {35},
  number        = {2},
  pages         = {490--503},
  publisher     = {IEEE},
  abstract      = {Background: Keypoint detection is important for many computer vision applications. Existing methods suffer from insufficient selectivity regarding the shape properties of features and are vulnerable to contrast variations and to the presence of noise or texture. Methods: We propose a trainable filter which we call Combination Of Shifted FIlter REsponses (COSFIRE) and use for keypoint detection and pattern recognition. It is automatically configured to be selective for a local contour pattern specified by an example. The configuration comprises selecting given channels of a bank of Gabor filters and determining certain blur and shift parameters. A COSFIRE filter response is computed as the weighted geometric mean of the blurred and shifted responses of the selected Gabor filters. It shares similar properties with some shape-selective neurons in visual cortex, which provided inspiration for this work. Results: We demonstrate the effectiveness of the proposed filters in three applications: the detection of retinal vascular bifurcations (DRIVE dataset: 98.50 percent recall, 96.09 percent precision), the recognition of handwritten digits (MNIST dataset: 99.48 percent correct classification), and the detection and recognition of traffic signs in complex scenes (100 percent recall and precision). Conclusions: The proposed COSFIRE filters are conceptually simple and easy to implement. They are versatile keypoint detectors and are highly effective in practical computer vision applications.},
  internal-note = {NOTE(review): citation key says 2013 and TPAMI vol.\ 35 no.\ 2 is the Feb.\ 2013 print issue, yet year = 2012 (likely online-first date) -- confirm intended year},
  keywords      = {brain-inspired, image classification, keypoint detection, object detection, segmentation, trainable filters},
  pubstate      = {published},
  tppubtype     = {article}
}