2021
Bennabhaktula, Guru Swaroop; Antonisse, Joey; Azzopardi, George
Abstract | Links | BibTeX | Altmetric | Tags: adversarial attacks, brain-inspired, convnets, deep learning, image classification, noise suppression
@inproceedings{bennabhaktula2021improving,
  title        = {On Improving Generalization of {CNN}-Based Image Classification with Delineation Maps Using the {CORF} Push-Pull Inhibition Operator},
  author       = {Bennabhaktula, Guru Swaroop and Antonisse, Joey and Azzopardi, George},
  doi          = {10.1007/978-3-030-89128-2_42},
  year         = {2021},
  date         = {2021-01-01},
  urldate      = {2021-01-01},
  booktitle    = {International Conference on Computer Analysis of Images and Patterns},
  pages        = {434--444},
  organization = {Springer},
  abstract     = {Deployed image classification pipelines are typically dependent on the images captured in real-world environments. This means that images might be affected by different sources of perturbations (e.g. sensor noise in low-light environments). The main challenge arises by the fact that image quality directly impacts the reliability and consistency of classification tasks. This challenge has, hence, attracted wide interest within the computer vision communities. We propose a transformation step that attempts to enhance the generalization ability of CNN models in the presence of unseen noise in the test set. Concretely, the delineation maps of given images are determined using the CORF push-pull inhibition operator. Such an operation transforms an input image into a space that is more robust to noise before being processed by a CNN. We evaluated our approach on the Fashion MNIST data set with an AlexNet model. It turned out that the proposed CORF-augmented pipeline achieved comparable results on noise-free images to those of a conventional AlexNet classification model without CORF delineation maps, but it consistently achieved significantly superior performance on test images perturbed with different levels of Gaussian and uniform noise.},
  keywords     = {adversarial attacks, brain-inspired, convnets, deep learning, image classification, noise suppression},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
2019
Bhole, Amey; Falzon, Owen; Biehl, Michael; Azzopardi, George
A Computer Vision Pipeline that Uses Thermal and RGB Images for the Recognition of Holstein Cattle Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, image classification, pattern recognition, smart farming
@inproceedings{bhole2019computer,
  title        = {A Computer Vision Pipeline that Uses Thermal and {RGB} Images for the Recognition of {Holstein} Cattle},
  author       = {Bhole, Amey and Falzon, Owen and Biehl, Michael and Azzopardi, George},
  doi          = {10.1007/978-3-030-29891-3_10},
  year         = {2019},
  date         = {2019-01-01},
  urldate      = {2019-01-01},
  booktitle    = {International Conference on Computer Analysis of Images and Patterns},
  volume       = {11679},
  pages        = {108--119},
  organization = {Springer},
  abstract     = {The monitoring of farm animals is important as it allows farmers keeping track of the performance indicators and any signs of health issues, which is useful to improve the production of milk, meat, eggs and others. In Europe, bovine identification is mostly dependent upon the electronic ID/RFID ear tags, as opposed to branding and tattooing. The RFID based ear-tagging approach has been called into question because of implementation and management costs, physical damage and animal welfare concerns. In this paper, we conduct a case study for individual identification of Holstein cattle, characterized by black, brown and white patterns, in collaboration with the Dairy campus in Leeuwarden. We use a FLIR E6 thermal camera to collect an infrared and RGB image of the side view of each cow just after leaving the milking station. We apply a fully automatic pipeline, which consists of image processing, computer vision and machine learning techniques on a data set containing 1237 images and 136 classes (i.e. individual animals). In particular, we use the thermal images to segment the cattle from the background and remove horizontal and vertical pipes that occlude the cattle in the station, followed by filling the blank areas with an inpainting algorithm. We use the segmented image and apply transfer learning to a pre-trained AlexNet convolutional neural network. We apply five-fold cross-validation and achieve an average accuracy rate of 0.9754 ± 0.0097. The results obtained suggest that the proposed non-invasive approach is highly effective in the automatic recognition of Holstein cattle from the side view. In principle, this approach is applicable to any farm animals that are characterized by distinctive coat patterns.},
  keywords     = {convnets, deep learning, image classification, pattern recognition, smart farming},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
2018
Buhagiar, Juan; Strisciuglio, Nicola; Petkov, Nicolai; Azzopardi, George
Automatic Segmentation of Indoor and Outdoor Scenes from Visual Lifelogging Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: egocentric vision, image classification
@inproceedings{buhagiar2018automatic,
  title     = {Automatic Segmentation of Indoor and Outdoor Scenes from Visual Lifelogging},
  author    = {Buhagiar, Juan and Strisciuglio, Nicola and Petkov, Nicolai and Azzopardi, George},
  doi       = {10.3233/978-1-61499-929-4-194},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {Applications of Intelligent Systems, Proceedings published in Frontiers in Artificial Intelligence and Applications},
  volume    = {310},
  pages     = {194--202},
  abstract  = {Visual Lifelogging is the process of keeping track of one's life through wearable cameras. The focus of this research is to automatically classify images, captured from a wearable camera, into indoor and outdoor scenes. The results of this classification may be used in several applications. For instance, one can quantify the time a person spends outdoors and indoors which may give insights about the psychology of the concerned person. We use transfer learning from two VGG convolutional neural networks (CNN), one that is pre-trained on the ImageNet data set and the other on the Places data set. We investigate two methods of combining features from the two pre-trained CNNs. We evaluate the performance on the new UBRug data set and the benchmark SUN397 data set and achieve accuracy rates of 98.24% and 97.06%, respectively. Features obtained from the ImageNet pretrained CNN turned out to be more effective than those obtained from the Places pre-trained CNN. Fusing the feature vectors obtained from these two CNNs is an effective way to improve the classification. In particular, the performance that we achieve on the SUN397 data set outperforms the state-of-the-art.},
  keywords  = {egocentric vision, image classification},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2015
Bouma, Henri; Eendebak, Pieter T; Schutte, Klamer; Azzopardi, George; Burghouts, Gertjan J
Incremental concept learning with few training examples and hierarchical classification Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: image classification
@inproceedings{bouma2015incremental,
  title        = {Incremental concept learning with few training examples and hierarchical classification},
  author       = {Bouma, Henri and Eendebak, Pieter T and Schutte, Klamer and Azzopardi, George and Burghouts, Gertjan J},
  doi          = {10.1117/12.2194438},
  year         = {2015},
  date         = {2015-01-01},
  urldate      = {2015-01-01},
  booktitle    = {Optics and Photonics for Counterterrorism, Crime Fighting, and Defence XI; and Optical Materials and Biomaterials in Security and Defence Systems Technology XII},
  volume       = {9652},
  pages        = {96520E},
  organization = {International Society for Optics and Photonics},
  abstract     = {Object recognition and localization are important to automatically interpret video and allow better querying on its content. We propose a method for object localization that learns incrementally and addresses four key aspects. Firstly, we show that for certain applications, recognition is feasible with only a few training samples. Secondly, we show that novel objects can be added incrementally without retraining existing objects, which is important for fast interaction. Thirdly, we show that an unbalanced number of positive training samples leads to biased classifier scores that can be corrected by modifying weights. Fourthly, we show that the detector performance can deteriorate due to hard-negative mining for similar or closely related classes (e.g., for Barbie and dress, because the doll is wearing a dress). This can be solved by our hierarchical classification. We introduce a new dataset, which we call TOSO, and use it to demonstrate the effectiveness of the proposed method for the localization and recognition of multiple objects in images.},
  keywords     = {image classification},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
Schutte, Klamer; Bouma, Henri; Schavemaker, John; Daniele, Laura; Sappelli, Maya; Koot, Gijs; Eendebak, Pieter; Azzopardi, George; Spitters, Martijn; Boer, Maaike; et al.
Interactive detection of incrementally learned concepts in images with ranking and semantic query interpretation Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: image classification, image retrieval
@inproceedings{schutte2015interactive,
  title        = {Interactive detection of incrementally learned concepts in images with ranking and semantic query interpretation},
  author       = {Schutte, Klamer and Bouma, Henri and Schavemaker, John and Daniele, Laura and Sappelli, Maya and Koot, Gijs and Eendebak, Pieter and Azzopardi, George and Spitters, Martijn and Boer, Maaike and others},
  doi          = {10.1109/CBMI.2015.7153623},
  year         = {2015},
  date         = {2015-01-01},
  urldate      = {2015-01-01},
  booktitle    = {2015 13th International Workshop on Content-Based Multimedia Indexing (CBMI)},
  pages        = {1--4},
  organization = {IEEE},
  abstract     = {The number of networked cameras is growing exponentially. Multiple applications in different domains result in an increasing need to search semantically over video sensor data. In this paper, we present the GOOSE demonstrator, which is a real-time general-purpose search engine that allows users to pose natural language queries to retrieve corresponding images. Top-down, this demonstrator interprets queries, which are presented as an intuitive graph to collect user feedback. Bottom-up, the system automatically recognizes and localizes concepts in images and it can incrementally learn novel concepts. A smart ranking combines both and allows effective retrieval of relevant images.},
  keywords     = {image classification, image retrieval},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
2013
Azzopardi, George; Petkov, Nicolai
A shape descriptor based on trainable COSFIRE filters for the recognition of handwritten digits Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, image classification, trainable filters
@inproceedings{azzopardi2013shape,
  title        = {A shape descriptor based on trainable {COSFIRE} filters for the recognition of handwritten digits},
  author       = {Azzopardi, George and Petkov, Nicolai},
  doi          = {10.1007/978-3-642-40246-3_2},
  year         = {2013},
  date         = {2013-01-01},
  urldate      = {2013-01-01},
  booktitle    = {International Conference on Computer Analysis of Images and Patterns},
  pages        = {9--16},
  organization = {Springer Berlin Heidelberg},
  abstract     = {The recognition of handwritten digits is an application which has been used as a benchmark for comparing shape recognition methods. We train COSFIRE filters to be selective for different parts of handwritten digits. In analogy with the neurophysiological concept of population coding we use the responses of multiple COSFIRE filters as a shape descriptor of a handwritten digit. We demonstrate the effectiveness of the proposed approach on two data sets of handwritten digits: Western Arabic (MNIST) and Farsi for which we achieve high recognition rates of 99.52% and 99.33%, respectively. COSFIRE filters are conceptually simple, easy to implement and they are versatile trainable feature detectors. The shape descriptor that we propose is highly effective to the automatic recognition of handwritten digits.},
  keywords     = {brain-inspired, image classification, trainable filters},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
Azzopardi, George; Petkov, Nicolai
COSFIRE: A Brain-Inspired Approach to Visual Pattern Recognition Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: image classification, trainable filters
@inproceedings{azzopardi2013cosfirec,
  title        = {{COSFIRE}: A Brain-Inspired Approach to Visual Pattern Recognition},
  author       = {Azzopardi, George and Petkov, Nicolai},
  doi          = {10.1007/978-3-319-12084-3_7},
  year         = {2013},
  date         = {2013-01-01},
  urldate      = {2013-01-01},
  booktitle    = {International Workshop on Brain-Inspired Computing},
  pages        = {76--87},
  organization = {Springer, Cham},
  abstract     = {The primate visual system has an impressive ability to generalize and to discriminate between numerous objects and it is robust to many geometrical transformations as well as lighting conditions. The study of the visual system has been an active research field in neurophysiology for more than half a century. The construction of computational models of visual neurons can help us gain insight in the processing of information in visual cortex which we can use to provide more robust solutions to computer vision applications. Here, we demonstrate how inspiration from the functions of shape-selective V4 neurons can be used to design trainable filters for visual pattern recognition. We call this approach COSFIRE, which stands for Combination of Shifted Filter Responses. We illustrate how a COSFIRE filter can be configured to be selective for the spatial arrangement of lines and/or edges that form the shape of a given prototype pattern. Finally, we demonstrate the effectiveness of the COSFIRE approach in three applications: the detection of vascular bifurcations in retinal fundus images, the localization and recognition of traffic signs in complex scenes and the recognition of handwritten digits. This work is a further step in understanding how visual information is processed in the brain and how information on pixel intensities is converted into information about objects. We demonstrate how this understanding can be used for the design of effective computer vision algorithms.},
  keywords     = {image classification, trainable filters},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
2009
Azzopardi, George; Smeraldi, Fabrizio
Variance Ranklets: Orientation-selective Rank Features for Contrast Modulations Inproceedings
Abstract | Links | BibTeX | Tags: brain-inspired, image classification
@inproceedings{azzopardi2009variance,
  title     = {{Variance Ranklets}: Orientation-selective Rank Features for Contrast Modulations},
  author    = {Azzopardi, George and Smeraldi, Fabrizio},
  url       = {http://www.bmva.org/bmvc/2009/Papers/Paper456/Paper456.html},
  year      = {2009},
  date      = {2009-01-01},
  urldate   = {2009-01-01},
  booktitle = {BMVC},
  pages     = {1--11},
  abstract  = {We introduce a novel type of orientation\textendashselective rank features that are sensitive to contrast modulations (second\textendashorder stimuli). Variance Ranklets are designed in close analogy with the standard Ranklets, but use the Siegel\textendashTukey statistics for dispersion instead of the Wilcoxon statistics. Their response shows the same orientation selectivity pattern of Haar wavelets on second\textendashorder signals that are not detectable by linear filters. To the best of our knowledge, this is the first family of rank filters designed to detect orientation in variance modulations. We validate our descriptors with an application to texture classification over a subset of the VisTex and Brodatz databases. The combination of standard (intensity) Ranklets with Variance Ranklets greatly improves on the performance of Ranklets alone. Comparison with other published results shows that state\textendashof\textendashthe\textendashart recognition rates can be achieved with a simple Nearest Neighbour classifier.},
  keywords  = {brain-inspired, image classification},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2006
Azzopardi, George
Offline handwritten signature verification using Radial Basis Function neural networks Inproceedings
BibTeX | Tags: image classification, pattern recognition
@inproceedings{azzopardi2006offline,
  title         = {Offline handwritten signature verification using {Radial Basis Function} neural networks},
  author        = {Azzopardi, George},
  year          = {2006},
  date          = {2006-01-01},
  urldate       = {2006-01-01},
  booktitle     = {WICT2008},
  publisher     = {University of Malta},
  internal-note = {NOTE(review): year is 2006 but booktitle says WICT2008 -- confirm which is correct},
  keywords      = {image classification, pattern recognition},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}