2019
Bhole, Amey; Falzon, Owen; Biehl, Michael; Azzopardi, George
A Computer Vision Pipeline that Uses Thermal and RGB Images for the Recognition of Holstein Cattle Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, image classification, pattern recognition, smart farming
@inproceedings{bhole2019computer,
  title     = {A Computer Vision Pipeline that Uses Thermal and {RGB} Images for the Recognition of {Holstein} Cattle},
  author    = {Bhole, Amey and Falzon, Owen and Biehl, Michael and Azzopardi, George},
  doi       = {10.1007/978-3-030-29891-3_10},
  year      = {2019},
  date      = {2019-01-01},
  urldate   = {2019-01-01},
  booktitle = {International Conference on Computer Analysis of Images and Patterns},
  volume    = {11679},
  pages     = {108--119},
  publisher = {Springer},
  abstract  = {The monitoring of farm animals is important as it allows farmers keeping track of the performance indicators and any signs of health issues, which is useful to improve the production of milk, meat, eggs and others. In Europe, bovine identification is mostly dependent upon the electronic ID/RFID ear tags, as opposed to branding and tattooing. The RFID based ear-tagging approach has been called into question because of implementation and management costs, physical damage and animal welfare concerns. In this paper, we conduct a case study for individual identification of Holstein cattle, characterized by black, brown and white patterns, in collaboration with the Dairy campus in Leeuwarden. We use a FLIR E6 thermal camera to collect an infrared and RGB image of the side view of each cow just after leaving the milking station. We apply a fully automatic pipeline, which consists of image processing, computer vision and machine learning techniques on a data set containing 1237 images and 136 classes (i.e. individual animals). In particular, we use the thermal images to segment the cattle from the background and remove horizontal and vertical pipes that occlude the cattle in the station, followed by filling the blank areas with an inpainting algorithm. We use the segmented image and apply transfer learning to a pre-trained AlexNet convolutional neural network. We apply five-fold cross-validation and achieve an average accuracy rate of 0.9754 ± 0.0097. The results obtained suggest that the proposed non-invasive approach is highly effective in the automatic recognition of Holstein cattle from the side view. In principle, this approach is applicable to any farm animals that are characterized by distinctive coat patterns.},
  keywords  = {convnets, deep learning, image classification, pattern recognition, smart farming},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kind, Adrian; Azzopardi, George
An Explainable AI-Based Computer Aided Detection System for Diabetic Retinopathy Using Retinal Fundus Images Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, medical image analysis, pattern recognition
@inproceedings{kind2019explainable,
  title     = {An Explainable {AI}-Based Computer Aided Detection System for Diabetic Retinopathy Using Retinal Fundus Images},
  author    = {Kind, Adrian and Azzopardi, George},
  doi       = {10.1007/978-3-030-29888-3_37},
  year      = {2019},
  date      = {2019-01-01},
  urldate   = {2019-01-01},
  booktitle = {International Conference on Computer Analysis of Images and Patterns},
  pages     = {457--468},
  publisher = {Springer},
  abstract  = {Diabetic patients have a high risk of developing diabetic retinopathy (DR), which is one of the major causes of blindness. With early detection and the right treatment patients may be spared from losing their vision. We propose a computer-aided detection system, which uses retinal fundus images as input and it detects all types of lesions that define diabetic retinopathy. The aim of our system is to assist eye specialists by automatically detecting the healthy retinas and referring the images of the unhealthy ones. For the latter cases, the system offers an interactive tool where the doctor can examine the local lesions that our system marks as suspicious. The final decision remains in the hands of the ophthalmologists. Our approach consists of a multi-class detector, that is able to locate and recognize all candidate DR-defining lesions. If the system detects at least one lesion, then the image is marked as unhealthy. The lesion detector is built on the faster R-CNN ResNet 101 architecture, which we train by transfer learning. We evaluate our approach on three benchmark data sets, namely Messidor-2, IDRiD, and E-Ophtha by measuring the sensitivity (SE) and specificity (SP) based on the binary classification of healthy and unhealthy images. The results that we obtain for Messidor-2 and IDRiD are (SE: 0.965, SP: 0.843), and (SE: 0.83, SP: 0.94), respectively. For the E-Ophtha data set we follow the literature and perform two experiments, one where we detect only lesions of the type micro aneurysms (SE: 0.939, SP: 0.82) and the other when we detect only exudates (SE: 0.851, SP: 0.971). Besides the high effectiveness that we achieve, the other important contribution of our work is the interactive tool, which we offer to the medical experts, highlighting all suspicious lesions detected by the proposed system.},
  keywords  = {convnets, deep learning, medical image analysis, pattern recognition},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2018
Bonnici, Alexandra; Bugeja, Dorian; Azzopardi, George
Vectorisation of sketches with shadows and shading using COSFIRE filters Inproceedings
Links | BibTeX | Altmetric | Tags: brain-inspired, pattern recognition, trainable filters
@inproceedings{bonnici2018vectorisation,
  title     = {Vectorisation of sketches with shadows and shading using {COSFIRE} filters},
  author    = {Bonnici, Alexandra and Bugeja, Dorian and Azzopardi, George},
  doi       = {10.1145/3209280.3209525},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {Proceedings of the ACM Symposium on Document Engineering 2018},
  pages     = {1--10},
  publisher = {ACM},
  keywords  = {brain-inspired, pattern recognition, trainable filters},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bonnici, Alexandra; Abela, Julian; Zammit, Nicholas; Azzopardi, George
Automatic ornament localisation, recognition and expression from music sheets Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, pattern recognition, trainable filters
@inproceedings{bonnici2018automatic,
  title     = {Automatic ornament localisation, recognition and expression from music sheets},
  author    = {Bonnici, Alexandra and Abela, Julian and Zammit, Nicholas and Azzopardi, George},
  doi       = {10.1145/3209280.3209536},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {Proceedings of the ACM Symposium on Document Engineering 2018},
  pages     = {1--11},
  publisher = {ACM},
  abstract  = {Musical notation is a means of passing on performance instructions with fidelity to others. Composers, however, often introduced embellishments to the music they performed notating these embellishments with symbols next to the relevant notes. In time, these symbols, known as ornaments, and their interpretation became standardized such that there are acceptable ways of interpreting an ornament. Although music books may contain footnotes which express the ornament in full notation, these remain cumbersome to read. Ideally, a music student will have the possibility of selecting ornamented notes and express them as full notation. The student should also have the possibility to collapse the expressed ornament back to its symbolic representation, giving the student the possibility of also becoming familiar with playing from the ornamented score. In this paper, we propose a complete pipeline that achieves this goal. We compare the use of COSFIRE and template matching for optical music recognition to identify and extract musical content from the score. We then express the score using MusicXML and design a simple user interface which allows the user to select ornamented notes, view their expressed notation and decide whether they want to retain the expressed notation, modify it, or revert to the symbolic representation of the ornament. The performance results that we achieve indicate the effectiveness of our proposed approach.},
  keywords  = {brain-inspired, pattern recognition, trainable filters},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Strisciuglio, Nicola; Azzopardi, George; Petkov, Nicolai
Brain-inspired robust delineation operator Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, noise suppression, pattern recognition
@inproceedings{strisciuglio2018brain,
  title     = {Brain-inspired robust delineation operator},
  author    = {Strisciuglio, Nicola and Azzopardi, George and Petkov, Nicolai},
  doi       = {10.1007/978-3-030-11015-4_41},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {Proceedings of the European Conference on Computer Vision (ECCV) Workshops},
  pages     = {555--565},
  publisher = {Springer},
  abstract  = {In this paper we present a novel filter, based on the existing COSFIRE filter, for the delineation of patterns of interest. It includes a mechanism of push-pull inhibition that improves robustness to noise in terms of spurious texture. Push-pull inhibition is a phenomenon that is observed in neurons in area V1 of the visual cortex, which suppresses the response of certain simple cells for stimuli of preferred orientation but of non-preferred contrast. This type of inhibition allows for sharper detection of the patterns of interest and improves the quality of delineation especially in images with spurious texture.
We performed experiments on images from different applications, namely the detection of rose stems for automatic gardening, the delineation of cracks in pavements and road surfaces, and the segmentation of blood vessels in retinal images. Push-pull inhibition helped to improve results considerably in all applications.},
  keywords  = {brain-inspired, noise suppression, pattern recognition},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
We performed experiments on images from different applications, namely the detection of rose stems for automatic gardening, the delineation of cracks in pavements and road surfaces, and the segmentation of blood vessels in retinal images. Push-pull inhibition helped to improve results considerably in all applications.
Spiteri, Maria; Azzopardi, George
Customer Churn Prediction for a Motor Insurance Company Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: pattern recognition, predictive analysis
@inproceedings{spiteri2018customer,
  title     = {Customer Churn Prediction for a Motor Insurance Company},
  author    = {Spiteri, Maria and Azzopardi, George},
  doi       = {10.1109/ICDIM.2018.8847066},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {2018 Thirteenth International Conference on Digital Information Management (ICDIM)},
  pages     = {173--178},
  publisher = {IEEE},
  abstract  = {Customer churn poses a significant challenge in various industries, including motor insurance. Retaining customers within insurance companies is much more challenging than in any other industry as policies are generally renewed every year. The main aim of this research is to identify the risk factors associated with churn, establish who are the churning customers and to model time until churn. The dataset used includes 72,445 policy holders and covers a period of one year. The data comprises information related to premiums, claims, policies and policy holders. The random forest algorithm turns out to be a very effective model for forecasting customer churn, reaching an accuracy rate of 91.18%. On the other hand, survival analysis was used to model time until churn and it was concluded that approximately 90% of the policy holders survived for the first five years while the majority of the policy holders survived till the end of the policy period. These results could be used to target the identified customers in marketing campaigns aimed at reducing the rate of churn while increasing profitability.},
  keywords  = {pattern recognition, predictive analysis},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2015
Guo, Jiapan; Shi, Chenyu; Azzopardi, George; Petkov, Nicolai
Recognition of architectural and electrical symbols by COSFIRE filters with inhibition Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: object detection, pattern recognition, trainable filters
@inproceedings{guo2015recognition,
  title     = {Recognition of architectural and electrical symbols by {COSFIRE} filters with inhibition},
  author    = {Guo, Jiapan and Shi, Chenyu and Azzopardi, George and Petkov, Nicolai},
  doi       = {10.1007/978-3-319-23117-4_30},
  year      = {2015},
  date      = {2015-01-01},
  urldate   = {2015-01-01},
  booktitle = {International Conference on Computer Analysis of Images and Patterns},
  pages     = {348--358},
  publisher = {Springer},
  address   = {Cham},
  abstract  = {The automatic recognition of symbols can be used to automatically convert scanned drawings into digital representations compatible with computer aided design software. We propose a novel approach to automatically recognize architectural and electrical symbols. The proposed method extends the existing trainable COSFIRE approach by adding an inhibition mechanism that is inspired by shape-selective TEO neurons in visual cortex. A COSFIRE filter with inhibition takes as input excitatory and inhibitory responses from line and edge detectors. The type (excitatory or inhibitory) and the spatial arrangement of low level features are determined in an automatic configuration step that analyzes two types of prototype pattern called positive and negative. Excitatory features are extracted from a positive pattern and inhibitory features are extracted from one or more negative patterns. In our experiments we use four subsets of images with different noise levels from the Graphics Recognition data set (GREC 2011) and demonstrate that the inhibition mechanism that we introduce improves the effectiveness of recognition substantially.},
  keywords  = {object detection, pattern recognition, trainable filters},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2006
Azzopardi, George
Offline handwritten signature verification using Radial Basis Function neural networks Inproceedings
BibTeX | Tags: image classification, pattern recognition
@inproceedings{azzopardi2006offline,
  title         = {Offline handwritten signature verification using Radial Basis Function neural networks},
  author        = {Azzopardi, George},
  year          = {2006},
  date          = {2006-01-01},
  urldate       = {2006-01-01},
  booktitle     = {WICT2008},
  publisher     = {University of Malta},
  internal-note = {review: booktitle "WICT2008" conflicts with year 2006 -- verify venue edition/year with the author},
  keywords      = {image classification, pattern recognition},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}