2025
Ndung'u, Steven; Grobler, Trienko; Wijnholds, Stefan J.; Azzopardi, George
Anomaly detection of radio galaxies with trainable COSFIRE filters Inproceedings
@inproceedings{Ndungu2025,
title = {Anomaly detection of radio galaxies with trainable COSFIRE filters},
author = {Steven Ndung'u and Trienko Grobler and Stefan J. Wijnholds and George Azzopardi},
year = {2025},
date = {2025-04-15},
urldate = {2025-04-15},
booktitle = {URSI Asia-Pacific Radio Science Conference (URSI AP-RASC 2025)},
keywords = {anomaly detection, radioastronomy, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
2024
Aswath, Anusha; Duinkerken, B. H. Peter; Giepmans, Ben N. G.; Azzopardi, George; Alsahaf, Ahmad
Interactive segmentation of biostructures through hyperspectral electron microscopy Inproceedings
@inproceedings{Aswath2024,
title = {Interactive segmentation of biostructures through hyperspectral electron microscopy},
author = {Anusha Aswath and B.H. Peter Duinkerken and Ben N. G. Giepmans and George Azzopardi and Ahmad Alsahaf},
doi = {10.1109/WHISPERS65427.2024.10876486},
year = {2024},
date = {2024-10-30},
urldate = {2024-10-30},
booktitle = {Workshop on Hyperspectral Image and Signal Processing: Evolution in Remote Sensing (WHISPERS)},
publisher = {IEEE},
keywords = {electron microscopy, medical image analysis, segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
Bennabhaktula, Guru Swaroop; Alegre, Enrique; Strisciuglio, Nicola; Azzopardi, George
PushPull-Net: Inhibition-driven ResNet robust to image corruptions Inproceedings
@inproceedings{Bennabhaktula2024,
title = {PushPull-Net: Inhibition-driven ResNet robust to image corruptions},
author = {Guru Swaroop Bennabhaktula and Enrique Alegre and Nicola Strisciuglio and George Azzopardi},
doi = {10.1007/978-3-031-78186-5_26},
year = {2024},
date = {2024-08-07},
urldate = {2024-08-07},
booktitle = {International Conference on Pattern Recognition},
keywords = {brain-inspired, conv, convnets, deep learning, generalization, out of distribution, robustness},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Aswath, Anusha; Alsahaf, Ahmad; Westenbrink, B. Daan; Giepmans, Ben N. G.; Azzopardi, George
COFI - Coarse-semantic to fine-instance unsupervised mitochondria segmentation in EM Inproceedings
@inproceedings{Anusha2023,
title = {COFI - Coarse-semantic to fine-instance unsupervised mitochondria segmentation in EM},
author = {Anusha Aswath and Ahmad Alsahaf and B. Daan Westenbrink and Ben N. G. Giepmans and George Azzopardi},
doi = {10.1007/978-3-031-44240-7_9},
year = {2023},
date = {2023-09-20},
urldate = {2023-07-01},
booktitle = {Computer Analysis of Images and Patterns. CAIP 2023. Lecture Notes in Computer Science},
volume = {14185},
publisher = {Springer},
keywords = {brain-inspired, contour detection, convnets, deep learning, segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Xueyi; Risi, Nicoletta; Martinez, Estefania Talavera; Chicca, Elisabetta; Karastoyanova, Dimka; Azzopardi, George
Fall detection with event-based data: A case study Inproceedings
@inproceedings{Wang2023,
title = {Fall detection with event-based data: A case study},
author = {Xueyi Wang and Nicoletta Risi and Estefania Talavera Martinez and Elisabetta Chicca and Dimka Karastoyanova and George Azzopardi},
doi = {10.1007/978-3-031-44240-7_4},
year = {2023},
date = {2023-09-20},
urldate = {2023-09-20},
booktitle = {Computer Analysis of Images and Patterns. CAIP 2023. Lecture Notes in Computer Science},
volume = {14185},
publisher = {Springer},
keywords = {deep learning, event-based, fall detection},
pubstate = {published},
tppubtype = {inproceedings}
}
Prins, Fabian L.; Tomanin, Dario; Kamenz, Julia; Azzopardi, George
Biometric Recognition of African Clawed Frogs Inproceedings
@inproceedings{Prins2023,
title = {Biometric Recognition of African Clawed Frogs},
author = {Fabian L. Prins and Dario Tomanin and Julia Kamenz and George Azzopardi},
doi = {10.1007/978-3-031-44240-7_15},
year = {2023},
date = {2023-09-20},
urldate = {2023-07-01},
booktitle = {Computer Analysis of Images and Patterns. CAIP 2023. Lecture Notes in Computer Science},
keywords = {biometrics, brain-inspired, contour detection},
pubstate = {published},
tppubtype = {inproceedings}
}
Jokar, Fatemeh; Azzopardi, George; Palotti, Joao
Towards Accurate and Efficient Sleep Period Detection using Wearable Devices Inproceedings
@inproceedings{Jokar2023,
title = {Towards Accurate and Efficient Sleep Period Detection using Wearable Devices},
author = {Fatemeh Jokar and George Azzopardi and Joao Palotti},
doi = {10.1007/978-3-031-44240-7_5},
year = {2023},
date = {2023-09-20},
urldate = {2023-07-01},
booktitle = {Computer Analysis of Images and Patterns. CAIP 2023. Lecture Notes in Computer Science},
keywords = {predictive analysis, wearables},
pubstate = {published},
tppubtype = {inproceedings}
}
Ndung'u, Steven; Grobler, Trienko; Wijnholds, Stefan J.; Karastoyanova, Dimka; Azzopardi, George
Deep supervised hashing for fast retrieval of radio image cubes Inproceedings Forthcoming
@inproceedings{Ndungu2023,
title = {Deep supervised hashing for fast retrieval of radio image cubes},
author = {Steven Ndung'u and Trienko Grobler and Stefan J. Wijnholds and Dimka Karastoyanova and George Azzopardi},
year = {2023},
date = {2023-09-14},
booktitle = {Proceedings of the 35th URSI General Assembly and Scientific Symposium},
keywords = {deep hashing, information retrieval},
pubstate = {forthcoming},
tppubtype = {inproceedings}
}
2022
Strisciuglio, Nicola; Azzopardi, George
Visual response inhibition for increased robustness of convolutional networks to distribution shifts Inproceedings
@inproceedings{Strisciuglio2022,
title = {Visual response inhibition for increased robustness of convolutional networks to distribution shifts},
author = {Nicola Strisciuglio and George Azzopardi},
year = {2022},
date = {2022-11-02},
urldate = {2022-11-02},
booktitle = {Distribution Shift Workshop, NeurIPS},
keywords = {brain-inspired, inhibition, push-pull},
pubstate = {published},
tppubtype = {inproceedings}
}
2021
Velasco-Mata, J.; Chaves, D.; de Mata, V.; Al-Nabki, M. W.; Fidalgo, Eduardo; Alegre, Enrique; Azzopardi, George
Development of a Hardware Benchmark for Forensic Face Detection Applications Inproceedings
@inproceedings{velasco2021development,
title = {Development of a Hardware Benchmark for Forensic Face Detection Applications},
author = {J. Velasco-Mata and D. Chaves and V. de Mata and M. W. Al-Nabki and Eduardo Fidalgo and Enrique Alegre and George Azzopardi},
url = {https://ruidera.uclm.es/xmlui/handle/10578/28635},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
booktitle = {Cybersecurity Research National Conferences - INCIBE, Leon, Spain},
organization = {JNIC},
abstract = {Face detection techniques are valuable in forensic investigation since they help criminal investigators to identify victims/offenders in child sexual exploitation material. Deep learning approaches proved successful in these tasks, but their high computational requirements make them unsuitable if there are time constraints. To cope with this problem, we use a resizing strategy over three face detection techniques (MTCNN, PyramidBox and DSFD) to improve their speed over samples selected from the WIDER Face and UFDD datasets across several CPUs and GPUs. The best speed-detection trade-off was achieved by reducing the images to 50% of their original size and then applying DSFD. The fastest hardware for this purpose was an Nvidia GPU based on the Turing architecture.},
keywords = {face analysis, forensic image analysis},
pubstate = {published},
tppubtype = {inproceedings}
}
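The resizing strategy in the abstract above boils down to downscaling each image before detection and rescaling the boxes afterwards. A minimal sketch in Python, assuming OpenCV and a generic detect_faces callable (a hypothetical stand-in for MTCNN, PyramidBox or DSFD, which are not reimplemented here):

import cv2

def detect_at_half_resolution(image, detect_faces, scale=0.5):
    # detect_faces(img) -> list of (x, y, w, h) boxes; hypothetical detector.
    small = cv2.resize(image, None, fx=scale, fy=scale,
                       interpolation=cv2.INTER_AREA)
    boxes = detect_faces(small)
    # Map the detections back to the full-resolution coordinate frame.
    return [(int(x / scale), int(y / scale), int(w / scale), int(h / scale))
            for (x, y, w, h) in boxes]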
Biswas, R.; Chaves, D.; Jáñez-Martino, F.; Blanco-Medina, P.; Fidalgo, E.; García-Olalla, C.; Azzopardi, G.
Reinforcement of age estimation in forensic tools to detect Child Sexual Exploitation Material Inproceedings
@inproceedings{biswas2021reinforcement,
title = {Reinforcement of age estimation in forensic tools to detect Child Sexual Exploitation Material},
author = {R. Biswas and D. Chaves and F. J\'{a}\~{n}ez-Martino and P. Blanco-Medina and E. Fidalgo and C. Garc\'{i}a-Olalla and G. Azzopardi},
url = {https://ruidera.uclm.es/xmlui/handle/10578/28631},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
organization = {Cybersecurity Research National Conferences},
abstract = {Several image-based approaches for estimating the age of a person are available in the computer vision literature. However, most of them perform poorly on minors and young adults, especially when the eyes are occluded. This type of occlusion is common in Child Sexual Exploitation Material (CSEM), where it is used to hide the identity of victims. We introduce an approach that builds Soft Stagewise Regression Network (SSR-Net) models with natural and eye-occluded facial images, to estimate the age of minors and young adults. Our proposal reduces the Mean Absolute Error from 7.26 to 6.5, and 6.81 to 4.07 for SSR-Net pre-trained models on the IMDB and MORPH datasets, respectively.},
keywords = {face analysis, forensic image analysis},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Xueyi; Martinez, Estefania Talavera; Karastoyanova, Dimka; Azzopardi, George
Fall detection and recognition from egocentric visual data: A case study Inproceedings
@inproceedings{Wang2021,
title = {Fall detection and recognition from egocentric visual data: A case study},
author = {Xueyi Wang and Estefania Talavera Martinez and Dimka Karastoyanova and George Azzopardi},
editor = {Alberto Del Bimbo and Rita Cucchiara and Stan Sclaroff and Giovanni Maria Farinella and Tao Mei and Marco Bertini and others},
url = {https://doi.org/10.34894/3DV8BF},
doi = {10.1007/978-3-030-68763-2_33},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
booktitle = {25th International Conference on Pattern Recognition Workshops, ICPR 2020},
abstract = {Falling is among the most damaging events for elderly people, and it may sometimes end in significant injuries. Due to fear of falling, many elderly people choose to stay at home more in order to feel safer. In this work, we propose a new fall detection and recognition approach, which analyses egocentric videos collected by wearable cameras through a computer vision/machine learning pipeline. More specifically, we conduct a case study with one volunteer who collected video data from two cameras; one attached to the chest and the other attached to the waist. A total of 776 videos were collected describing four types of falls and nine kinds of non-falls. Our method works as follows: it extracts several uniformly distributed frames from the videos, uses a pre-trained ConvNet model to describe each frame by a feature vector, and then applies feature fusion and a classification model. Our proposed model demonstrates its suitability for the detection and recognition of falls from the data captured by the two cameras together. For this case study, we detect all falls with only one false positive, and reach a balanced accuracy of 93% in the recognition of the 13 types of activities. Similar results are obtained for videos of the two cameras when considered separately. Moreover, we observe better performance on videos collected in indoor scenes.},
note = {The data set can be downloaded from https://doi.org/10.34894/3DV8BF},
keywords = {egocentric vision, fall detection, wearables},
pubstate = {published},
tppubtype = {inproceedings}
}
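The pipeline sketched in this abstract (uniform frame sampling, pre-trained ConvNet features, fusion, then a classifier) can be outlined as below. ResNet-18 and simple average fusion are illustrative assumptions; the abstract does not fix the ConvNet or fusion method.

import numpy as np
import torch
from torchvision import models, transforms

extractor = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
extractor.fc = torch.nn.Identity()   # keep the 512-d penultimate features
extractor.eval()

preprocess = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

def video_descriptor(frames, n_samples=8):
    # frames: list of HxWx3 uint8 arrays; returns one fused feature vector.
    idx = np.linspace(0, len(frames) - 1, n_samples).astype(int)
    batch = torch.stack([preprocess(frames[i]) for i in idx])
    with torch.no_grad():
        feats = extractor(batch)         # (n_samples, 512)
    return feats.mean(dim=0).numpy()     # simple average fusion
# A video-level classifier (e.g. an SVM) is then fit on these descriptors.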
Bennabhaktula, Guru Swaroop; Antonisse, Joey; Azzopardi, George
On Improving Generalization of CNN-Based Image Classification with Delineation Maps Using the CORF Push-Pull Inhibition Operator Inproceedings
@inproceedings{bennabhaktula2021improving,
title = {On Improving Generalization of CNN-Based Image Classification with Delineation Maps Using the CORF Push-Pull Inhibition Operator},
author = {Guru Swaroop Bennabhaktula and Joey Antonisse and George Azzopardi},
doi = {10.1007/978-3-030-89128-2_42},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
booktitle = {International Conference on Computer Analysis of Images and Patterns},
pages = {434--444},
organization = {Springer},
abstract = {Deployed image classification pipelines are typically dependent on the images captured in real-world environments. This means that images might be affected by different sources of perturbations (e.g. sensor noise in low-light environments). The main challenge arises from the fact that image quality directly impacts the reliability and consistency of classification tasks. This challenge has hence attracted wide interest within the computer vision community. We propose a transformation step that attempts to enhance the generalization ability of CNN models in the presence of unseen noise in the test set. Concretely, the delineation maps of given images are determined using the CORF push-pull inhibition operator. Such an operation transforms an input image into a space that is more robust to noise before being processed by a CNN. We evaluated our approach on the Fashion MNIST data set with an AlexNet model. It turned out that the proposed CORF-augmented pipeline achieved comparable results on noise-free images to those of a conventional AlexNet classification model without CORF delineation maps, but it consistently achieved significantly superior performance on test images perturbed with different levels of Gaussian and uniform noise.},
keywords = {adversarial attacks, brain-inspired, convnets, deep learning, image classification, noise suppression},
pubstate = {published},
tppubtype = {inproceedings}
}
2020
Chirtoaca, Dan; Ellul, Joshua; Azzopardi, George
A framework for creating deployable smart contracts for non-fungible tokens on the Ethereum blockchain Inproceedings
@inproceedings{chirtoaca2020framework,
title = {A framework for creating deployable smart contracts for non-fungible tokens on the Ethereum blockchain},
author = {Dan Chirtoaca and Joshua Ellul and George Azzopardi},
doi = {10.1109/DAPPS49028.2020.00012},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
booktitle = {2020 IEEE International Conference on Decentralized Applications and Infrastructures (DAPPS)},
pages = {100--105},
organization = {IEEE},
abstract = {Non-fungible tokens are an up-and-coming application domain for smart contracts. Ethereum is the first blockchain-based decentralized computing platform that has standardized this type of tokens into a well-defined interface, namely ERC721. We propose a framework that provides developers with a smart contract suite that offers complete implementations of the ERC721 standard and common extensions and features frequently encountered in ERC721-based applications. We introduce a specification language that enables customization and configuration of the smart contract suite by including and excluding the supported features and extensions. We evaluate the smart contract suite for its extensibility and reusability and compare the metrics with four reference implementations tackling a similar problem. In addition to this, we evaluate and analyze the effort and efficiency of the specification language in comparison to manual configuration of the smart contract suite. Our contribution lies in examining quality metrics for code extensibility and reusability and determining the more insightful metrics for assessing these quality attributes in the context of Solidity smart contracts. Additionally, from the lines-of-code metric, we conclude that our specification language offers a simple and efficient alternative to manual smart contract suite customization.},
keywords = {blockchain, NFT, smart contracts},
pubstate = {published},
tppubtype = {inproceedings}
}
Timmerman, Derrick; Bennabhaktula, Swaroop; Alegre, Enrique; Azzopardi, George
Video Camera Identification from Sensor Pattern Noise with a Constrained ConvNet Inproceedings
@inproceedings{timmerman2020video,
title = {Video Camera Identification from Sensor Pattern Noise with a Constrained ConvNet},
author = {Derrick Timmerman and Swaroop Bennabhaktula and Enrique Alegre and George Azzopardi},
editor = {Maria De Marsico and Gabriella Sanniti di Baja and Ana Fred},
doi = {10.48550/arXiv.2012.06277},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
booktitle = {Proceedings of the 10th International Conference on Pattern Recognition Applications and Methods - ICPRAM},
pages = {417--425},
abstract = {The identification of source cameras from videos, though it is a highly relevant forensic analysis topic, has been studied much less than its counterpart that uses images. In this work we propose a method to identify the source camera of a video based on camera-specific noise patterns that we extract from video frames. For the extraction of noise pattern features, we propose an extended version of a constrained convolutional layer capable of processing color inputs. Our system is designed to classify individual video frames which are in turn combined by a majority vote to identify the source camera. We evaluated this approach on the benchmark VISION data set consisting of 1539 videos from 28 different cameras. To the best of our knowledge, this is the first work that addresses the challenge of video camera identification on a device level. The experiments show that our approach is very promising, achieving up to 93.1% accuracy while being robust to the WhatsApp and YouTube compression techniques. This work is part of the EU-funded project 4NSEEK focused on forensics against child sexual abuse.},
keywords = {camera identification, constrained networks, convnets, deep learning, forensic image analysis},
pubstate = {published},
tppubtype = {inproceedings}
}
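The video-level decision described in the abstract above is a plain majority vote over the per-frame predictions; a minimal sketch (function name and label format are illustrative):

from collections import Counter

def video_camera_id(frame_predictions):
    # frame_predictions: iterable of per-frame camera labels.
    return Counter(frame_predictions).most_common(1)[0][0]

# video_camera_id(['cam3', 'cam3', 'cam7', 'cam3'])  ->  'cam3'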
Bennabhaktula, Guru; Alegre, Enrique; Karastoyanova, Dimka; Azzopardi, George
Device-based Image Matching with Similarity Learning by Convolutional Neural Networks that Exploit the Underlying Camera Sensor Pattern Noise Inproceedings
@inproceedings{bennabhaktula2020device,
title = {Device-based Image Matching with Similarity Learning by Convolutional Neural Networks that Exploit the Underlying Camera Sensor Pattern Noise},
author = {Guru Bennabhaktula and Enrique Alegre and Dimka Karastoyanova and George Azzopardi},
doi = {10.5220/0009155505780584},
year = {2020},
date = {2020-01-01},
urldate = {2020-01-01},
booktitle = {Proceedings of the 9th International Conference on Pattern Recognition Applications and Methods - ICPRAM},
pages = {578--584},
organization = {SciTePress},
abstract = {One of the challenging problems in digital image forensics is the capability to identify images that are captured by the same camera device. This knowledge can help forensic experts in gathering intelligence about suspects by analyzing digital images. In this paper, we propose a two-part network to quantify the likelihood that a given pair of images have the same source camera, and we evaluated it on the benchmark Dresden data set containing 1851 images from 31 different cameras. To the best of our knowledge, we are the first ones addressing the challenge of device-based image matching. Though the proposed approach is not yet forensics-ready, our experiments show that this direction is worth pursuing, achieving at this moment 85 percent accuracy. This ongoing work is part of the EU-funded project 4NSEEK concerned with forensics against child sexual abuse.},
keywords = {camera identification, convnets, deep learning, forensic image analysis},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Simanjuntak, Frans; Azzopardi, George
Fusion of CNN- and COSFIRE-Based Features with Application to Gender Recognition from Face Images Inproceedings
@inproceedings{simanjuntak2019fusion,
title = {Fusion of CNN- and COSFIRE-Based Features with Application to Gender Recognition from Face Images},
author = {Frans Simanjuntak and George Azzopardi},
doi = {10.1007/978-3-030-17795-9_33},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {Science and Information Conference},
pages = {444--458},
organization = {Springer},
abstract = {Convolutional neural networks (CNNs) have been demonstrated to be very effective in various computer vision tasks. The main strength of such networks is that features are learned from some training data. In cases where training data is not abundant, transfer learning can be used in order to adapt features that are pre-trained from other tasks. Similarly, the COSFIRE approach is also trainable as it configures filters to be selective for features selected from training data. In this study we propose a fusion method of these two approaches and evaluate their performance on the application of gender recognition from face images. In particular, we use the pre-trained VGGFace CNN, which when used as standalone, achieved 97.45% on the GENDER-FERET data set. With one of the proposed fusion approaches the recognition rate on the same task is improved to 98.9%, thus reducing the error rate by more than 50%. Our experiments demonstrate that COSFIRE filters can provide complementary features to CNNs, which contribute to a better performance.},
keywords = {brain-inspired, convnets, deep learning, face analysis, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
Tabone, Wilbert; Wilkinson, Michael HF; Gaalen, Anne EJV; Georgiadis, Janniko; Azzopardi, George
Alpha-tree segmentation of human anatomical photographic imagery Inproceedings
@inproceedings{tabone2019alpha,
title = {Alpha-tree segmentation of human anatomical photographic imagery},
author = {Wilbert Tabone and Michael HF Wilkinson and Anne EJV Gaalen and Janniko Georgiadis and George Azzopardi},
doi = {10.1145/3309772.3309776},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {Proceedings of the 2nd International Conference on Applications of Intelligent Systems},
pages = {1--6},
abstract = {Segmentation of anatomical imagery is important in several areas, such as forensics, medical analysis and educational material. The manual segmentation of such images and the subsequent labelling of regions is a very laborious task. We propose an interactive segmentation scheme which we evaluate on a new data set of anatomical imagery. We use a morphological tree-based segmentation method, known as the alpha-tree, together with a Hu-moment thresholding mechanism in order to extract segments from a number of structures. Both qualitative and quantitative results in anatomical imagery of embalmed head, arm and leg specimens indicate that the proposed method can produce meaningful segmentation outputs, which could facilitate further refined labelling.},
keywords = {medical image analysis, segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
Demajo, Lara Marie; Guillaumier, Kristian; Azzopardi, George
Age group recognition from face images using a fusion of CNN- and COSFIRE-based features Inproceedings
@inproceedings{demajo2019age,
title = {Age group recognition from face images using a fusion of CNN- and COSFIRE-based features},
author = {Lara Marie Demajo and Kristian Guillaumier and George Azzopardi},
doi = {10.1145/3309772.3309784},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {Proceedings of the 2nd International Conference on Applications of Intelligent Systems},
pages = {1--6},
abstract = {Automatic age group classification is the ability of an algorithm to classify face images into predetermined age groups. It is an important task due to its numerous applications such as monitoring, biometrics and commercial profiling. In this work we propose a fusion technique that combines CNN- and COSFIRE-based features for the recognition of age groups from face images. Both CNN and COSFIRE are trainable approaches that have been demonstrated to be effective in various computer vision applications. As to CNN, we use the pre-trained VGG-Face architecture and for COSFIRE we configure new COSFIRE filters from training data. Since recent literature suggests that CNNs deliver the highest accuracy rates within such problems, the hypothesis which we want to investigate in this work is whether combining CNN and COSFIRE approaches together will improve results. The proposed fusion technique, which uses stacked Support Vector Machine (SVM) classifiers and is trained and tested with the FERET data set images, has shown that, indeed, CNN- and COSFIRE-based features are complementary, as their combination reduces the error rate by more than 25%.},
keywords = {brain-inspired, convnets, deep learning, face analysis, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
Kind, Adrian; Azzopardi, George
An Explainable AI-Based Computer Aided Detection System for Diabetic Retinopathy Using Retinal Fundus Images Inproceedings
@inproceedings{kind2019explainable,
title = {An Explainable AI-Based Computer Aided Detection System for Diabetic Retinopathy Using Retinal Fundus Images},
author = {Adrian Kind and George Azzopardi},
doi = {10.1007/978-3-030-29888-3_37},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {International Conference on Computer Analysis of Images and Patterns},
pages = {457--468},
organization = {Springer},
abstract = {Diabetic patients have a high risk of developing diabetic retinopathy (DR), which is one of the major causes of blindness. With early detection and the right treatment patients may be spared from losing their vision. We propose a computer-aided detection system, which uses retinal fundus images as input and detects all types of lesions that define diabetic retinopathy. The aim of our system is to assist eye specialists by automatically detecting the healthy retinas and referring the images of the unhealthy ones. For the latter cases, the system offers an interactive tool where the doctor can examine the local lesions that our system marks as suspicious. The final decision remains in the hands of the ophthalmologists. Our approach consists of a multi-class detector that is able to locate and recognize all candidate DR-defining lesions. If the system detects at least one lesion, then the image is marked as unhealthy. The lesion detector is built on the Faster R-CNN ResNet-101 architecture, which we train by transfer learning. We evaluate our approach on three benchmark data sets, namely Messidor-2, IDRiD, and E-Ophtha by measuring the sensitivity (SE) and specificity (SP) based on the binary classification of healthy and unhealthy images. The results that we obtain for Messidor-2 and IDRiD are (SE: 0.965, SP: 0.843), and (SE: 0.83, SP: 0.94), respectively. For the E-Ophtha data set we follow the literature and perform two experiments, one where we detect only lesions of the type microaneurysms (SE: 0.939, SP: 0.82) and the other where we detect only exudates (SE: 0.851, SP: 0.971). Besides the high effectiveness that we achieve, the other important contribution of our work is the interactive tool, which we offer to the medical experts, highlighting all suspicious lesions detected by the proposed system.},
keywords = {convnets, deep learning, medical image analysis, pattern recognition},
pubstate = {published},
tppubtype = {inproceedings}
}
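The image-level referral rule in this abstract is simple: an image is flagged as unhealthy as soon as the lesion detector fires at least once. A hedged sketch, where detect_lesions is a hypothetical wrapper around the trained Faster R-CNN model and the 0.5 confidence threshold is an assumption:

def refer_image(image, detect_lesions, conf_thresh=0.5):
    # detect_lesions(image) -> list of (label, score, box) detections.
    lesions = [d for d in detect_lesions(image) if d[1] >= conf_thresh]
    # At least one retained lesion means the image is referred as unhealthy.
    return ('unhealthy', lesions) if lesions else ('healthy', [])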
Bhole, Amey; Falzon, Owen; Biehl, Michael; Azzopardi, George
A Computer Vision Pipeline that Uses Thermal and RGB Images for the Recognition of Holstein Cattle Inproceedings
@inproceedings{bhole2019computer,
title = {A Computer Vision Pipeline that Uses Thermal and RGB Images for the Recognition of Holstein Cattle},
author = {Amey Bhole and Owen Falzon and Michael Biehl and George Azzopardi},
doi = {10.1007/978-3-030-29891-3_10},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {International Conference on Computer Analysis of Images and Patterns},
volume = {11679},
pages = {108--119},
organization = {Springer},
abstract = {The monitoring of farm animals is important as it allows farmers to keep track of performance indicators and any signs of health issues, which is useful to improve the production of milk, meat, eggs and others. In Europe, bovine identification is mostly dependent upon the electronic ID/RFID ear tags, as opposed to branding and tattooing. The RFID-based ear-tagging approach has been called into question because of implementation and management costs, physical damage and animal welfare concerns. In this paper, we conduct a case study for individual identification of Holstein cattle, characterized by black, brown and white patterns, in collaboration with the Dairy Campus in Leeuwarden. We use a FLIR E6 thermal camera to collect an infrared and RGB image of the side view of each cow just after leaving the milking station. We apply a fully automatic pipeline, which consists of image processing, computer vision and machine learning techniques on a data set containing 1237 images and 136 classes (i.e. individual animals). In particular, we use the thermal images to segment the cattle from the background and remove horizontal and vertical pipes that occlude the cattle in the station, followed by filling the blank areas with an inpainting algorithm. We use the segmented image and apply transfer learning to a pre-trained AlexNet convolutional neural network. We apply five-fold cross-validation and achieve an average accuracy rate of 0.9754 ± 0.0097. The results obtained suggest that the proposed non-invasive approach is highly effective in the automatic recognition of Holstein cattle from the side view. In principle, this approach is applicable to any farm animals that are characterized by distinctive coat patterns.},
keywords = {convnets, deep learning, image classification, pattern recognition, smart farming},
pubstate = {published},
tppubtype = {inproceedings}
}
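The segmentation step described above can be sketched as follows, under stated assumptions: the thermal and RGB images are registered, a fixed temperature threshold separates the warm animal from the background, and the occluding-pipe mask is given. All three are simplifications of the paper's pipeline.

import cv2
import numpy as np

def segment_cow(rgb, thermal, pipe_mask, temp_thresh=128):
    fg = (thermal > temp_thresh).astype(np.uint8)    # warm pixels = animal
    segmented = cv2.bitwise_and(rgb, rgb, mask=fg)
    # Fill in the pipes that occlude the animal, as the abstract describes.
    return cv2.inpaint(segmented, pipe_mask, inpaintRadius=3,
                       flags=cv2.INPAINT_TELEA)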
2018
Alsahaf, Ahmad; Azzopardi, George; Ducro, Bart; Veerkamp, Roel F; Petkov, Nicolai
Assigning pigs to uniform target weight groups using machine learning Inproceedings
@inproceedings{alsahaf2018assigning,
title = {Assigning pigs to uniform target weight groups using machine learning},
author = {Ahmad Alsahaf and George Azzopardi and Bart Ducro and Roel F Veerkamp and Nicolai Petkov},
url = {https://research.rug.nl/en/publications/assigning-pigs-to-uniform-target-weight-groups-using-machine-lear},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
booktitle = {Proceedings of the World Congress on Genetics Applied to Livestock Production, vol. Species-Porcine},
volume = {1},
pages = {112},
abstract = {A standard practice at pig farms is to assign finisher pigs to groups based on their live weight measurements or based on visual inspection of their sizes. As an alternative, we used machine learning classification, namely the random forest algorithm, for assigning finisher pigs to groups for the purpose of increasing body weight uniformity in each group. Instead of relying solely on weight measurements, random forest enabled us to combine weight measurements with other phenotypes and genetic data (in the form of EBVs). We found that using random forest with the combination of phenotypic and genetic data achieves the lowest classification error (0.3409) in 10-fold cross-validation, followed by random forest with phenotypic and genetic data separately (0.3460 and 0.4591), then standard assignment based on birth weight (0.5611), and finally standard assignment based on the weight at the start of the finishing phase (0.7015).},
keywords = {predictive analysis, smart farming},
pubstate = {published},
tppubtype = {inproceedings}
}
Abadi, Fthi Arefayne; Ellul, Joshua; Azzopardi, George
The Blockchain of Things, Beyond Bitcoin: A Systematic Review Inproceedings
@inproceedings{abadi2018blockchain,
title = {The Blockchain of Things, Beyond Bitcoin: A Systematic Review},
author = {Fthi Arefayne Abadi and Joshua Ellul and George Azzopardi},
doi = {10.1109/Cybermatics_2018.2018.00278},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
booktitle = {IEEE Cybermatics, The 2018 IEEE International Conference on Blockchain},
pages = {1666--1672},
organization = {IEEE},
keywords = {blockchain, internet of things, interoperability},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonnici, Alexandra; Bugeja, Dorian; Azzopardi, George
Vectorisation of sketches with shadows and shading using COSFIRE filters Inproceedings
@inproceedings{bonnici2018vectorisation,
title = {Vectorisation of sketches with shadows and shading using COSFIRE filters},
author = {Alexandra Bonnici and Dorian Bugeja and George Azzopardi},
doi = {10.1145/3209280.3209525},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
booktitle = {Proceedings of the ACM Symposium on Document Engineering 2018},
pages = {1--10},
keywords = {brain-inspired, pattern recognition, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonnici, Alexandra; Abela, Julian; Zammit, Nicholas; Azzopardi, George
Automatic ornament localisation, recognition and expression from music sheets Inproceedings
@inproceedings{bonnici2018automatic,
title = {Automatic ornament localisation, recognition and expression from music sheets},
author = {Alexandra Bonnici and Julian Abela and Nicholas Zammit and George Azzopardi},
doi = {10.1145/3209280.3209536},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
booktitle = {Proceedings of the ACM Symposium on Document Engineering 2018},
pages = {1--11},
abstract = {Musical notation is a means of passing on performance instructions with fidelity to others. Composers, however, often introduced embellishments to the music they performed, notating these embellishments with symbols next to the relevant notes. In time, these symbols, known as ornaments, and their interpretation became standardized such that there are acceptable ways of interpreting an ornament. Although music books may contain footnotes which express the ornament in full notation, these remain cumbersome to read. Ideally, a music student will have the possibility of selecting ornamented notes and expressing them as full notation. The student should also have the possibility to collapse the expressed ornament back to its symbolic representation, giving the student the possibility of also becoming familiar with playing from the ornamented score. In this paper, we propose a complete pipeline that achieves this goal. We compare the use of COSFIRE and template matching for optical music recognition to identify and extract musical content from the score. We then express the score using MusicXML and design a simple user interface which allows the user to select ornamented notes, view their expressed notation and decide whether they want to retain the expressed notation, modify it, or revert to the symbolic representation of the ornament. The performance results that we achieve indicate the effectiveness of our proposed approach.},
keywords = {brain-inspired, pattern recognition, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
Strisciuglio, Nicola; Azzopardi, George; Petkov, Nicolai
Brain-inspired robust delineation operator Inproceedings
@inproceedings{strisciuglio2018brain,
title = {Brain-inspired robust delineation operator},
author = {Nicola Strisciuglio and George Azzopardi and Nicolai Petkov},
doi = {10.1007/978-3-030-11015-4_41},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
booktitle = {Proceedings of the European Conference on Computer Vision (ECCV) Workshops},
pages = {555--565},
publisher = {Springer},
abstract = {In this paper we present a novel filter, based on the existing COSFIRE filter, for the delineation of patterns of interest. It includes a mechanism of push-pull inhibition that improves robustness to noise in terms of spurious texture. Push-pull inhibition is a phenomenon that is observed in neurons in area V1 of the visual cortex, which suppresses the response of certain simple cells for stimuli of preferred orientation but of non-preferred contrast. This type of inhibition allows for sharper detection of the patterns of interest and improves the quality of delineation especially in images with spurious texture. We performed experiments on images from different applications, namely the detection of rose stems for automatic gardening, the delineation of cracks in pavements and road surfaces, and the segmentation of blood vessels in retinal images. Push-pull inhibition helped to improve results considerably in all applications.},
keywords = {brain-inspired, noise suppression, pattern recognition},
pubstate = {published},
tppubtype = {inproceedings}
}
Azzopardi, George; Foggia, Pasquale; Greco, Antonio; Saggese, Alessia; Vento, Mario
Gender recognition from face images using trainable shape and color features Inproceedings
@inproceedings{azzopardi2018gender,
title = {Gender recognition from face images using trainable shape and color features},
author = {George Azzopardi and Pasquale Foggia and Antonio Greco and Alessia Saggese and Mario Vento},
doi = {10.1109/ICPR.2018.8545771},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
booktitle = {2018 24th International Conference on Pattern Recognition (ICPR)},
pages = {1983--1988},
organization = {IEEE},
abstract = {Gender recognition from face images is an important application and it is still an open computer vision problem, even though it is trivial for the human visual system. Variations in pose, lighting, and expression are a few of the problems that make such an application challenging for a computer system. Neurophysiological studies demonstrate that the human brain is able to distinguish men and women also in the absence of external cues, by analyzing the shape of specific parts of the face. In this paper, we describe an automatic procedure that combines trainable shape and color features for gender classification. In particular, the proposed method fuses edge-based and color-blob-based features by means of trainable COSFIRE filters. The former types of feature are able to extract information about the shape of a face whereas the latter extract information about shades of colors in different parts of the face. We use these two sets of features to create a stacked classification SVM model and demonstrate its effectiveness on the GENDER-COLOR-FERET dataset, where we achieve an accuracy of 96.4%.},
keywords = {brain-inspired, face analysis, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
Alsahaf, Ahmad; Azzopardi, George; Ducro, Bart; Veerkamp, Roel F; Petkov, Nicolai
Predicting Slaughter Weight in Pigs with Regression Tree Ensembles Inproceedings
@inproceedings{alsahaf2018predicting,
title = {Predicting Slaughter Weight in Pigs with Regression Tree Ensembles},
author = {Ahmad Alsahaf and George Azzopardi and Bart Ducro and Roel F Veerkamp and Nicolai Petkov},
doi = {10.3233/978-1-61499-929-4-1},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
booktitle = {Frontiers in Artificial Intelligence and Applications},
volume = {310},
pages = {1--9},
abstract = {Domestic pigs vary in the age at which they reach slaughter weight even under the controlled conditions of modern pig farming. Early and accurate estimates of when a pig will reach slaughter weight can lead to logistic efficiency in farms. In this study, we compare four methods in predicting the age at which a pig reaches slaughter weight (120 kg). Namely, we compare the following regression tree-based ensemble methods: random forest (RF), extremely randomized trees (ET), gradient boosted machines (GBM), and XGBoost. Data from 32,979 pigs are used, comprising a combination of phenotypic features and estimated breeding values (EBV). We found that the boosting ensemble methods, GBM and XGBoost, achieve lower prediction errors than the parallel ensemble methods, RF and ET. On the other hand, RF and ET have fewer parameters to tune, and perform adequately well with default parameter settings.},
keywords = {predictive analysis, smart farming},
pubstate = {published},
tppubtype = {inproceedings}
}
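The comparison described above maps directly onto standard library implementations; a sketch with stand-in data (the real study used phenotypes and EBVs of 32,979 pigs, with age at 120 kg as the regression target):

from sklearn.datasets import make_regression
from sklearn.ensemble import (ExtraTreesRegressor, GradientBoostingRegressor,
                              RandomForestRegressor)
from sklearn.model_selection import cross_val_score
from xgboost import XGBRegressor

# Stand-in regression data; not the pig data set used in the paper.
X, y = make_regression(n_samples=1000, n_features=20, noise=10, random_state=0)

models = {
    'RF': RandomForestRegressor(random_state=0),
    'ET': ExtraTreesRegressor(random_state=0),
    'GBM': GradientBoostingRegressor(random_state=0),
    'XGBoost': XGBRegressor(random_state=0),
}
for name, model in models.items():
    mae = -cross_val_score(model, X, y, cv=10,
                           scoring='neg_mean_absolute_error').mean()
    print(f'{name}: MAE = {mae:.2f}')   # 10-fold CV, as in the paper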
Spiteri, Maria; Azzopardi, George
Customer Churn Prediction for a Motor Insurance Company Inproceedings
@inproceedings{spiteri2018customer,
title = {Customer Churn Prediction for a Motor Insurance Company},
author = {Maria Spiteri and George Azzopardi},
doi = {10.1109/ICDIM.2018.8847066},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
booktitle = {2018 Thirteenth International Conference on Digital Information Management (ICDIM)},
pages = {173--178},
organization = {IEEE},
abstract = {Customer churn poses a significant challenge in various industries, including motor insurance. Retaining customers within insurance companies is much more challenging than in any other industry as policies are generally renewed every year. The main aim of this research is to identify the risk factors associated with churn, establish who the churning customers are, and model time until churn. The dataset used includes 72,445 policy holders and covers a period of one year. The data comprises information related to premiums, claims, policies and policy holders. The random forest algorithm turns out to be a very effective model for forecasting customer churn, reaching an accuracy rate of 91.18%. On the other hand, survival analysis was used to model time until churn and it was concluded that approximately 90% of the policy holders survived for the first five years while the majority of the policy holders survived until the end of the policy period. These results could be used to target the identified customers in marketing campaigns aimed at reducing the rate of churn while increasing profitability.},
keywords = {pattern recognition, predictive analysis},
pubstate = {published},
tppubtype = {inproceedings}
}
Buhagiar, Juan; Strisciuglio, Nicola; Petkov, Nicolai; Azzopardi, George
Automatic Segmentation of Indoor and Outdoor Scenes from Visual Lifelogging Inproceedings
@inproceedings{buhagiar2018automatic,
title = {Automatic Segmentation of Indoor and Outdoor Scenes from Visual Lifelogging},
author = {Juan Buhagiar and Nicola Strisciuglio and Nicolai Petkov and George Azzopardi},
doi = {10.3233/978-1-61499-929-4-194},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
booktitle = {Applications of Intelligent Systems, Proceedings published in Frontiers in Artificial Intelligence and Applications},
volume = {310},
pages = {194--202},
abstract = {Visual Lifelogging is the process of keeping track of one's life through wearable cameras. The focus of this research is to automatically classify images, captured from a wearable camera, into indoor and outdoor scenes. The results of this classification may be used in several applications. For instance, one can quantify the time a person spends outdoors and indoors which may give insights about the psychology of the concerned person. We use transfer learning from two VGG convolutional neural networks (CNN), one that is pre-trained on the ImageNet data set and the other on the Places data set. We investigate two methods of combining features from the two pre-trained CNNs. We evaluate the performance on the new UBRug data set and the benchmark SUN397 data set and achieve accuracy rates of 98.24% and 97.06%, respectively. Features obtained from the ImageNet pretrained CNN turned out to be more effective than those obtained from the Places pre-trained CNN. Fusing the feature vectors obtained from these two CNNs is an effective way to improve the classification. In particular, the performance that we achieve on the SUN397 data set outperforms the state-of-the-art.},
keywords = {egocentric vision, image classification},
pubstate = {published},
tppubtype = {inproceedings}
}
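The fusion investigated above amounts to concatenating feature vectors from two pre-trained backbones. A hedged sketch with torchvision VGG-16 models standing in for the paper's VGG networks; torchvision does not ship Places weights, so that backbone is left uninitialized here.

import torch
from torchvision import models

cnn_imagenet = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1).eval()
cnn_places = models.vgg16(weights=None).eval()  # placeholder for Places weights

def fused_descriptor(batch):
    # batch: (N, 3, 224, 224) preprocessed images.
    with torch.no_grad():
        fa = cnn_imagenet.features(batch).flatten(1)
        fb = cnn_places.features(batch).flatten(1)
    return torch.cat([fa, fb], dim=1)   # concatenation fusion of the two CNNs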
Alsahaf, Ahmad; Azzopardi, George; Ducro, Bart; Veerkamp, Roel; Petkov, Nicolai
Predicting slaughter age in pigs using random forest regression Inproceedings
@inproceedings{alsahaf2018predictingb,
title = {Predicting slaughter age in pigs using random forest regression},
author = {Ahmad Alsahaf and George Azzopardi and Bart Ducro and Roel Veerkamp and Nicolai Petkov},
doi = {10.3233/978-1-61499-929-4-1},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
booktitle = {Applications of Intelligent Systems 2018},
pages = {1--9},
organization = {IOS Press},
abstract = {Domestic pigs vary in the age at which they reach slaughter weight even under the controlled conditions of modern pig farming. Early and accurate estimates of when a pig will reach slaughter weight can lead to logistic efficiency in farms. In this study, we compare four methods in predicting the age at which a pig reaches slaughter weight (120 kg). Namely, we compare the following regression tree-based ensemble methods: random forest (RF), extremely randomized trees (ET), gradient boosted machines (GBM), and XGBoost. Data from 32,979 pigs are used, comprising a combination of phenotypic features and estimated breeding values (EBV). We found that the boosting ensemble methods, GBM and XGBoost, achieve lower prediction errors than the parallel ensemble methods, RF and ET. On the other hand, RF and ET have fewer parameters to tune, and perform adequately well with default parameter settings.},
keywords = {predictive analysis, smart farming},
pubstate = {published},
tppubtype = {inproceedings}
}
Apap, Adrian; Robles, Laura Fernandez; Azzopardi, George
Person Identification with Retinal Fundus Biometric Analysis Using COSFIRE Filters Inproceedings
@inproceedings{apap2018retinal,
title = {Person Identification with Retinal Fundus Biometric Analysis Using COSFIRE Filters},
author = {Adrian Apap and Laura Fernandez Robles and George Azzopardi},
doi = {10.3233/978-1-61499-929-4-10},
year = {2018},
date = {2018-01-01},
urldate = {2018-01-01},
booktitle = {Proceedings of the First International APPIS Conference, Gran Canaria, Spain},
volume = {310},
pages = {10--18},
keywords = {biometrics, brain-inspired, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
2017
Strisciuglio, Nicola; Azzopardi, George; Petkov, Nicolai
Detection of curved lines with B-COSFIRE filters: A case study on crack delineation Inproceedings
@inproceedings{strisciuglio2017detection,
title = {Detection of curved lines with B-COSFIRE filters: A case study on crack delineation},
author = {Nicola Strisciuglio and George Azzopardi and Nicolai Petkov},
doi = {10.1007/978-3-319-64689-3_9},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
booktitle = {International Conference on Computer Analysis of Images and Patterns},
pages = {108--120},
organization = {Springer, Cham},
abstract = {The detection of curvilinear structures is an important step for various computer vision applications, ranging from medical image analysis for segmentation of blood vessels, to remote sensing for the identification of roads and rivers, and to biometrics and robotics, among others. This is a nontrivial task especially for the detection of thin or incomplete curvilinear structures surrounded with noise. We propose a general purpose curvilinear structure detector that uses the brain-inspired trainable B-COSFIRE filters. It consists of four main steps, namely nonlinear filtering with B-COSFIRE, thinning with non-maximum suppression, hysteresis thresholding and morphological closing. We demonstrate its effectiveness on a data set of noisy images with cracked pavements, where we achieve state-of-the-art results (F-measure = 0.865). The proposed method can be employed in any computer vision methodology that requires the delineation of curvilinear and elongated structures.},
keywords = {brain-inspired, contour detection, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
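The four post-filtering steps named in this abstract can be approximated with scikit-image primitives. A sketch under stated assumptions: response is a precomputed B-COSFIRE response map in [0, 1] (the filter itself is not a standard library call), and binary thinning stands in for orientation-based non-maximum suppression, so thresholding and thinning are reordered relative to the paper.

from skimage.filters import apply_hysteresis_threshold
from skimage.morphology import closing, disk, thin

def delineate(response, low=0.2, high=0.5):
    binary = apply_hysteresis_threshold(response, low, high)  # hysteresis step
    binary = thin(binary)            # thinning (in place of gray-level NMS)
    return closing(binary, disk(2))  # morphological closing bridges small gaps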
Azzopardi, George; Greco, Antonio; Saggese, Alessia; Vento, Mario
Fast gender recognition in videos using a novel descriptor based on the gradient magnitudes of facial landmarks Inproceedings
@inproceedings{azzopardi2017fast,
title = {Fast gender recognition in videos using a novel descriptor based on the gradient magnitudes of facial landmarks},
author = {George Azzopardi and Antonio Greco and Alessia Saggese and Mario Vento},
doi = {10.1109/AVSS.2017.8078525},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
booktitle = {2017 14th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)},
pages = {1--6},
organization = {IEEE},
abstract = {The growing interest in recent years for gender recognition from face images is mainly attributable to the wide range of possible applications that can be used for commercial and marketing purposes. It is desirable that such algorithms process high resolution video frames acquired by using surveillance cameras in real-time. To the best of our knowledge, however, there are no studies which analyze the computational impact of the methods and the difficulties related to the processing of faces extracted from videos captured in the wild. We propose a novel face descriptor based on the gradient magnitudes of facial landmarks, which are points automatically extracted from the face contour, eyes, eyebrows, nose, mouth and chin. We evaluate the effectiveness and efficiency of the proposed approach on two new datasets, which we made available online and that consist of color face images and color video sequences acquired in real scenarios. The proposed approach is more efficient and effective than three commercial libraries.},
keywords = {face analysis},
pubstate = {published},
tppubtype = {inproceedings}
}
Rodríguez-Sánchez, Antonio; Chea, Daly; Azzopardi, George; Stabinger, Sebastian
A deep learning approach for detecting and correcting highlights in endoscopic images Inproceedings
@inproceedings{8310082,
title = {A deep learning approach for detecting and correcting highlights in endoscopic images},
author = {Antonio Rodr\'{i}guez-S\'{a}nchez and Daly Chea and George Azzopardi and Sebastian Stabinger},
doi = {10.1109/IPTA.2017.8310082},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
booktitle = {2017 Seventh International Conference on Image Processing Theory, Tools and Applications (IPTA)},
pages = {1--6},
abstract = {The image of an object changes dramatically depending on the lighting conditions surrounding that object. Shadows, reflections and highlights can make the object very difficult for an automatic system to recognize. Additionally, images used in medical applications, such as endoscopic images and videos, contain a large amount of such reflective components. This can pose an extra difficulty for experts to analyze such type of videos and images. It can then be useful to detect - and possibly correct - the locations where those highlights happen. In this work we designed a Convolutional Neural Network for that task. We trained such a network using a dataset that contains ground-truth highlights, showing that those reflective elements can be learnt and thus located and extracted. We then used that trained network to localize and correct the highlights in endoscopic images from the El Salvador Atlas Gastrointestinal videos, obtaining promising results.},
keywords = {convnets, deep learning, medical image analysis},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
Azzopardi, George; Greco, Antonio; Vento, Mario
Gender recognition from face images using a fusion of SVM classifiers Inproceedings
@inproceedings{azzopardi2016gender,
title = {Gender recognition from face images using a fusion of SVM classifiers},
author = {George Azzopardi and Antonio Greco and Mario Vento},
doi = {10.1007/978-3-319-41501-7_59},
year = {2016},
date = {2016-01-01},
urldate = {2016-01-01},
booktitle = {International Conference on Image Analysis and Recognition},
pages = {533--538},
organization = {Springer, Cham},
abstract = {The recognition of gender from face images is an important application, especially in the fields of security, marketing and intelligent user interfaces. We propose an approach to gender recognition from faces by fusing the decisions of SVM classifiers. Each classifier is trained with different types of features, namely HOG (shape), LBP (texture) and raw pixel values. For the latter features we use an SVM with a linear kernel and for the former two we use SVMs with histogram intersection kernels. We come to a decision by fusing the three classifiers with a majority vote. We demonstrate the effectiveness of our approach on a new dataset that we extract from FERET. We achieve an accuracy of 92.6%, which outperforms the commercial products Face++ and Luxand.},
keywords = {face analysis},
pubstate = {published},
tppubtype = {inproceedings}
}
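The classifier fusion described above trains one SVM per feature type and combines the three decisions by majority vote. A minimal sketch with random placeholder features; linear kernels stand in for the histogram-intersection kernels used for HOG and LBP in the paper, and labels are assumed binary (0/1):

import numpy as np
from sklearn.svm import SVC

rng = np.random.default_rng(0)
# Placeholder features; in the paper these are HOG, LBP and raw-pixel
# descriptors of face images, with binary gender labels.
X_hog, X_lbp, X_raw = (rng.normal(size=(100, 64)) for _ in range(3))
y = rng.integers(0, 2, size=100)

svm_hog = SVC(kernel='linear').fit(X_hog, y)
svm_lbp = SVC(kernel='linear').fit(X_lbp, y)
svm_raw = SVC(kernel='linear').fit(X_raw, y)

def predict_gender(x_hog, x_lbp, x_raw):
    votes = (svm_hog.predict(x_hog) + svm_lbp.predict(x_lbp)
             + svm_raw.predict(x_raw))
    return (votes >= 2).astype(int)   # majority of three binary votes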
Azzopardi, George; Greco, Antonio; Vento, Mario
Gender recognition from face images with trainable COSFIRE filters Inproceedings
@inproceedings{azzopardi2016genderb,
title = {Gender recognition from face images with trainable COSFIRE filters},
author = {George Azzopardi and Antonio Greco and Mario Vento},
doi = {10.1109/AVSS.2016.7738068},
year = {2016},
date = {2016-01-01},
urldate = {2016-01-01},
booktitle = {13th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)},
pages = {235--241},
organization = {IEEE},
abstract = {Gender recognition from face images is an important application in the fields of security, retail advertising and marketing. We propose a novel descriptor based on COSFIRE filters for gender recognition. A COSFIRE filter is trainable, in that its selectivity is determined in an automatic configuration process that analyses a given prototype pattern of interest. We demonstrate the effectiveness of the proposed approach on a new dataset called GENDER-FERET with 474 training and 472 test samples and achieve an accuracy rate of 93.7%. It also outperforms an approach that relies on handcrafted features and an ensemble of classifiers. Furthermore, we perform another experiment by using the images of the Labeled Faces in the Wild (LFW) dataset to train our classifier and the test images of the GENDER-FERET dataset for evaluation. This experiment demonstrates the generalization ability of the proposed approach and it also outperforms two commercial libraries, namely Face++ and Luxand.},
keywords = {brain-inspired, face analysis, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
Azzopardi, George; Robles, Laura Fernandez; Alegre, Enrique; Petkov, Nicolai
Increased Generalization Capability of Trainable COSFIRE Filters with Application to Machine Vision Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, machine vision, trainable filters
@inproceedings{Azzopardi2016,
title = {Increased Generalization Capability of Trainable COSFIRE Filters with Application to Machine Vision},
author = {George Azzopardi and Laura Fernandez Robles and Enrique Alegre and Nicolai Petkov},
doi = {10.1109/ICPR.2016.7900152},
year = {2016},
date = {2016-01-01},
urldate = {2016-01-01},
booktitle = {23rd International Conference on Pattern Recognition (ICPR)},
publisher = {IEEE},
abstract = {The recently proposed trainable COSFIRE filters are highly effective in a wide range of computer vision applications, including object recognition, image classification, contour detection and retinal vessel segmentation. A COSFIRE filter is selective for a collection of contour parts in a certain spatial arrangement. These contour parts and their spatial arrangement are determined in an automatic configuration procedure from a single user-specified pattern of interest. The traditional configuration, however, does not guarantee the selection of the most distinctive contour parts. We propose a genetic algorithm-based optimization step in the configuration of COSFIRE filters that determines the minimum subset of contour parts that best characterize the pattern of interest. We use a public dataset of images of an edge milling head machine equipped with multiple cutting tools to demonstrate the effectiveness of the proposed optimization step for the detection and localization of such tools. The optimization process that we propose yields COSFIRE filters with substantially higher generalization capability. With an average of only six COSFIRE filters we achieve high precision P and recall R rates (P = 91.99%; R = 96.22%). This outperforms the original COSFIRE filter approach (without optimization) mostly in terms of recall. The proposed optimization procedure increases the efficiency of COSFIRE filters with little effect on the selectivity.},
keywords = {brain-inspired, machine vision, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
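A schematic sketch of the genetic-algorithm step described above: binary masks over contour parts evolve under a fitness that rewards informative parts and penalises subset size. The fitness below is a toy stand-in; the paper scores actual detection performance of the configured filters.

# Toy GA selecting a minimal subset of contour parts (fitness is illustrative).
import numpy as np

rng = np.random.default_rng(1)
n_parts = 12
informative = rng.random(n_parts)             # hidden "usefulness" of each part

def fitness(mask):
    if mask.sum() == 0:
        return -np.inf
    # reward informative parts, penalise descriptor size (favour minimal subsets)
    return informative[mask.astype(bool)].sum() - 0.3 * mask.sum()

pop = rng.integers(0, 2, (30, n_parts))       # population of binary part-masks
for _ in range(50):
    scores = np.array([fitness(m) for m in pop])
    parents = pop[np.argsort(scores)[-10:]]   # truncation selection
    cut = rng.integers(1, n_parts, 30)
    a, b = (parents[rng.integers(0, 10, 30)] for _ in range(2))
    pop = np.where(np.arange(n_parts) < cut[:, None], a, b)  # one-point crossover
    pop ^= (rng.random(pop.shape) < 0.02).astype(pop.dtype)  # bit-flip mutation

best = pop[np.argmax([fitness(m) for m in pop])]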
2015
Schutte, Klamer; Bouma, Henri; Schavemaker, John; Daniele, Laura; Sappelli, Maya; Koot, Gijs; Eendebak, Pieter; Azzopardi, George; Spitters, Martijn; Boer, Maaike; et al.
Interactive detection of incrementally learned concepts in images with ranking and semantic query interpretation Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: image classification, image retrieval
@inproceedings{schutte2015interactive,
title = {Interactive detection of incrementally learned concepts in images with ranking and semantic query interpretation},
author = {Klamer Schutte and Henri Bouma and John Schavemaker and Laura Daniele and Maya Sappelli and Gijs Koot and Pieter Eendebak and George Azzopardi and Martijn Spitters and Maaike Boer and others},
doi = {10.1109/CBMI.2015.7153623},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {2015 13th International Workshop on Content-Based Multimedia Indexing (CBMI)},
pages = {1--4},
organization = {IEEE},
abstract = {The number of networked cameras is growing exponentially. Multiple applications in different domains result in an increasing need to search semantically over video sensor data. In this paper, we present the GOOSE demonstrator, which is a real-time general-purpose search engine that allows users to pose natural language queries to retrieve corresponding images. Top-down, this demonstrator interprets queries, which are presented as an intuitive graph to collect user feedback. Bottom-up, the system automatically recognizes and localizes concepts in images and it can incrementally learn novel concepts. A smart ranking combines both and allows effective retrieval of relevant images.},
keywords = {image classification, image retrieval},
pubstate = {published},
tppubtype = {inproceedings}
}
Guo, Jiapan; Shi, Chenyu; Azzopardi, George; Petkov, Nicolai
Recognition of architectural and electrical symbols by COSFIRE filters with inhibition Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: object detection, pattern recognition, trainable filters
@inproceedings{guo2015recognition,
title = {Recognition of architectural and electrical symbols by COSFIRE filters with inhibition},
author = {Jiapan Guo and Chenyu Shi and George Azzopardi and Nicolai Petkov},
doi = {10.1007/978-3-319-23117-4_30},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {International Conference on Computer Analysis of Images and Patterns},
pages = {348--358},
organization = {Springer, Cham},
abstract = {The automatic recognition of symbols can be used to automatically convert scanned drawings into digital representations compatible with computer aided design software. We propose a novel approach to automatically recognize architectural and electrical symbols. The proposed method extends the existing trainable COSFIRE approach by adding an inhibition mechanism that is inspired by shape-selective TEO neurons in the visual cortex. A COSFIRE filter with inhibition takes as input excitatory and inhibitory responses from line and edge detectors. The type (excitatory or inhibitory) and the spatial arrangement of low-level features are determined in an automatic configuration step that analyzes two types of prototype patterns, called positive and negative. Excitatory features are extracted from a positive pattern and inhibitory features are extracted from one or more negative patterns. In our experiments we use four subsets of images with different noise levels from the Graphics Recognition data set (GREC 2011) and demonstrate that the inhibition mechanism that we introduce improves the effectiveness of recognition substantially.},
keywords = {object detection, pattern recognition, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
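The inhibition mechanism lends itself to a compact illustration (schematic only; the full COSFIRE configuration and response computation are omitted): the inhibitory response map is subtracted, with a weight, from the excitatory one, and negative values are clipped.

# Schematic inhibition step; eta and the maps are illustrative stand-ins.
import numpy as np

rng = np.random.default_rng(2)
excitatory = rng.random((64, 64))  # combined responses to the positive pattern's parts
inhibitory = rng.random((64, 64))  # combined responses to the negative patterns' parts
eta = 1.5                          # inhibition strength (illustrative value)

response = np.maximum(0.0, excitatory - eta * inhibitory)
detections = response > 0.5        # kept only where excitation survives inhibition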
Strisciuglio, Nicola; Azzopardi, George; Vento, Mario; Petkov, Nicolai
Multiscale blood vessel delineation using B-COSFIRE filters Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, medical image analysis, segmentation, trainable filters
@inproceedings{strisciuglio2015multiscale,
title = {Multiscale blood vessel delineation using B-COSFIRE filters},
author = {Nicola Strisciuglio and George Azzopardi and Mario Vento and Nicolai Petkov},
doi = {10.1007/978-3-319-23117-4_26},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {International Conference on Computer Analysis of Images and Patterns},
pages = {300--312},
organization = {Springer, Cham},
abstract = {We propose a delineation algorithm that deals with bar-like structures of different thickness. Detection of linear structures is applicable to several fields, ranging from medical images for segmentation of vessels to aerial images for delineation of roads or rivers. The proposed method is suited for any delineation problem and employs a set of B-COSFIRE filters selective for lines and line-endings of different thickness. We determine the most effective filters for the application at hand by means of the Generalized Matrix Learning Vector Quantization (GMLVQ) algorithm. We demonstrate the effectiveness of the proposed method by applying it to the task of vessel segmentation in retinal images. We perform experiments on two benchmark data sets, namely DRIVE and STARE. The experimental results show that the proposed delineation algorithm is highly effective and efficient. It can be considered as a general framework for delineation tasks in various applications.},
keywords = {brain-inspired, medical image analysis, segmentation, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
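A hedged sketch of the multiscale combination (the GMLVQ-based filter selection is omitted): responses of filters tuned to different line thicknesses are fused by a pixelwise maximum. Difference-of-Gaussians maps serve as crude stand-ins for B-COSFIRE filters here.

# Multiscale fusion sketch; the "filters" are DoG proxies, not real B-COSFIRE.
import numpy as np
from scipy.ndimage import gaussian_filter

rng = np.random.default_rng(3)
img = rng.random((128, 128))                  # stand-in retinal image

def dog_response(image, sigma):
    # difference-of-Gaussians as a crude proxy for a line filter at one scale
    r = gaussian_filter(image, sigma) - gaussian_filter(image, 2 * sigma)
    return np.maximum(r, 0.0)

responses = [dog_response(img, s) for s in (1.0, 2.0, 4.0)]  # thin .. thick
fused = np.max(responses, axis=0)             # pixelwise max across scales
vessels = fused > 0.05                        # threshold (illustrative)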
Neocleous, Andreas; Azzopardi, George; Schizas, Christos N; Petkov, Nicolai
Filter-Based Approach for Ornamentation Detection and Recognition in Singing Folk Music Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: signal processing, time-series, trainable filters
@inproceedings{neocleous2015filter,
title = {Filter-Based Approach for Ornamentation Detection and Recognition in Singing Folk Music},
author = {Andreas Neocleous and George Azzopardi and Christos N Schizas and Nicolai Petkov},
doi = {10.1007/978-3-319-23192-1_47},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {International Conference on Computer Analysis of Images and Patterns},
pages = {558--569},
organization = {Springer International Publishing},
abstract = {Ornamentations in music play a significant role for the emotion which a performer or a composer aims to create. The automated identification of ornamentations enhances the understanding of music, which can be used as a feature for tasks such as performer identification or mood classification. Existing methods rely on a pre-processing step that performs note segmentation. We propose an alternative method by adapting the existing two-dimensional COSFIRE filter approach to one dimension (1D) for the automatic identification of ornamentations in monophonic folk songs. We construct a set of 1D COSFIRE filters that are selective for the 12 notes of Western music theory. The response of a 1D COSFIRE filter is computed as the geometric mean of the differences between the fundamental frequency values in a local neighbourhood and the preferred values at the corresponding positions. We apply the proposed 1D COSFIRE filters to the pitch tracks of a song at every position along the entire signal, which in turn gives response values in the range [0,1]. The 1D COSFIRE filters that we propose are effective in recognizing meaningful musical information, which can be transformed into symbolic representations and used for further analysis. We demonstrate the effectiveness of the proposed methodology on a new data set that we introduce, which comprises five monophonic Cypriot folk tunes consisting of 428 ornamentations. The proposed method is effective for the detection and recognition of ornamentations in singing folk music.},
keywords = {signal processing, time-series, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
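The response computation described above can be sketched compactly: at each position along the pitch track, similarities between the observed fundamental-frequency values and the filter's preferred values are combined by a geometric mean, giving values in [0, 1]. The Gaussian similarity function and sigma below are illustrative assumptions, not the paper's exact formulation.

# 1D COSFIRE-style response on a pitch track (illustrative similarity function).
import numpy as np

def cosfire_1d(f0, preferred, sigma=0.5):
    n = len(preferred)
    out = np.zeros(len(f0) - n + 1)
    for t in range(len(out)):
        window = f0[t:t + n]
        sims = np.exp(-(window - preferred) ** 2 / (2 * sigma ** 2))
        out[t] = sims.prod() ** (1.0 / n)     # geometric mean, in [0, 1]
    return out

pitch_track = np.array([60, 62, 64, 62, 60, 59, 60], dtype=float)  # toy f0 values
template = np.array([62, 64, 62], dtype=float)                     # preferred notes
resp = cosfire_1d(pitch_track, template)      # peaks where the ornament occurs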
Shi, Chenyu; Guo, Jiapan; Azzopardi, George; Meijer, Joost M; Jonkman, Marcel F; Petkov, Nicolai
Automatic Differentiation of u- and n-serrated Patterns in Direct Immunofluorescence Images Inproceedings
BibTeX | Tags:
@inproceedings{shi2015automatic,
title = {Automatic Differentiation of u- and n-serrated Patterns in Direct Immunofluorescence Images},
author = {Chenyu Shi and Jiapan Guo and George Azzopardi and Joost M Meijer and Marcel F Jonkman and Nicolai Petkov},
year = {2015},
date = {2015-01-01},
booktitle = {International Conference on Computer Analysis of Images and Patterns},
pages = {513--521},
organization = {Springer International Publishing},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bouma, Henri; Eendebak, Pieter T; Schutte, Klamer; Azzopardi, George; Burghouts, Gertjan J
Incremental concept learning with few training examples and hierarchical classification Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: image classification
@inproceedings{bouma2015incremental,
title = {Incremental concept learning with few training examples and hierarchical classification},
author = {Henri Bouma and Pieter T Eendebak and Klamer Schutte and George Azzopardi and Gertjan J Burghouts},
doi = {10.1117/12.2194438},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {Optics and Photonics for Counterterrorism, Crime Fighting, and Defence XI; and Optical Materials and Biomaterials in Security and Defence Systems Technology XII},
volume = {9652},
pages = {96520E},
organization = {International Society for Optics and Photonics},
abstract = {Object recognition and localization are important to automatically interpret video and allow better querying on its content. We propose a method for object localization that learns incrementally and addresses four key aspects. Firstly, we show that for certain applications, recognition is feasible with only a few training samples. Secondly, we show that novel objects can be added incrementally without retraining existing objects, which is important for fast interaction. Thirdly, we show that an unbalanced number of positive training samples leads to biased classifier scores that can be corrected by modifying weights. Fourthly, we show that the detector performance can deteriorate due to hard-negative mining for similar or closely related classes (e.g., for Barbie and dress, because the doll is wearing a dress). This can be solved by our hierarchical classification. We introduce a new dataset, which we call TOSO, and use it to demonstrate the effectiveness of the proposed method for the localization and recognition of multiple objects in images.},
keywords = {image classification},
pubstate = {published},
tppubtype = {inproceedings}
}
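The score-correction idea mentioned in the abstract (biased classifier scores from an unbalanced number of positive training samples) can be illustrated by subtracting the log-odds of the training prior from a raw decision score; the paper's exact weighting scheme may differ.

# Illustrative prior correction of a biased decision score.
import numpy as np

def corrected_score(raw_score, n_pos, n_neg):
    prior_logodds = np.log(n_pos / n_neg)
    return raw_score - prior_logodds

print(corrected_score(0.4, n_pos=900, n_neg=100))  # discounts the over-represented class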
Strisciuglio, Nicola; Vento, Mario; Azzopardi, George; Petkov, Nicolai
Unsupervised delineation of the vessel tree in retinal fundus images Inproceedings
Abstract | Links | BibTeX | Tags: brain-inspired, medical image analysis, segmentation
@inproceedings{strisciuglio2015unsupervised,
title = {Unsupervised delineation of the vessel tree in retinal fundus images},
author = {Nicola Strisciuglio and Mario Vento and George Azzopardi and Nicolai Petkov},
editor = {Joao Manuel R.S. Tavares and R.M. Natal Jorge},
url = {https://research.rug.nl/en/publications/unsupervised-delineation-of-the-vessel-tree-in-retinal-fundus-ima},
isbn = {9781138029262},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {Computational Vision and Medical Image Processing V - Proceedings of the 5th Eccomas Thematic Conference on Computational Vision and Medical Image Processing},
pages = {149-156},
abstract = {Retinal imaging has gained particular popularity as it provides an opportunity to diagnose various medical pathologies in a non-invasive way. One of the basic and very important steps in the analysis of such images is the delineation of the vessel tree from the background. Such segmentation facilitates the investigation of the morphological characteristics of the vessel tree and the analysis of any lesions in the background, which are both indicators for various pathologies. We propose a novel method called B-COSFIRE for the delineation of the vessel tree. It is based on the classic COSFIRE approach, a trainable nonlinear filtering method that allows a filter to be configured by the automatic analysis of any given vessel-like pattern. The response of a B-COSFIRE filter is achieved by combining the responses of difference-of-Gaussians filters whose areas of support are determined in an automatic configuration step. We configure two types of B-COSFIRE filters, one that responds selectively along vessels and another that is selective to vessel endings. The segmentation of the vessel tree is achieved by summing up the response maps of both types of filters followed by thresholding. We demonstrate the high effectiveness of the proposed approach by performing experiments on four public data sets, namely DRIVE, STARE, CHASE_DB1 and HRF. The delineation approach that we propose also has lower time complexity than existing methods.},
keywords = {brain-inspired, medical image analysis, segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
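A schematic of the unsupervised pipeline described above: the response maps of a line-selective filter and a line-ending-selective filter are summed and thresholded. In the sketch below, gradient-magnitude and difference-of-Gaussians maps are crude stand-ins for the configured B-COSFIRE filters.

# Sum-and-threshold sketch; the two response maps are proxies, not B-COSFIRE.
import numpy as np
from scipy.ndimage import gaussian_filter, sobel

rng = np.random.default_rng(4)
img = rng.random((128, 128))                  # stand-in fundus image

smooth = gaussian_filter(img, 2)
line_resp = np.hypot(sobel(smooth, axis=0), sobel(smooth, axis=1))        # line proxy
ending_resp = np.abs(gaussian_filter(img, 1) - gaussian_filter(img, 3))   # ending proxy

combined = line_resp + ending_resp            # sum of the two response maps
segmentation = combined > np.percentile(combined, 90)                     # threshold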
Shi, Chenyu; Meijer, Joost M; Guo, Jiapan; Azzopardi, George; Jonkman, Marcel F; Petkov, Nicolai
Automatic classification of serrated patterns in direct immunofluorescence images Inproceedings
BibTeX | Tags:
@inproceedings{shi2015automaticb,
title = {Automatic classification of serrated patterns in direct immunofluorescence images},
author = {Chenyu Shi and Joost M Meijer and Jiapan Guo and George Azzopardi and Marcel F Jonkman and Nicolai Petkov},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {Autonomous Systems 2015 - Proceedings of the 8th GI Conference},
publisher = {VDI Verlag},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fernández-Robles, Laura; Azzopardi, George; Alegre, Enrique; Petkov, Nicolai
Cutting Edge Localisation in an Edge Profile Milling Head Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: machine vision, trainable filters
@inproceedings{Fern\'{a}ndez-Robles2015,
title = {Cutting Edge Localisation in an Edge Profile Milling Head},
author = {Laura Fern\'{a}ndez-Robles and George Azzopardi and Enrique Alegre and Nicolai Petkov},
doi = {10.1007/978-3-319-23117-4_29},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {Proceedings Part II of CAIP2015, LNCS 9257},
pages = {336-347},
publisher = {Springer},
abstract = {Wear evaluation of cutting tools is a key issue for prolonging their lifetime and ensuring high quality of products. In this paper, we present a method for the effective localisation of cutting edges of inserts in digital images of an edge profile milling head. We introduce a new image data set of 144 images of an edge milling head that contains 30 inserts. We use a circular Hough transform to detect the screws that fasten the inserts. In a cropped area around a detected screw, we use Canny’s edge detection algorithm and Standard Hough Transform to localise line segments that characterise insert edges. We use this information and the geometry of the insert to identify which of these line segments is the cutting edge. The output of our algorithm is a set of quadrilateral regions around the identified cutting edges. These regions can then be used as input to other algorithms for the quality assessment of the cutting edges. Our results show that the proposed method is very effective for the localisation of the cutting edges of inserts in an edge profile milling machine.},
keywords = {machine vision, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
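The localisation pipeline in the abstract above maps directly onto standard OpenCV calls; the sketch below uses a synthetic image and illustrative parameter values rather than the paper's settings.

# Screw detection + edge/line localisation sketch (illustrative parameters).
import cv2
import numpy as np

img = np.zeros((480, 640), dtype=np.uint8)
cv2.circle(img, (320, 240), 40, 255, 3)       # synthetic "screw" for the demo

# 1) circular Hough transform to find the screws that fasten the inserts
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp=1, minDist=80,
                           param1=100, param2=20, minRadius=20, maxRadius=60)

# 2) around each detected screw, Canny edges + standard Hough line transform
if circles is not None:
    for x, y, r in np.round(circles[0]).astype(int):
        crop = img[max(y - 3 * r, 0):y + 3 * r, max(x - 3 * r, 0):x + 3 * r]
        edges = cv2.Canny(crop, 50, 150)
        lines = cv2.HoughLines(edges, 1, np.pi / 180, threshold=60)
        # 3) the insert's geometry would then decide which line is the cutting edge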
2014
de Vries, Harm; Azzopardi, George; Knobbe, Arno; Koelewijn, Andre
Parametric nonlinear regression models for dike monitoring systems Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: predictive analysis, time-series
@inproceedings{deVries2014,
title = {Parametric nonlinear regression models for dike monitoring systems},
author = {Harm de Vries and George Azzopardi and Arno Knobbe and Andre Koelewijn},
doi = {https://doi.org/10.1007/978-3-319-12571-8_30},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
booktitle = {Advances in Intelligent Data Analysis, LNCS},
volume = {8819},
pages = {345--355},
publisher = {Springer},
abstract = {Dike monitoring is crucial for protection against flooding disasters, an especially important topic in low-lying countries such as the Netherlands, where many regions are below sea level. Recently, there has been growing interest in extending traditional dike monitoring by means of a sensor network. This paper presents a case study of a set of pore pressure sensors installed in a sea dike in Boston (UK), which are continuously affected by water levels, the foremost influencing environmental factor. We estimate one-to-one relationships between a water height sensor and individual pore pressure sensors by parametric nonlinear regression models that are based on domain knowledge. We demonstrate the effectiveness of the proposed method by the high goodness of fit we obtain on real test data. Furthermore, we show how the proposed models can be used for the detection of anomalies.},
keywords = {predictive analysis, time-series},
pubstate = {published},
tppubtype = {inproceedings}
}
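A hedged sketch of the modelling step: a domain-inspired parametric nonlinear model of pore pressure as a function of water level is fitted by least squares, and anomalies are flagged by large residuals. The model form and threshold below are illustrative assumptions, not the paper's.

# Parametric nonlinear regression + residual-based anomaly rule (illustrative).
import numpy as np
from scipy.optimize import curve_fit

def pore_model(h, a, b, c):
    # saturating response of pore pressure to water height (assumed form)
    return a + b * (1.0 - np.exp(-c * h))

rng = np.random.default_rng(5)
h = rng.uniform(0, 5, 200)                    # water-level sensor readings
p = pore_model(h, 10.0, 4.0, 0.8) + rng.normal(0, 0.1, 200)  # pore pressure

params, _ = curve_fit(pore_model, h, p, p0=(1.0, 1.0, 1.0))
residuals = p - pore_model(h, *params)
anomalies = np.abs(residuals) > 3 * residuals.std()   # simple anomaly rule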
2013
Azzopardi, George; Petkov, Nicolai
A shape descriptor based on trainable COSFIRE filters for the recognition of handwritten digits Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, image classification, trainable filters
@inproceedings{azzopardi2013shape,
title = {A shape descriptor based on trainable COSFIRE filters for the recognition of handwritten digits},
author = {George Azzopardi and Nicolai Petkov},
doi = {10.1007/978-3-642-40246-3_2},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
booktitle = {International Conference on Computer Analysis of Images and Patterns},
pages = {9--16},
organization = {Springer Berlin Heidelberg},
abstract = {The recognition of handwritten digits is an application which has been used as a benchmark for comparing shape recognition methods. We train COSFIRE filters to be selective for different parts of handwritten digits. In analogy with the neurophysiological concept of population coding, we use the responses of multiple COSFIRE filters as a shape descriptor of a handwritten digit. We demonstrate the effectiveness of the proposed approach on two data sets of handwritten digits, Western Arabic (MNIST) and Farsi, for which we achieve high recognition rates of 99.52% and 99.33%, respectively. COSFIRE filters are conceptually simple, easy to implement and versatile trainable feature detectors. The shape descriptor that we propose is highly effective for the automatic recognition of handwritten digits.},
keywords = {brain-inspired, image classification, trainable filters},
pubstate = {published},
tppubtype = {inproceedings}
}
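The population-coding descriptor can be sketched as follows: the descriptor of a digit image is the vector of maximum responses of a bank of part-selective filters, which then feeds a standard classifier. Random kernels stand in for configured COSFIRE filters, so this is illustrative only.

# Population-coding descriptor sketch with a stand-in filter bank.
import numpy as np
from scipy.ndimage import convolve
from sklearn.svm import SVC

rng = np.random.default_rng(6)
bank = [rng.standard_normal((5, 5)) for _ in range(20)]  # stand-in filter bank

def descriptor(image):
    # one entry per filter: its maximum response over the image
    return np.array([convolve(image, k).max() for k in bank])

digits = rng.random((40, 28, 28))             # toy 28x28 "digit" images
labels = rng.integers(0, 10, 40)
X = np.stack([descriptor(d) for d in digits])
clf = SVC().fit(X, labels)                    # classifier on the descriptors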
Bouma, H.; Azzopardi, G.; Spitters, M. M.; de Wit, J. J.; Versloot, C. A.; van der Zon, R. W. L.; Eendebak, P. T.; Baan, J.; ten Hove, R. J. M.; van Eekeren, A. W. M.; ter Haar, F. B.; den Hollander, R. J. M.; van Huis, R. J.; de Boer, M. H. T. T.; van Antwerpen, G.; Broekhuijsen, B. J.; Daniele, L. M.; Brandt, P.; Schavemaker, J. G. M.; Kraaij, W.; Schutte, K.
TNO at TRECVID 2013: Multimedia event detection and instance search Inproceedings
Links | BibTeX | Tags: image retrieval
@inproceedings{bouma2013tno,
title = {TNO at TRECVID 2013: Multimedia event detection and instance search},
author = {H. Bouma and G. Azzopardi and M. M. Spitters and J. J. de Wit and C. A. Versloot and R. W. L. van der Zon and P. T. Eendebak and J. Baan and R. J. M. ten Hove and A. W. M. van Eekeren and F. B. ter Haar and R. J. M. den Hollander and R. J. van Huis and M. H. T. T. de Boer and G. van Antwerpen and B. J. Broekhuijsen and L. M. Daniele and P. Brandt and J. G. M. Schavemaker and W. Kraaij and K. Schutte},
url = {https://www-nlpir.nist.gov/projects/tvpubs/tv13.papers/tno.pdf},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
keywords = {image retrieval},
pubstate = {published},
tppubtype = {inproceedings}
}