2023
Aswath, Anusha; Alsahaf, Ahmad; Westenbrink, B. Daan; Giepmans, Ben N. G.; Azzopardi, George
COFI - Coarse-semantic to fine-instance unsupervised mitochondria segmentation in EM Inproceedings
Links | BibTeX | Altmetric | Tags: brain-inspired, contour detection, convnets, deep learning, segmentation
@inproceedings{Anusha2023,
  title     = {{COFI} - Coarse-semantic to fine-instance unsupervised mitochondria segmentation in {EM}},
  author    = {Aswath, Anusha and Alsahaf, Ahmad and Westenbrink, B. Daan and Giepmans, Ben N. G. and Azzopardi, George},
  doi       = {10.1007/978-3-031-44240-7_9},
  year      = {2023},
  date      = {2023-09-20},
  urldate   = {2023-07-01},
  booktitle = {Computer Analysis of Images and Patterns (CAIP 2023)},
  series    = {Lecture Notes in Computer Science},
  volume    = {14185},
  publisher = {Springer},
  keywords  = {brain-inspired, contour detection, convnets, deep learning, segmentation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2021
Bennabhaktula, Guru Swaroop; Antonisse, Joey; Azzopardi, George
Abstract | Links | BibTeX | Altmetric | Tags: adversarial attacks, brain-inspired, convnets, deep learning, image classification, noise suppression
@inproceedings{bennabhaktula2021improving,
  title     = {On Improving Generalization of {CNN}-Based Image Classification with Delineation Maps Using the {CORF} Push-Pull Inhibition Operator},
  author    = {Bennabhaktula, Guru Swaroop and Antonisse, Joey and Azzopardi, George},
  doi       = {10.1007/978-3-030-89128-2_42},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  booktitle = {International Conference on Computer Analysis of Images and Patterns},
  pages     = {434--444},
  publisher = {Springer},
  abstract  = {Deployed image classification pipelines are typically dependent on the images captured in real-world environments. This means that images might be affected by different sources of perturbations (e.g. sensor noise in low-light environments). The main challenge arises by the fact that image quality directly impacts the reliability and consistency of classification tasks. This challenge has, hence, attracted wide interest within the computer vision communities. We propose a transformation step that attempts to enhance the generalization ability of CNN models in the presence of unseen noise in the test set. Concretely, the delineation maps of given images are determined using the CORF push-pull inhibition operator. Such an operation transforms an input image into a space that is more robust to noise before being processed by a CNN. We evaluated our approach on the Fashion MNIST data set with an AlexNet model. It turned out that the proposed CORF-augmented pipeline achieved comparable results on noise-free images to those of a conventional AlexNet classification model without CORF delineation maps, but it consistently achieved significantly superior performance on test images perturbed with different levels of Gaussian and uniform noise.},
  keywords  = {adversarial attacks, brain-inspired, convnets, deep learning, image classification, noise suppression},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2020
Bennabhaktula, Guru; Alegre, Enrique; Karastoyanova, Dimka; Azzopardi, George
Abstract | Links | BibTeX | Altmetric | Tags: camera identification, convnets, deep learning, forensic image analysis
@inproceedings{bennabhaktula2020device,
  title     = {Device-based Image Matching with Similarity Learning by Convolutional Neural Networks that Exploit the Underlying Camera Sensor Pattern Noise},
  author    = {Bennabhaktula, Guru and Alegre, Enrique and Karastoyanova, Dimka and Azzopardi, George},
  doi       = {10.5220/0009155505780584},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {Proceedings of the 9th International Conference on Pattern Recognition Applications and Methods - ICPRAM},
  pages     = {578--584},
  publisher = {SciTePress},
  abstract  = {One of the challenging problems in digital image forensics is the capability to identify images that are captured by the same camera device. This knowledge can help forensic experts in gathering intelligence about suspects by analyzing digital images. In this paper, we propose a two-part network to quantify the likelihood that a given pair of images have the same source camera, and we evaluated it on the benchmark Dresden data set containing 1851 images from 31 different cameras. To the best of our knowledge, we are the first ones addressing the challenge of device-based image matching. Though the proposed approach is not yet forensics ready, our experiments show that this direction is worth pursuing, achieving at this moment 85 percent accuracy. This ongoing work is part of the EU-funded project 4NSEEK concerned with forensics against child sexual abuse.},
  keywords  = {camera identification, convnets, deep learning, forensic image analysis},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Timmerman, Derrick; Bennabhaktula, Swaroop; Alegre, Enrique; Azzopardi, George
Video Camera Identification from Sensor Pattern Noise with a Constrained ConvNet Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: camera identification, constrained networks, convnets, deep learning, forensic image analysis
@inproceedings{timmerman2020video,
  title      = {Video Camera Identification from Sensor Pattern Noise with a Constrained {ConvNet}},
  author     = {Timmerman, Derrick and Bennabhaktula, Swaroop and Alegre, Enrique and Azzopardi, George},
  editor     = {De Marsico, Maria and Sanniti di Baja, Gabriella and Fred, Ana},
  doi        = {10.48550/arXiv.2012.06277},
  year       = {2020},
  date       = {2020-01-01},
  urldate    = {2020-01-01},
  booktitle  = {Proceedings of the 10th International Conference on Pattern Recognition Applications and Methods - ICPRAM},
  eprint     = {2012.06277},
  eprinttype = {arXiv},
  pages      = {417--425},
  abstract   = {The identification of source cameras from videos, though it is a highly relevant forensic analysis topic, has been studied much less than its counterpart that uses images. In this work we propose a method to identify the source camera of a video based on camera specific noise patterns that we extract from video frames. For the extraction of noise pattern features, we propose an extended version of a constrained convolutional layer capable of processing color inputs. Our system is designed to classify individual video frames which are in turn combined by a majority vote to identify the source camera. We evaluated this approach on the benchmark VISION data set consisting of 1539 videos from 28 different cameras. To the best of our knowledge, this is the first work that addresses the challenge of video camera identification on a device level. The experiments show that our approach is very promising, achieving up to 93.1% accuracy while being robust to the WhatsApp and YouTube compression techniques. This work is part of the EU-funded project 4NSEEK focused on forensics against child sexual abuse.},
  keywords   = {camera identification, constrained networks, convnets, deep learning, forensic image analysis},
  pubstate   = {published},
  tppubtype  = {inproceedings}
}
2019
Bhole, Amey; Falzon, Owen; Biehl, Michael; Azzopardi, George
A Computer Vision Pipeline that Uses Thermal and RGB Images for the Recognition of Holstein Cattle Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, image classification, pattern recognition, smart farming
@inproceedings{bhole2019computer,
  title     = {A Computer Vision Pipeline that Uses Thermal and {RGB} Images for the Recognition of {Holstein} Cattle},
  author    = {Bhole, Amey and Falzon, Owen and Biehl, Michael and Azzopardi, George},
  doi       = {10.1007/978-3-030-29891-3_10},
  year      = {2019},
  date      = {2019-01-01},
  urldate   = {2019-01-01},
  booktitle = {International Conference on Computer Analysis of Images and Patterns},
  volume    = {11679},
  pages     = {108--119},
  publisher = {Springer},
  abstract  = {The monitoring of farm animals is important as it allows farmers keeping track of the performance indicators and any signs of health issues, which is useful to improve the production of milk, meat, eggs and others. In Europe, bovine identification is mostly dependent upon the electronic ID/RFID ear tags, as opposed to branding and tattooing. The RFID based ear-tagging approach has been called into question because of implementation and management costs, physical damage and animal welfare concerns. In this paper, we conduct a case study for individual identification of Holstein cattle, characterized by black, brown and white patterns, in collaboration with the Dairy campus in Leeuwarden. We use a FLIR E6 thermal camera to collect an infrared and RGB image of the side view of each cow just after leaving the milking station. We apply a fully automatic pipeline, which consists of image processing, computer vision and machine learning techniques on a data set containing 1237 images and 136 classes (i.e. individual animals). In particular, we use the thermal images to segment the cattle from the background and remove horizontal and vertical pipes that occlude the cattle in the station, followed by filling the blank areas with an inpainting algorithm. We use the segmented image and apply transfer learning to a pre-trained AlexNet convolutional neural network. We apply five-fold cross-validation and achieve an average accuracy rate of 0.9754 ± 0.0097. The results obtained suggest that the proposed non-invasive approach is highly effective in the automatic recognition of Holstein cattle from the side view. In principle, this approach is applicable to any farm animals that are characterized by distinctive coat patterns.},
  keywords  = {convnets, deep learning, image classification, pattern recognition, smart farming},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Demajo, Lara Marie; Guillaumier, Kristian; Azzopardi, George
Age group recognition from face images using a fusion of CNN- and COSFIRE-based features Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, convnets, deep learning, face analysis, trainable filters
@inproceedings{demajo2019age,
  title     = {Age group recognition from face images using a fusion of {CNN}- and {COSFIRE}-based features},
  author    = {Demajo, Lara Marie and Guillaumier, Kristian and Azzopardi, George},
  doi       = {10.1145/3309772.3309784},
  year      = {2019},
  date      = {2019-01-01},
  urldate   = {2019-01-01},
  booktitle = {Proceedings of the 2nd International Conference on Applications of Intelligent Systems},
  pages     = {1--6},
  abstract  = {Automatic age group classification is the ability of an algorithm to classify face images into predetermined age groups. It is an important task due to its numerous applications such as monitoring, biometrics and commercial profiling. In this work we propose a fusion technique that combines CNN- and COSFIRE-based features for the recognition of age groups from face images. Both CNN and COSFIRE are trainable approaches that have been demonstrated to be effective in various computer vision applications. As to CNN, we use the pre-trained VGG-Face architecture and for COSFIRE we configure new COSFIRE filters from training data. Since recent literature suggests that CNNs deliver the highest accuracy rates within such problems, the hypothesis which we want to investigate in this work is whether combining CNN and COSFIRE approaches together will improve results. The proposed fusion technique using stacked Support Vector Machine (SVM) classifiers, and trained and tested with the FERET data set images has shown that, indeed, CNN- and COSFIRE-based features are complimentary as their combination reduces the error rate by more than 25%.},
  keywords  = {brain-inspired, convnets, deep learning, face analysis, trainable filters},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kind, Adrian; Azzopardi, George
An Explainable AI-Based Computer Aided Detection System for Diabetic Retinopathy Using Retinal Fundus Images Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, medical image analysis, pattern recognition
@inproceedings{kind2019explainable,
  title     = {An Explainable {AI}-Based Computer Aided Detection System for Diabetic Retinopathy Using Retinal Fundus Images},
  author    = {Kind, Adrian and Azzopardi, George},
  doi       = {10.1007/978-3-030-29888-3_37},
  year      = {2019},
  date      = {2019-01-01},
  urldate   = {2019-01-01},
  booktitle = {International Conference on Computer Analysis of Images and Patterns},
  pages     = {457--468},
  publisher = {Springer},
  abstract  = {Diabetic patients have a high risk of developing diabetic retinopathy (DR), which is one of the major causes of blindness. With early detection and the right treatment patients may be spared from losing their vision. We propose a computer-aided detection system, which uses retinal fundus images as input and it detects all types of lesions that define diabetic retinopathy. The aim of our system is to assist eye specialists by automatically detecting the healthy retinas and referring the images of the unhealthy ones. For the latter cases, the system offers an interactive tool where the doctor can examine the local lesions that our system marks as suspicious. The final decision remains in the hands of the ophthalmologists. Our approach consists of a multi-class detector, that is able to locate and recognize all candidate DR-defining lesions. If the system detects at least one lesion, then the image is marked as unhealthy. The lesion detector is built on the faster R-CNN ResNet 101 architecture, which we train by transfer learning. We evaluate our approach on three benchmark data sets, namely Messidor-2, IDRiD, and E-Ophtha by measuring the sensitivity (SE) and specificity (SP) based on the binary classification of healthy and unhealthy images. The results that we obtain for Messidor-2 and IDRiD are (SE: 0.965, SP: 0.843), and (SE: 0.83, SP: 0.94), respectively. For the E-Ophtha data set we follow the literature and perform two experiments, one where we detect only lesions of the type micro aneurysms (SE: 0.939, SP: 0.82) and the other when we detect only exudates (SE: 0.851, SP: 0.971). Besides the high effectiveness that we achieve, the other important contribution of our work is the interactive tool, which we offer to the medical experts, highlighting all suspicious lesions detected by the proposed system.},
  keywords  = {convnets, deep learning, medical image analysis, pattern recognition},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Simanjuntak, Frans; Azzopardi, George
Fusion of CNN- and COSFIRE-Based Features with Application to Gender Recognition from Face Images Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, convnets, deep learning, face analysis, trainable filters
@inproceedings{simanjuntak2019fusion,
  title     = {Fusion of {CNN}- and {COSFIRE}-Based Features with Application to Gender Recognition from Face Images},
  author    = {Simanjuntak, Frans and Azzopardi, George},
  doi       = {10.1007/978-3-030-17795-9_33},
  year      = {2019},
  date      = {2019-01-01},
  urldate   = {2019-01-01},
  booktitle = {Science and Information Conference},
  pages     = {444--458},
  publisher = {Springer},
  abstract  = {Convolution neural networks (CNNs) have been demonstrated to be very effective in various computer vision tasks. The main strength of such networks is that features are learned from some training data. In cases where training data is not abundant, transfer learning can be used in order to adapt features that are pre-trained from other tasks. Similarly, the COSFIRE approach is also trainable as it configures filters to be selective for features selected from training data. In this study we propose a fusion method of these two approaches and evaluate their performance on the application of gender recognition from face images. In particular, we use the pre-trained VGGFace CNN, which when used as standalone, it achieved 97.45% on the GENDER-FERET data set. With one of the proposed fusion approaches the recognition rate on the same task is improved to 98.9%, that is reducing the error rate by more than 50%. Our experiments demonstrate that COSFIRE filters can provide complementary features to CNNs, which contribute to a better performance.},
  keywords  = {brain-inspired, convnets, deep learning, face analysis, trainable filters},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2017
Rodríguez-Sánchez, Antonio; Chea, Daly; Azzopardi, George; Stabinger, Sebastian
A deep learning approach for detecting and correcting highlights in endoscopic images Inproceedings
Abstract | Links | BibTeX | Altmetric | Tags: convnets, deep learning, medical image analysis
@inproceedings{8310082,
  title     = {A deep learning approach for detecting and correcting highlights in endoscopic images},
  author    = {Rodr{\'i}guez-S{\'a}nchez, Antonio and Chea, Daly and Azzopardi, George and Stabinger, Sebastian},
  doi       = {10.1109/IPTA.2017.8310082},
  year      = {2017},
  date      = {2017-01-01},
  urldate   = {2017-01-01},
  booktitle = {2017 Seventh International Conference on Image Processing Theory, Tools and Applications (IPTA)},
  pages     = {1--6},
  abstract  = {The image of an object changes dramatically depending on the lightning conditions surrounding that object. Shadows, reflections and highlights can make the object very difficult to be recognized for an automatic system. Additionally, images used in medical applications, such as endoscopic images and videos contain a large amount of such reflective components. This can pose an extra difficulty for experts to analyze such type of videos and images. It can then be useful to detect - and possibly correct - the locations where those highlights happen. In this work we designed a Convolutional Neural Network for that task. We trained such a network using a dataset that contains groundtruth highlights showing that those reflective elements can be learnt and thus located and extracted. We then used that trained network to localize and correct the highlights in endoscopic images from the El Salvador Atlas Gastrointestinal videos obtaining promising results.},
  keywords  = {convnets, deep learning, medical image analysis},
  pubstate  = {published},
  tppubtype = {inproceedings}
}