2022
Bhole, Amey; Udmale, Sandeep S; Falzon, Owen; Azzopardi, George
CORF3D contour maps with application to Holstein cattle recognition from RGB and thermal images Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, contour detection, convnets, deep learning, noise suppression, pattern recognition, smart farming
@article{bhole2022corf3d,
  title     = {{CORF3D} contour maps with application to {Holstein} cattle recognition from {RGB} and thermal images},
  author    = {Amey Bhole and Sandeep S Udmale and Owen Falzon and George Azzopardi},
  doi       = {10.1016/j.eswa.2021.116354},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {Expert Systems with Applications},
  volume    = {192},
  pages     = {116354},
  publisher = {Pergamon},
  abstract  = {Livestock management involves the monitoring of farm animals by tracking certain physiological and phenotypical characteristics over time. In the dairy industry, for instance, cattle are typically equipped with RFID ear tags. The corresponding data (e.g. milk properties) can then be automatically assigned to the respective cow when they enter the milking station. In order to move towards a more scalable, affordable, and welfare-friendly approach, automatic non-invasive solutions are more desirable. Thus, a non-invasive approach is proposed in this paper for the automatic identification of individual Holstein cattle from the side view while exiting a milking station. It considers input images from a thermal-RGB camera. The thermal images are used to delineate the cow from the background. Subsequently, any occluding rods from the milking station are removed and inpainted with the fast marching algorithm. Then, it extracts the RGB map of the segmented cattle along with a novel CORF3D contour map. The latter contains three contour maps extracted by the Combination of Receptive Fields (CORF) model with different strengths of push\textendashpull inhibition. This mechanism suppresses noise in the form of grain type texture. The effectiveness of the proposed approach is demonstrated by means of experiments using a 5-fold and a leave-one day-out cross-validation on a new data set of 3694 images of 383 cows collected from the Dairy Campus in Leeuwarden (the Netherlands) over 9 days. In particular, when combining RGB and CORF3D maps by late fusion, an average accuracy of was obtained for the 5-fold cross validation and for the leave-one day-out experiment. The two maps were combined by first learning two ConvNet classification models, one for each type of map. The feature vectors in the two FC layers obtained from training images were then concatenated and used to learn a linear SVM classification model. 
In principle, the proposed approach with the novel CORF3D contour maps is suitable for various image classification applications, especially where grain type texture is a confounding variable.},
  keywords  = {brain-inspired, contour detection, convnets, deep learning, noise suppression, pattern recognition, smart farming},
  pubstate  = {published},
  tppubtype = {article}
}
2019
Alsahaf, Ahmad; Azzopardi, George; Ducro, Bart; Hanenberg, Egiel; Veerkamp, Roel F; Petkov, Nicolai
Estimation of Muscle Scores of Live Pigs Using a Kinect Camera Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: image processing, pattern recognition, predictive analysis, smart farming
@article{alsahaf2019estimation,
  title     = {Estimation of Muscle Scores of Live Pigs Using a {Kinect} Camera},
  author    = {Ahmad Alsahaf and George Azzopardi and Bart Ducro and Egiel Hanenberg and Roel F Veerkamp and Nicolai Petkov},
  doi       = {10.1109/ACCESS.2019.2910986},
  year      = {2019},
  date      = {2019-01-01},
  urldate   = {2019-01-01},
  journal   = {IEEE Access},
  volume    = {7},
  pages     = {52238--52245},
  publisher = {IEEE},
  abstract  = {The muscle grading of livestock is a primary component of valuation in the meat industry. In pigs, the muscularity of a live animal is traditionally estimated by visual and tactile inspection from an experienced assessor. In addition to being a time-consuming process, scoring of this kind suffers from inconsistencies inherent to the subjectivity of human assessment. On the other hand, accurate, computer-driven methods for carcass composition estimation, such as magnetic resonance imaging (MRI) and computed tomography scans (CT-scans), are expensive and cumbersome to both the animals and their handlers. In this paper, we propose a method that is fast, inexpensive, and non-invasive for estimating the muscularity of live pigs, using RGB-D computer vision and machine learning. We used morphological features extracted from the depth images of pigs to train a classifier that estimates the muscle scores that are likely to be given by a human assessor. The depth images were obtained from a Kinect v1 camera which was placed over an aisle through which the pigs passed freely. The data came from 3246 pigs, each having 20 depth images, and a muscle score from 1 to 7 (reduced later to 5 scores) assigned by an experienced assessor. The classification based on morphological features of the pig's body shape-using a gradient boosted classifier-resulted in a mean absolute error of 0.65 in tenfold cross-validation. Notably, the majority of the errors corresponded to pigs being classified as having muscle scores adjacent to the groundtruth labels given by the assessor. According to the end users of this application, the proposed approach could be used to replace expert assessors at the farm.},
  keywords  = {image processing, pattern recognition, predictive analysis, smart farming},
  pubstate  = {published},
  tppubtype = {article}
}
2017
Fernández-Robles, Laura; Azzopardi, George; Alegre, Enrique; Petkov, Nicolai
Machine-vision-based identification of broken inserts in edge profile milling heads Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: machine vision, pattern recognition, visual quality inspection
@article{Fernandez2017,
  title     = {Machine-vision-based identification of broken inserts in edge profile milling heads},
  author    = {Laura Fern\'{a}ndez-Robles and George Azzopardi and Enrique Alegre and Nicolai Petkov},
  doi       = {10.1016/j.rcim.2016.10.004},
  year      = {2017},
  date      = {2017-04-01},
  urldate   = {2017-04-01},
  journal   = {Robotics and Computer-Integrated Manufacturing},
  volume    = {44},
  pages     = {276--283},
  abstract  = {This paper presents a reliable machine vision system to automatically detect inserts and determine if they are broken. Unlike the machining operations studied in the literature, we are dealing with edge milling head tools for aggressive machining of thick plates (up to 12 centimetres) in a single pass. The studied cutting head tool is characterised by its relatively high number of inserts (up to 30) which makes the localisation of inserts a key aspect. The identification of broken inserts is critical for a proper tool monitoring system. In the method that we propose, we first localise the screws of the inserts and then we determine the expected position and orientation of the cutting edge by applying some geometrical operations. We compute the deviations from the expected cutting edge to the real edge of the inserts to determine if an insert is broken. We evaluated the proposed method on a new dataset that we acquired and made public. The obtained result (a harmonic mean of precision and recall 91.43%) shows that the machine vision system that we present is effective and suitable for the identification of broken inserts in machining head tools and ready to be installed in an on-line system.},
  keywords  = {machine vision, pattern recognition, visual quality inspection},
  pubstate  = {published},
  tppubtype = {article}
}
Gecer, Baris; Azzopardi, George; Petkov, Nicolai
Color-blob-based COSFIRE filters for object recognition Journal Article
Abstract | Links | BibTeX | Altmetric | Tags: brain-inspired, image classification, pattern recognition, trainable filters
@article{gecer2017color,
  title     = {Color-blob-based {COSFIRE} filters for object recognition},
  author    = {Baris Gecer and George Azzopardi and Nicolai Petkov},
  doi       = {10.1016/j.imavis.2016.10.006},
  year      = {2017},
  date      = {2017-01-01},
  urldate   = {2017-01-01},
  journal   = {Image and Vision Computing},
  volume    = {57},
  pages     = {165--174},
  publisher = {Elsevier},
  abstract  = {Most object recognition methods rely on contour-defined features obtained by edge detection or region segmentation. They are not robust to diffuse region boundaries. Furthermore, such methods do not exploit region color information. We propose color-blob-based COSFIRE (Combination of Shifted Filter Responses) filters to be selective for combinations of diffuse circular regions (blobs) in specific mutual spatial arrangements. Such a filter combines the responses of a certain selection of Difference-of-Gaussians filters, essentially blob detectors, of different scales, in certain channels of a color space, and at certain relative positions to each other. Its parameters are determined/learned in an automatic configuration process that analyzes the properties of a given prototype object of interest. We use these filters to compute features that are effective for the recognition of the prototype objects. We form feature vectors that we use with an SVM classifier. We evaluate the proposed method on a traffic sign (GTSRB) and a butterfly data sets. For the GTSRB data set we achieve a recognition rate of 98.94%, which is slightly higher than human performance and for the butterfly data set we achieve 89.02%. The proposed color-blob-based COSFIRE filters are very effective and outperform the contour-based COSFIRE filters. A COSFIRE filter is trainable, it can be configured with a single prototype pattern and it does not require domain knowledge.},
  keywords  = {brain-inspired, image classification, pattern recognition, trainable filters},
  pubstate  = {published},
  tppubtype = {article}
}