2023
1.
Wang, Xueyi; Risi, Nicoletta; Martinez, Estefania Talavera; Chicca, Elisabetta; Karastoyanova, Dimka; Azzopardi, George
Fall detection with event-based data: A case study (Inproceedings)
Tags: deep learning, event-based, fall detection
@inproceedings{Wang2023,
title = {Fall detection with event-based data: A case study},
author = {Xueyi Wang and Nicoletta Risi and Estefania Talavera Martinez and Elisabetta Chicca and Dimka Karastoyanova and George Azzopardi},
doi = {10.1007/978-3-031-44240-7_4},
year = {2023},
date = {2023-09-20},
urldate = {2023-09-20},
booktitle = {Computer Analysis of Images and Patterns. CAIP 2023. Lecture Notes in Computer Science},
volume = {14185},
publisher = {Springer},
keywords = {deep learning, event-based, fall detection},
pubstate = {published},
tppubtype = {inproceedings}
}
2021
2.
Wang, Xueyi; Martinez, Estefania Talavera; Karastoyanova, Dimka; Azzopardi, George
Fall detection and recognition from egocentric visual data: A case study (Inproceedings)
Tags: egocentric vision, fall detection, wearables
@inproceedings{Wang2021,
title = {Fall detection and recognition from egocentric visual data: A case study},
author = {Xueyi Wang and Estefania Talavera Martinez and Dimka Karastoyanova and George Azzopardi},
editor = {Alberto Del Bimbo and Rita Cucchiara and Stan Sclaroff and Giovanni Maria Farinella and Tao Mei and Marco Bertini and others},
url = {https://doi.org/10.34894/3DV8BF},
doi = {10.1007/978-3-030-68763-2_33},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
booktitle = {25th International Conference on Pattern Recognition Workshops, ICPR 2020},
abstract = {Falling is among the most damaging events for elderly people and can result in significant injuries. Due to fear of falling, many elderly people choose to stay at home more in order to feel safer. In this work, we propose a new fall detection and recognition approach, which analyses egocentric videos collected by wearable cameras through a computer vision/machine learning pipeline. More specifically, we conduct a case study with one volunteer who collected video data from two cameras: one attached to the chest and the other to the waist. A total of 776 videos were collected, covering four types of falls and nine kinds of non-falls. Our method works as follows: it extracts several uniformly distributed frames from each video, uses a pre-trained ConvNet model to describe each frame by a feature vector, and applies feature fusion followed by a classification model. Our proposed model demonstrates its suitability for the detection and recognition of falls from the data captured by the two cameras together. For this case study, we detect all falls with only one false positive, and reach a balanced accuracy of 93% in the recognition of the 13 types of activities. Similar results are obtained when the videos of the two cameras are considered separately. Moreover, we observe better performance on videos collected in indoor scenes.},
note = {The data set can be downloaded from https://doi.org/10.34894/3DV8BF},
keywords = {egocentric vision, fall detection, wearables},
pubstate = {published},
tppubtype = {inproceedings}
}
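The abstract above outlines a four-stage pipeline: uniform frame sampling, per-frame features from a pre-trained ConvNet, feature fusion, and a classifier. The following is a minimal sketch of such a pipeline, assuming a ResNet-18 backbone, mean-pooling as the fusion step, and a linear SVM classifier; none of these specific choices are confirmed by the paper, and the helper names (extract_uniform_frames, video_descriptor) are hypothetical.

# Hypothetical sketch of the frame-sampling -> ConvNet-features -> fusion -> classifier
# pipeline described in the abstract. Backbone, fusion, and classifier choices are
# assumptions, not the authors' exact configuration.
import cv2
import numpy as np
import torch
from torchvision import models, transforms
from sklearn.svm import SVC

def extract_uniform_frames(video_path: str, n_frames: int = 8) -> list[np.ndarray]:
    """Sample n_frames uniformly distributed frames from a video file."""
    cap = cv2.VideoCapture(video_path)
    total = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 1)
    indices = np.linspace(0, total - 1, n_frames, dtype=int)
    frames = []
    for idx in indices:
        cap.set(cv2.CAP_PROP_POS_FRAMES, int(idx))
        ok, frame = cap.read()
        if ok:
            frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    cap.release()
    return frames

# Pre-trained ConvNet used as a fixed feature extractor (classification head removed).
backbone = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
backbone.fc = torch.nn.Identity()  # each frame becomes a 512-d feature vector
backbone.eval()

preprocess = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

@torch.no_grad()
def video_descriptor(video_path: str) -> np.ndarray:
    """Describe a video by fusing (here: averaging) per-frame ConvNet features."""
    frames = extract_uniform_frames(video_path)
    batch = torch.stack([preprocess(f) for f in frames])
    feats = backbone(batch)            # shape: (n_frames, 512)
    return feats.mean(dim=0).numpy()   # simple fusion: mean over frames

# Classification over the fused descriptors (13 activity classes in the case study).
# X_train and y_train are placeholders for descriptors and labels of training videos:
# clf = SVC(kernel="linear").fit(X_train, y_train)
# prediction = clf.predict([video_descriptor("example_fall.mp4")])

Mean-pooling over frames is order-agnostic and discards temporal structure; the paper's actual fusion step may differ, but any per-frame descriptor and fusion operator could be substituted into the same skeleton.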