Regularization in matrix relevance learning

Bib
@techreport{MLR0208Schneider2008a,
author = {P. Schneider and K. Bunte and B. Hammer and T. Villmann and M. Biehl},
title = {Regularization in matrix relevance learning},
number = {MLR-02-2008},
issn = {1865-3960},
url = {https://www.techfak.uni-bielefeld.de/~fschleif/mlr/mlr_02_2008.pdf},
journal = {Machine Learning Reports},
institution = {Leipzig University},
volume = {2},
pages = {19--36},
year = {2008},
abstract = {We present a regularization method which extends the recently introduced Generalized Matrix LVQ (GMLVQ). This learning algorithm generalizes the concept of adaptive distance measures in LVQ to the use of relevance matrices. In general, relevance learning can display a tendency towards over-simplification in the course of training. An overly pronounced elimination of dimensions in feature space can degrade performance and lead to instabilities in training. Complementing the standard GMLVQ cost function with an appropriate regularization term prevents this unfavorable behavior and can help to improve the generalization ability. The approach is first tested and illustrated on artificial model data. Furthermore, we apply the scheme to a benchmark classification problem from the medical domain. For both data sets, we demonstrate the usefulness of regularization also in the case of rank-limited relevance matrices, i.e., GMLVQ with an implicit, low-dimensional representation of the data.},
}
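
To make the abstract's idea concrete, below is a minimal numerical sketch of such a regularization term, assuming it takes the log-determinant form -(mu/2) ln det(Omega Omega^T), where Omega parametrizes the relevance matrix Lambda = Omega^T Omega. The penalty form, the symbols Omega and mu, and all function names here are illustrative assumptions for this sketch; the abstract itself does not spell the term out, and this is not the report's reference implementation.

    import numpy as np

    def gmlvq_distance(x, w, omega):
        # Adaptive squared distance d(x, w) = (x - w)^T Omega^T Omega (x - w).
        diff = omega @ (x - w)
        return float(diff @ diff)

    def regularization_penalty(omega, mu):
        # Assumed penalty -mu/2 * ln det(Omega Omega^T). It grows without
        # bound as Lambda = Omega^T Omega approaches singularity, which
        # discourages eliminating too many feature-space dimensions.
        return -0.5 * mu * np.log(np.linalg.det(omega @ omega.T))

    def penalty_gradient(omega, mu):
        # Gradient of the penalty w.r.t. Omega: -mu * pinv(Omega)^T.
        # Descending the combined cost therefore pushes Omega away from
        # rank deficiency.
        return -mu * np.linalg.pinv(omega).T

    # Toy usage: a rank-limited 2x5 matrix Omega, i.e. an implicit
    # two-dimensional representation of five-dimensional data, as in the
    # rank-limited setting mentioned in the abstract.
    rng = np.random.default_rng(0)
    omega = rng.standard_normal((2, 5))
    omega /= np.sqrt(np.trace(omega.T @ omega))  # normalize trace(Lambda) = 1
    print(regularization_penalty(omega, mu=0.01))
    print(penalty_gradient(omega, mu=0.01).shape)  # (2, 5)

Because the penalty and its gradient are defined via det(Omega Omega^T) rather than det(Lambda), the same sketch applies unchanged to rectangular, rank-limited Omega, matching the case highlighted at the end of the abstract.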