Large Margin Linear Discriminative Visualization by Matrix Relevance Learning

Date
June 2012

Abstract
We suggest and investigate the use of Generalized Matrix Relevance Learning (GMLVQ) in the context of discriminative visualization. This prototype-based, supervised learning scheme parameterizes an adaptive distance measure in terms of a matrix of relevance factors. By means of a few benchmark problems, we demonstrate that the training process yields low-rank matrices which can be used efficiently for the discriminative visualization of labeled data. Comparison with well-known standard methods illustrates the flexibility and discriminative power of the novel approach. The mathematical analysis of GMLVQ shows that the corresponding stationarity condition can be formulated as an eigenvalue problem with one or several strongly dominating eigenvectors. We also study the inclusion of a penalty term which enforces non-singularity of the relevance matrix and can be used to control the role of higher-order eigenvalues efficiently.

Links
DOI: http://dx.doi.org/10.1109/IJCNN.2012.6252627
Bib
@inproceedings{GMLVQ_theory_2012,
author = {Michael Biehl and Kerstin Bunte and Barbara Hammer and Frank-Michael Schleif and Petra Schneider and Thomas Villmann},
title = {{Large Margin Linear Discriminative Visualization by Matrix Relevance Learning}},
booktitle = {Proc. of the International Joint Conference on Neural Networks (IJCNN)},
pages = {1873--1880},
month = {"Jun."},
editor = {Hussein Abbass and Daryl Essam and Ruhul Sarker},
address = {Brisbane, Australia},
publisher = {IEEE},
year = {2012},
doi = {10.1109/IJCNN.2012.6252627},
url = {http://dx.doi.org/10.1109/IJCNN.2012.6252627},
abstract = {We suggest and investigate the use of Generalized Matrix Relevance Learning (GMLVQ) in the context of discriminative visualization. This prototype-based, supervised learning scheme parameterizes an adaptive distance measure in terms of a matrix of relevance factors. By means of a few benchmark problems, we demonstrate that the training process yields low-rank matrices which can be used efficiently for the discriminative visualization of labeled data. Comparison with well-known standard methods illustrates the flexibility and discriminative power of the novel approach. The mathematical analysis of GMLVQ shows that the corresponding stationarity condition can be formulated as an eigenvalue problem with one or several strongly dominating eigenvectors. We also study the inclusion of a penalty term which enforces non-singularity of the relevance matrix and can be used to control the role of higher-order eigenvalues efficiently.},
}
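
Code sketch
The adaptive distance used in GMLVQ has the form d_Lambda(x, w) = (x - w)^T Omega^T Omega (x - w), with the relevance matrix Lambda = Omega^T Omega. When training drives Lambda towards low rank, the rows of a rectangular Omega can serve directly as projection axes for visualization. The sketch below is not code from the paper; the function names, the 2 x N shape of omega, and the random toy data are illustrative assumptions meant only to show how such a matrix would be used for the distance computation and a two-dimensional plot.

import numpy as np

def gmlvq_distance(x, w, omega):
    # Adaptive squared distance d_Lambda(x, w) = (x - w)^T Omega^T Omega (x - w),
    # where Lambda = Omega^T Omega is the relevance matrix.
    diff = x - w
    proj = omega @ diff
    return float(proj @ proj)

def project_for_visualization(X, omega):
    # Map the data onto the rows of Omega; with a 2 x N matrix this yields
    # two-dimensional coordinates for a discriminative scatter plot.
    return X @ omega.T

# Toy usage with random numbers (illustrative only; in practice Omega and the
# prototypes would come from minimizing the GLVQ cost function on labeled data).
rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))     # 100 samples, 5 features
w = rng.normal(size=5)            # one class prototype
omega = rng.normal(size=(2, 5))   # rectangular relevance matrix, 2 x N

d = gmlvq_distance(X[0], w, omega)
Y = project_for_visualization(X, omega)   # 100 x 2 coordinates for plotting
print(d, Y.shape)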