A general framework for dimensionality reduction for large data sets

Date
Abstract
Links
Bib
@inproceedings{Hammer_WSOM11,
  author        = {Hammer, Barbara and Biehl, Michael and Bunte, Kerstin and Mokbel, Bassam},
  title         = {A General Framework for Dimensionality Reduction for Large Data Sets},
  booktitle     = {Advances in Self-Organizing Maps, {WSOM} 2011},
  editor        = {Laaksonen, Jorma and Honkela, Timo},
  series        = {Lecture Notes in Computer Science},
  volume        = {6731},
  pages         = {277--287},
  publisher     = {Springer},
  year          = {2011},
  doi           = {10.1007/978-3-642-21566-7_28},
  date-added    = {2011-03-10 14:16:25 +0100},
  date-modified = {2011-06-20 15:47:47 +0200},
  abstract      = {With electronic data increasing dramatically in almost all areas of research, a plethora of new techniques for automatic dimensionality reduction and data visualization has become available in recent years. These offer an interface which allows humans to rapidly scan through large volumes of data. With data sets becoming larger and larger, however, the standard methods can no longer be applied directly. Random subsampling or prior clustering still being one of the most popular solutions in this case, we discuss a principled alternative and formalize the approaches under a general perspectives of dimensionality reduction as cost optimization. We have a first look at the question whether these techniques can be accompanied by theoretical guarantees.},
}